core_validation.cpp revision 1b8a6bd9260c52ca6c6cfe1e34f142f93e1e2cc6
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
//#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)                                                                                                            \
    {                                                                                                                              \
        printf(__VA_ARGS__);                                                                                                       \
        printf("\n");                                                                                                              \
    }
#endif

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

using namespace std;

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);

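// Illustrative note (not part of the layer logic): the binding checks below interpret
// these sentinels roughly as follows, for a hypothetical bound-memory handle `mem`:
//     if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) { /* WSI-owned image; skip normal mem checks */ }
//     else if (mem == MEMORY_UNBOUND)             { /* previously bound memory has been freed */ }
//     else if (mem == VK_NULL_HANDLE)             { /* memory was never bound */ }
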
struct devExts {
    bool wsi_enabled;
    bool wsi_display_swapchain_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
    uint32_t physical_devices_count = 0;
    CHECK_DISABLED disabled = {};

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;

    bool surfaceExtensionEnabled = false;
    bool displayExtensionEnabled = false;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    bool androidSurfaceExtensionEnabled = false;
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
    bool mirSurfaceExtensionEnabled = false;
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
    bool waylandSurfaceExtensionEnabled = false;
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
    bool win32SurfaceExtensionEnabled = false;
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
    bool xcbSurfaceExtensionEnabled = false;
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
    bool xlibSurfaceExtensionEnabled = false;
#endif
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;

    devExts device_extensions = {};
    unordered_set<VkQueue> queues;  // All queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_NODE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_STATE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_STATE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}

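// Illustrative sketch (hypothetical application code, not from this file): the ordering
// the check above enforces, with core_validation enabled before unique_objects:
//     const char *layers[] = {"VK_LAYER_LUNARG_core_validation", "VK_LAYER_GOOGLE_unique_objects"};
//     // instance_create_info.enabledLayerCount = 2;
//     // instance_create_info.ppEnabledLayerNames = layers;
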
// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};

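// Illustrative sketch (not from the source): how the iterator and def_index are meant to
// be used together, assuming a hypothetical shader_module instance `module`:
//     for (auto insn : module) {                           // range-based for via begin()/end()
//         if (insn.opcode() == spv::OpTypePointer) {
//             auto pointee = module.get_def(insn.word(3)); // jump straight to the pointee's def
//             (void)pointee;
//         }
//     }
// Each step advances by insn.len() words, mirroring the physical SPIR-V layout; len()
// asserts a nonzero word count so a malformed module fails fast instead of looping.
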
// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *getImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_STATE *getSamplerState(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image node ptr for specified image or else NULL
IMAGE_STATE *getImageState(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer node ptr for specified buffer or else NULL
BUFFER_NODE *getBufferNode(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view state ptr for specified bufferView or else NULL
BUFFER_VIEW_STATE *getBufferViewState(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_STATE *getEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *getQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_NODE *getQueueNode(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *getPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

SURFACE_STATE *getSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    auto it = instance_data->surface_map.find(surface);
    if (it == instance_data->surface_map.end()) {
        return nullptr;
    }
    return &it->second;
}

// Return ptr to memory binding for given handle of specified type
static BINDABLE *GetObjectMemBinding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return getImageState(my_data, VkImage(handle));
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return getBufferNode(my_data, VkBuffer(handle));
    default:
        break;
    }
    return nullptr;
}
// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict, uint64_t obj_handle,
                                 VkDebugReportObjectTypeEXT obj_type, int32_t const msgCode, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skip_call = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        if (msgCode == -1) {
            // TODO: Fix callers with msgCode == -1 to use correct validation checks.
            skip_call =
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                        MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                                            " used by %s. In this case, %s should have %s set during creation.",
                        ty_str, obj_handle, func_name, ty_str, usage_str);
        } else {
            // msgCode is known to be valid in this branch
            const char *valid_usage = validation_error_map[msgCode];
            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__, msgCode, "MEM",
                                "Invalid usage flag for %s 0x%" PRIxLEAST64
                                " used by %s. In this case, %s should have %s set during creation. %s",
                                ty_str, obj_handle, func_name, ty_str, usage_str, valid_usage);
        }
    }
    return skip_call;
}

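// Worked example (illustrative, not from the source): suppose an image was created with
// usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT and a check asks
// for desired = VK_IMAGE_USAGE_SAMPLED_BIT. Then strict == false passes because
// (actual & desired) != 0, and strict == true also passes because
// (actual & desired) == desired. Asking for desired = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
// fails in both modes and the error above is reported via the debug-report callbacks.
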
// Helper function to validate usage flags for images
// For given image_state send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_STATE const *image_state, VkFlags desired, VkBool32 strict,
                                    int32_t const msgCode, char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, image_state->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                msgCode, "image", func_name, usage_string);
}

// Helper function to validate usage flags for buffers
// For given buffer_node send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateBufferUsageFlags(layer_data *dev_data, BUFFER_NODE const *buffer_node, VkFlags desired, VkBool32 strict,
                                     int32_t const msgCode, char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, buffer_node->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(buffer_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                msgCode, "buffer", func_name, usage_string);
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// Helper function to print lowercase string of object type
//  TODO: Unify string helper functions, this should really come out of a string helper if not there already
static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
        return "image view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
        return "buffer view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
        return "descriptor set";
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
        return "framebuffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
        return "event";
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
        return "query pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
        return "descriptor pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
        return "command pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
        return "pipeline";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
        return "sampler";
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
        return "renderpass";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
        return "device memory";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
        return "semaphore";
    default:
        return "unknown";
    }
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
                                  VkDebugReportObjectTypeEXT type, const char *functionName) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(mem), object_type_to_string(type), bound_object_handle);
        }
    }
    return false;
}
// For given image_state
//  If mem is special swapchain key, then verify that image_state valid member is true
//  Else verify that the image's bound memory range is valid
static bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_state->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(image_state->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image),
                                     VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, functionName);
    }
    return false;
}
// For given buffer_node, verify that the range it's bound to is valid
static bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_NODE *buffer_node, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_node->binding.mem, reinterpret_cast<uint64_t &>(buffer_node->buffer),
                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_state to valid param value
//  Else set the image's bound memory range to valid param value
static void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_state->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
static void SetBufferMemoryValid(layer_data *dev_data, BUFFER_NODE *buffer_node, bool valid) {
    SetMemoryValid(dev_data, buffer_node->binding.mem, reinterpret_cast<uint64_t &>(buffer_node->buffer), valid);
}
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skip_call = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, cb);
            pMemInfo->cb_bindings.insert(cb_node);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (cb_node) {
                cb_node->memObjs.insert(mem);
            }
        }
    }
    return skip_call;
}

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    sampler_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(sampler_state->sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, image_state->binding.mem);
        if (pMemInfo) {
            pMemInfo->cb_bindings.insert(cb_node);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(image_state->binding.mem);
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
        image_state->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT});
    auto image_state = getImageState(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_state) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_NODE *buff_node) {
    // First update CB binding in MemObj mini CB list
    DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, buff_node->binding.mem);
    if (pMemInfo) {
        pMemInfo->cb_bindings.insert(cb_node);
        // Now update CBInfo's Mem reference list
        cb_node->memObjs.insert(buff_node->binding.mem);
    }
    // Now update cb binding for buffer
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
    buff_node->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT});
    auto buffer_node = getBufferNode(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_node) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_node);
    }
}

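// Illustrative note (not from the source): the Add* helpers above maintain two-way links
// so either side can reach the other during validation and teardown. For hypothetical
// cb_node and view_state pointers:
//     AddCommandBufferBindingImageView(dev_data, cb_node, view_state);
//     // afterwards: view_state->cb_bindings contains cb_node, cb_node->object_bindings
//     // contains the view's handle, and the backing image and its memory are linked too
//     // via the nested AddCommandBufferBindingImage() call.
// clear_cmd_buf_and_mem_references() below walks the reverse direction to undo mem links.
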
// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (cb_node) {
        if (cb_node->memObjs.size() > 0) {
            for (auto mem : cb_node->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->cb_bindings.erase(cb_node);
                }
            }
            cb_node->memObjs.clear();
        }
        cb_node->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// Clear a single object binding from given memory object, or report error if binding is missing
static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory mem) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
    if (mem_info && !mem_info->obj_bindings.erase({handle, type})) {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                    "MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
                           ", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
                    object_type_to_string(type), handle, (uint64_t)mem);
    }
    return false;
}

// ClearMemoryObjectBindings clears the binding of objects to memory
//  For the given object it pulls the memory bindings and makes sure that the bindings
//  no longer refer to the object being cleared. This occurs when objects are destroyed.
static bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    bool skip = false;
    BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
    if (mem_binding) {
        if (!mem_binding->sparse) {
            skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
        } else { // Sparse, clear all bindings
            for (auto& sparse_mem_binding : mem_binding->sparse_bindings) {
                skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
            }
        }
    }
    return skip;
}

// For given mem object, verify that it is not null or UNBOUND; if it is, report an error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound. Memory should be bound by calling "
                         "vkBind%sMemory().",
                         api_name, type_name, handle, type_name);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound and previously bound memory was freed. "
                         "Memory must not be freed prior to this operation.",
                         api_name, type_name, handle);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem, reinterpret_cast<const uint64_t &>(image_state->image),
                                          api_name, "Image");
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_NODE *buffer_node, const char *api_name) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_node->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_node->binding.mem,
                                          reinterpret_cast<const uint64_t &>(buffer_node->buffer), api_name, "Buffer");
    }
    return result;
}

// SetMemBinding is used to establish an immutable, non-sparse binding between a single image/buffer object and a memory object.
// For the NULL mem case, output an error.
// Make sure the given object is in the global object map.
//  IF a previous binding existed, output a validation error.
//  Otherwise, add reference from objectInfo to memoryInfo.
//  Add reference off of objInfo.
static bool SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
                          const char *apiName) {
    bool skip_call = false;
    // It's an error to bind an object to NULL memory
    if (mem == VK_NULL_HANDLE) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                            "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        // TODO : Add check here to make sure object isn't sparse
        //  VALIDATION_ERROR_00792 for buffers
        //  VALIDATION_ERROR_00804 for images
        assert(!mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = getMemObjInfo(dev_data, mem_binding->binding.mem);
            if (prev_binding) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which has already been bound to mem object 0x%" PRIxLEAST64,
                            apiName, reinterpret_cast<uint64_t &>(mem), handle, reinterpret_cast<uint64_t &>(prev_binding->mem));
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
                            "Vulkan so this attempt to bind to new memory is not allowed.",
                            apiName, reinterpret_cast<uint64_t &>(mem), handle);
            } else {
                mem_info->obj_bindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_state = getImageState(dev_data, VkImage(handle));
                    if (image_state) {
                        VkImageCreateInfo ici = image_state->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                }
                mem_binding->binding.mem = mem;
            }
        }
    }
    return skip_call;
}

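// Illustrative sketch (an assumption about call sites that appear later in this file):
// the vkBind*Memory entry points are expected to invoke this helper roughly as
//     skip_call |= SetMemBinding(dev_data, mem, reinterpret_cast<uint64_t &>(image),
//                                VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
// Because non-sparse bindings are immutable, a second call for the same handle reports
// MEMTRACK_REBIND_OBJECT rather than silently updating the binding.
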
// For NULL mem case, clear any previous binding; else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return skip value: true if a validation error was logged, false otherwise
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VkDebugReportObjectTypeEXT type,
                                const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        assert(mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, binding.mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            mem_binding->sparse_bindings.insert(binding);
        }
    }
    return skip_call;
}

// For handle of given object type, return memory binding
static bool get_mem_for_type(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skip_call = false;
    *mem = VK_NULL_HANDLE;
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        *mem = getImageState(dev_data, VkImage(handle))->binding.mem;
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        *mem = getBufferNode(dev_data, VkBuffer(handle))->binding.mem;
        break;
    default:
        assert(0);
    }
    if (!*mem) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "Trying to get mem binding for %s object 0x%" PRIxLEAST64
                                   " but binding is NULL. Has memory been bound to this object?",
                            object_type_to_string(type), handle);
    }
    return skip_call;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.size() <= 0)
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        auto mem_info = (*ii).second.get();

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at 0x%p===", (void *)mem_info);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: 0x%" PRIxLEAST64, (uint64_t)(mem_info->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                mem_info->cb_bindings.size() + mem_info->obj_bindings.size());
        if (0 != mem_info->alloc_info.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&mem_info->alloc_info, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                mem_info->obj_bindings.size());
        if (mem_info->obj_bindings.size() > 0) {
            for (auto obj : mem_info->obj_bindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT 0x%" PRIx64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                mem_info->cb_bindings.size());
        if (mem_info->cb_bindings.size() > 0) {
            for (auto cb : mem_info->cb_bindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK command buffer 0x%p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of command buffer list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.size() <= 0)
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (0x%p) has command buffer 0x%p", (void *)pCBInfo,
                (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.size() <= 0)
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj 0x%" PRIx64, (uint64_t)obj);
        }
    }
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

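// Note on the OpEntryPoint word layout used above (per the SPIR-V spec): word(1) is the
// Execution Model (spv::ExecutionModelVertex == 0, TessellationControl == 1, ...),
// word(2) is the entry point's <id>, and the literal name string begins at word(3).
// The 1u << word(1) conversion works because VkShaderStageFlagBits assigns
// VK_SHADER_STAGE_VERTEX_BIT == 1 << 0, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == 1 << 1,
// and so on, in the same order as the SPIR-V Execution Models.
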
1140static char const *storage_class_name(unsigned sc) {
1141    switch (sc) {
1142    case spv::StorageClassInput:
1143        return "input";
1144    case spv::StorageClassOutput:
1145        return "output";
1146    case spv::StorageClassUniformConstant:
1147        return "const uniform";
1148    case spv::StorageClassUniform:
1149        return "uniform";
1150    case spv::StorageClassWorkgroup:
1151        return "workgroup local";
1152    case spv::StorageClassCrossWorkgroup:
1153        return "workgroup global";
1154    case spv::StorageClassPrivate:
1155        return "private global";
1156    case spv::StorageClassFunction:
1157        return "function";
1158    case spv::StorageClassGeneric:
1159        return "generic";
1160    case spv::StorageClassAtomicCounter:
1161        return "atomic counter";
1162    case spv::StorageClassImage:
1163        return "image";
1164    case spv::StorageClassPushConstant:
1165        return "push constant";
1166    default:
1167        return "unknown";
1168    }
1169}
1170
1171/* get the value of an integral constant */
1172unsigned get_constant_value(shader_module const *src, unsigned id) {
1173    auto value = src->get_def(id);
1174    assert(value != src->end());
1175
1176    if (value.opcode() != spv::OpConstant) {
1177        /* TODO: Either ensure that the specialization transform is already performed on a module we're
1178            considering here, OR -- specialize on the fly now.
1179            */
1180        return 1;
1181    }
1182
1183    return value.word(3);
1184}
1185
1186
1187static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
1188    auto insn = src->get_def(type);
1189    assert(insn != src->end());
1190
1191    switch (insn.opcode()) {
1192    case spv::OpTypeBool:
1193        ss << "bool";
1194        break;
1195    case spv::OpTypeInt:
1196        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
1197        break;
1198    case spv::OpTypeFloat:
1199        ss << "float" << insn.word(2);
1200        break;
1201    case spv::OpTypeVector:
1202        ss << "vec" << insn.word(3) << " of ";
1203        describe_type_inner(ss, src, insn.word(2));
1204        break;
1205    case spv::OpTypeMatrix:
1206        ss << "mat" << insn.word(3) << " of ";
1207        describe_type_inner(ss, src, insn.word(2));
1208        break;
1209    case spv::OpTypeArray:
1210        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
1211        describe_type_inner(ss, src, insn.word(2));
1212        break;
1213    case spv::OpTypePointer:
1214        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
1215        describe_type_inner(ss, src, insn.word(3));
1216        break;
1217    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i != insn.len() - 1) {
                ss << ", ";
            }
        }
        /* close the paren outside the loop so a zero-member struct still prints ")" */
        ss << ")";
        break;
    }
1218        ss << "struct of (";
1219        for (unsigned i = 2; i < insn.len(); i++) {
1220            describe_type_inner(ss, src, insn.word(i));
1221            if (i == insn.len() - 1) {
1222                ss << ")";
1223            } else {
1224                ss << ", ";
1225            }
1226        }
1227        break;
1228    }
1229    case spv::OpTypeSampler:
1230        ss << "sampler";
1231        break;
1232    case spv::OpTypeSampledImage:
1233        ss << "sampler+";
1234        describe_type_inner(ss, src, insn.word(2));
1235        break;
1236    case spv::OpTypeImage:
1237        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
1238        break;
1239    default:
1240        ss << "oddtype";
1241        break;
1242    }
1243}
1244
1245
1246static std::string describe_type(shader_module const *src, unsigned type) {
1247    std::ostringstream ss;
1248    describe_type_inner(ss, src, type);
1249    return ss.str();
1250}
1251
1252
static bool is_narrow_numeric_type(spirv_inst_iter type) {
1255    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
1256        return false;
1257    return type.word(2) < 64;
1258}
1259
1260
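/* When 'relaxed' is set, a producer that writes a vector may feed a consumer
 * that reads a narrow (sub-64-bit) numeric type with fewer components: the
 * element types must still match exactly, but the producer's component count
 * need only be >= the consumer's -- e.g. a producer vec4 of float32 output can
 * satisfy a consumer float32 or vec3 of float32 input. */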
1261static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
1262    /* walk two type trees together, and complain about differences */
1263    auto a_insn = a->get_def(a_type);
1264    auto b_insn = b->get_def(b_type);
1265    assert(a_insn != a->end());
1266    assert(b_insn != b->end());
1267
1268    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
1269        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
1270    }
1271
1272    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
1273        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
1274        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
1275    }
1276
1277    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
1278        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
1279    }
1280
1281    if (a_insn.opcode() != b_insn.opcode()) {
1282        return false;
1283    }
1284
1285    if (a_insn.opcode() == spv::OpTypePointer) {
1286        /* match on pointee type. storage class is expected to differ */
1287        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
1288    }
1289
1290    if (a_arrayed || b_arrayed) {
        /* if we haven't resolved array-of-verts by here, we're not going to. */
1292        return false;
1293    }
1294
1295    switch (a_insn.opcode()) {
1296    case spv::OpTypeBool:
1297        return true;
1298    case spv::OpTypeInt:
1299        /* match on width, signedness */
1300        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
1301    case spv::OpTypeFloat:
1302        /* match on width */
1303        return a_insn.word(2) == b_insn.word(2);
1304    case spv::OpTypeVector:
1305        /* match on element type, count. */
1306        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
1307            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
1314    case spv::OpTypeMatrix:
1315        /* match on element type, count. */
1316        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
1317    case spv::OpTypeArray:
1318        /* match on element type, count. these all have the same layout. we don't get here if
1319         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
1320         * not a literal within OpTypeArray */
1321        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
1322               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
1323    case spv::OpTypeStruct:
1324        /* match on all element types */
1325        {
1326            if (a_insn.len() != b_insn.len()) {
1327                return false; /* structs cannot match if member counts differ */
1328            }
1329
1330            for (unsigned i = 2; i < a_insn.len(); i++) {
1331                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
1332                    return false;
1333                }
1334            }
1335
1336            return true;
1337        }
1338    default:
1339        /* remaining types are CLisms, or may not appear in the interfaces we
1340         * are interested in. Just claim no match.
1341         */
1342        return false;
1343    }
1344}
1345
1346static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
1347    auto it = map.find(id);
1348    if (it == map.end())
1349        return def;
1350    else
1351        return it->second;
1352}
1353
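/* Count how many interface locations a type consumes. Worked example: a
 * 3-component vector of 64-bit floats consumes (64 * 3 + 127) / 128 = 2
 * locations, while a 4-component vector of 32-bit floats consumes
 * (32 * 4 + 127) / 128 = 1. */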
1354static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
1355    auto insn = src->get_def(type);
1356    assert(insn != src->end());
1357
1358    switch (insn.opcode()) {
1359    case spv::OpTypePointer:
1360        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
1361         * we're never actually passing pointers around. */
1362        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1363    case spv::OpTypeArray:
1364        if (strip_array_level) {
1365            return get_locations_consumed_by_type(src, insn.word(2), false);
1366        } else {
1367            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1368        }
1369    case spv::OpTypeMatrix:
1370        /* num locations is the dimension * element size */
1371        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
1372    case spv::OpTypeVector: {
1373        auto scalar_type = src->get_def(insn.word(2));
1374        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
1375            scalar_type.word(2) : 32;
1376
        /* locations are 128-bit wide; 3- and 4-component vectors of 64-bit
         * types require two. */
1379        return (bit_width * insn.word(3) + 127) / 128;
1380    }
1381    default:
1382        /* everything else is just 1. */
1383        return 1;
1384
        /* TODO: extend to handle 64-bit scalar types, whose vectors may need
         * multiple locations. */
1387    }
1388}
1389
1390static unsigned get_locations_consumed_by_format(VkFormat format) {
1391    switch (format) {
1392    case VK_FORMAT_R64G64B64A64_SFLOAT:
1393    case VK_FORMAT_R64G64B64A64_SINT:
1394    case VK_FORMAT_R64G64B64A64_UINT:
1395    case VK_FORMAT_R64G64B64_SFLOAT:
1396    case VK_FORMAT_R64G64B64_SINT:
1397    case VK_FORMAT_R64G64B64_UINT:
1398        return 2;
1399    default:
1400        return 1;
1401    }
1402}
1403
1404typedef std::pair<unsigned, unsigned> location_t;
1405typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1406
1407struct interface_var {
1408    uint32_t id;
1409    uint32_t type_id;
1410    uint32_t offset;
1411    bool is_patch;
1412    bool is_block_member;
1413    /* TODO: collect the name, too? Isn't required to be present. */
1414};
1415
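/* Per-stage interface properties. Tessellation control, tessellation
 * evaluation, and geometry shaders declare their per-vertex inputs (and, for
 * tessellation control, outputs) with an extra outer array dimension -- one
 * element per vertex -- which the arrayed_input/arrayed_output flags record so
 * interface matching can strip that level. */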
1416struct shader_stage_attributes {
1417    char const *const name;
1418    bool arrayed_input;
1419    bool arrayed_output;
1420};
1421
1422static shader_stage_attributes shader_stage_attribs[] = {
1423    {"vertex shader", false, false},
1424    {"tessellation control shader", true, true},
1425    {"tessellation evaluation shader", true, false},
1426    {"geometry shader", true, false},
1427    {"fragment shader", false, false},
1428};
1429
1430static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
    while (true) {
1433        if (def.opcode() == spv::OpTypePointer) {
1434            def = src->get_def(def.word(3));
1435        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1436            def = src->get_def(def.word(2));
1437            is_array_of_verts = false;
1438        } else if (def.opcode() == spv::OpTypeStruct) {
1439            return def;
1440        } else {
1441            return src->end();
1442        }
1443    }
1444}
1445
1446static void collect_interface_block_members(shader_module const *src,
1447                                            std::map<location_t, interface_var> *out,
1448                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1449                                            uint32_t id, uint32_t type_id, bool is_patch) {
1450    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1451    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1452    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1453        /* this isn't an interface block. */
1454        return;
1455    }
1456
1457    std::unordered_map<unsigned, unsigned> member_components;
1458
1459    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1460    for (auto insn : *src) {
1461        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1462            unsigned member_index = insn.word(2);
1463
1464            if (insn.word(3) == spv::DecorationComponent) {
1465                unsigned component = insn.word(4);
1466                member_components[member_index] = component;
1467            }
1468        }
1469    }
1470
1471    /* Second pass -- produce the output, from Location decorations */
1472    for (auto insn : *src) {
1473        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1474            unsigned member_index = insn.word(2);
1475            unsigned member_type_id = type.word(2 + member_index);
1476
1477            if (insn.word(3) == spv::DecorationLocation) {
1478                unsigned location = insn.word(4);
1479                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1480                auto component_it = member_components.find(member_index);
1481                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1482
1483                for (unsigned int offset = 0; offset < num_locations; offset++) {
1484                    interface_var v;
1485                    v.id = id;
1486                    /* TODO: member index in interface_var too? */
1487                    v.type_id = member_type_id;
1488                    v.offset = offset;
1489                    v.is_patch = is_patch;
1490                    v.is_block_member = true;
1491                    (*out)[std::make_pair(location + offset, component)] = v;
1492                }
1493            }
1494        }
1495    }
1496}
1497
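/* Build a map from (location, component) to interface variable for one side
 * (Input or Output) of an entrypoint's interface. Variables spanning multiple
 * locations become one entry per location, and interface-block instances are
 * expanded via their members' Location decorations. */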
1498static std::map<location_t, interface_var> collect_interface_by_location(
1499        shader_module const *src, spirv_inst_iter entrypoint,
1500        spv::StorageClass sinterface, bool is_array_of_verts) {
1501
1502    std::unordered_map<unsigned, unsigned> var_locations;
1503    std::unordered_map<unsigned, unsigned> var_builtins;
1504    std::unordered_map<unsigned, unsigned> var_components;
1505    std::unordered_map<unsigned, unsigned> blocks;
1506    std::unordered_map<unsigned, unsigned> var_patch;
1507
1508    for (auto insn : *src) {
1509
1510        /* We consider two interface models: SSO rendezvous-by-location, and
1511         * builtins. Complain about anything that fits neither model.
1512         */
1513        if (insn.opcode() == spv::OpDecorate) {
1514            if (insn.word(2) == spv::DecorationLocation) {
1515                var_locations[insn.word(1)] = insn.word(3);
1516            }
1517
1518            if (insn.word(2) == spv::DecorationBuiltIn) {
1519                var_builtins[insn.word(1)] = insn.word(3);
1520            }
1521
1522            if (insn.word(2) == spv::DecorationComponent) {
1523                var_components[insn.word(1)] = insn.word(3);
1524            }
1525
1526            if (insn.word(2) == spv::DecorationBlock) {
1527                blocks[insn.word(1)] = 1;
1528            }
1529
1530            if (insn.word(2) == spv::DecorationPatch) {
1531                var_patch[insn.word(1)] = 1;
1532            }
1533        }
1534    }
1535
1536    /* TODO: handle grouped decorations */
1537    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1538     * have the same location, and we DON'T want to clobber. */
1539
1540    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1541       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1542       the word to determine which word contains the terminator. */
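    /* Example: the name "main" encodes as 0x6e69616d ('m','a','i','n'; high
       byte 'n' is nonzero, so keep scanning) followed by 0x00000000 (high byte
       zero -- this word holds the terminator). One further increment then
       lands on the first interface <id>. */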
1543    uint32_t word = 3;
1544    while (entrypoint.word(word) & 0xff000000u) {
1545        ++word;
1546    }
1547    ++word;
1548
1549    std::map<location_t, interface_var> out;
1550
1551    for (; word < entrypoint.len(); word++) {
1552        auto insn = src->get_def(entrypoint.word(word));
1553        assert(insn != src->end());
1554        assert(insn.opcode() == spv::OpVariable);
1555
1556        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1557            unsigned id = insn.word(2);
1558            unsigned type = insn.word(1);
1559
1560            int location = value_or_default(var_locations, id, -1);
1561            int builtin = value_or_default(var_builtins, id, -1);
1562            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
1563            bool is_patch = var_patch.find(id) != var_patch.end();
1564
1565            /* All variables and interface block members in the Input or Output storage classes
1566             * must be decorated with either a builtin or an explicit location.
1567             *
1568             * TODO: integrate the interface block support here. For now, don't complain --
1569             * a valid SPIRV module will only hit this path for the interface block case, as the
1570             * individual members of the type are decorated, rather than variable declarations.
1571             */
1572
1573            if (location != -1) {
1574                /* A user-defined interface variable, with a location. Where a variable
1575                 * occupied multiple locations, emit one result for each. */
1576                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1577                for (unsigned int offset = 0; offset < num_locations; offset++) {
1578                    interface_var v;
1579                    v.id = id;
1580                    v.type_id = type;
1581                    v.offset = offset;
1582                    v.is_patch = is_patch;
1583                    v.is_block_member = false;
1584                    out[std::make_pair(location + offset, component)] = v;
1585                }
1586            } else if (builtin == -1) {
1587                /* An interface block instance */
1588                collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch);
1589            }
1590        }
1591    }
1592
1593    return out;
1594}
1595
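/* Gather the subpass inputs a shader reads: any accessible UniformConstant
 * variable decorated with InputAttachmentIndex, keyed by the input attachment
 * index it references (plus an offset for multi-location types). */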
1596static std::vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
1597        debug_report_data *report_data, shader_module const *src,
1598        std::unordered_set<uint32_t> const &accessible_ids) {
1599
1600    std::vector<std::pair<uint32_t, interface_var>> out;
1601
1602    for (auto insn : *src) {
1603        if (insn.opcode() == spv::OpDecorate) {
1604            if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
1605                auto attachment_index = insn.word(3);
1606                auto id = insn.word(1);
1607
1608                if (accessible_ids.count(id)) {
1609                    auto def = src->get_def(id);
1610                    assert(def != src->end());
1611
                    /* The storage class is an operand of the variable's own
                     * definition (word 3 of OpVariable), not of the OpDecorate
                     * instruction. */
                    if (def.opcode() == spv::OpVariable && def.word(3) == spv::StorageClassUniformConstant) {
1613                        auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
1614                        for (unsigned int offset = 0; offset < num_locations; offset++) {
1615                            interface_var v;
1616                            v.id = id;
1617                            v.type_id = def.word(1);
1618                            v.offset = offset;
1619                            v.is_patch = false;
1620                            v.is_block_member = false;
1621                            out.emplace_back(attachment_index + offset, v);
1622                        }
1623                    }
1624                }
1625            }
1626        }
1627    }
1628
1629    return out;
1630}
1631
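/* Gather every accessible Uniform/UniformConstant variable, keyed by its
 * (descriptor set, binding) slot. Variables missing either decoration default
 * to set 0 / binding 0 via value_or_default. */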
1632static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
1633        debug_report_data *report_data, shader_module const *src,
1634        std::unordered_set<uint32_t> const &accessible_ids) {
1635
1636    std::unordered_map<unsigned, unsigned> var_sets;
1637    std::unordered_map<unsigned, unsigned> var_bindings;
1638
1639    for (auto insn : *src) {
1640        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1641         * DecorationDescriptorSet and DecorationBinding.
1642         */
1643        if (insn.opcode() == spv::OpDecorate) {
1644            if (insn.word(2) == spv::DecorationDescriptorSet) {
1645                var_sets[insn.word(1)] = insn.word(3);
1646            }
1647
1648            if (insn.word(2) == spv::DecorationBinding) {
1649                var_bindings[insn.word(1)] = insn.word(3);
1650            }
1651        }
1652    }
1653
1654    std::vector<std::pair<descriptor_slot_t, interface_var>> out;
1655
1656    for (auto id : accessible_ids) {
1657        auto insn = src->get_def(id);
1658        assert(insn != src->end());
1659
1660        if (insn.opcode() == spv::OpVariable &&
1661            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1662            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1663            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1664
1665            interface_var v;
1666            v.id = insn.word(2);
1667            v.type_id = insn.word(1);
1668            v.offset = 0;
1669            v.is_patch = false;
1670            v.is_block_member = false;
1671            out.emplace_back(std::make_pair(set, binding), v);
1672        }
1673    }
1674
1675    return out;
1676}
1677
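/* Compare a producer stage's outputs against a consumer stage's inputs. Both
 * collections are std::maps keyed by (location, component), so they iterate in
 * sorted order and a single merge-style walk can visit unmatched outputs
 * (perf warning), unmatched inputs (error), and matched pairs (type check) in
 * one pass. */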
1678static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1679                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1680                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1681                                              shader_stage_attributes const *consumer_stage) {
1682    bool pass = true;
1683
1684    auto outputs = collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
1685    auto inputs = collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);
1686
1687    auto a_it = outputs.begin();
1688    auto b_it = inputs.begin();
1689
1690    /* maps sorted by key (location); walk them together to find mismatches */
    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1692        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1693        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1694        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1695        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1696
1697        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1698            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1699                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1700                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1701                        a_first.second, consumer_stage->name)) {
1702                pass = false;
1703            }
1704            a_it++;
1705        } else if (a_at_end || a_first > b_first) {
1706            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1707                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1708                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1709                        producer_stage->name)) {
1710                pass = false;
1711            }
1712            b_it++;
1713        } else {
1714            // subtleties of arrayed interfaces:
1715            // - if is_patch, then the member is not arrayed, even though the interface may be.
1716            // - if is_block_member, then the extra array level of an arrayed interface is not
1717            //   expressed in the member type -- it's expressed in the block type.
1718            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1719                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1720                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
1721                             true)) {
1722                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1723                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1724                            a_first.first, a_first.second,
1725                            describe_type(producer, a_it->second.type_id).c_str(),
1726                            describe_type(consumer, b_it->second.type_id).c_str())) {
1727                    pass = false;
1728                }
1729            }
1730            if (a_it->second.is_patch != b_it->second.is_patch) {
1731                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1732                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1733                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1734                            "per-%s in %s stage", a_first.first, a_first.second,
1735                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1736                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1737                    pass = false;
1738                }
1739            }
1740            a_it++;
1741            b_it++;
1742        }
1743    }
1744
1745    return pass;
1746}
1747
1748enum FORMAT_TYPE {
1749    FORMAT_TYPE_UNDEFINED,
1750    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1751    FORMAT_TYPE_SINT,
1752    FORMAT_TYPE_UINT,
1753};
1754
1755static unsigned get_format_type(VkFormat fmt) {
1756    switch (fmt) {
1757    case VK_FORMAT_UNDEFINED:
1758        return FORMAT_TYPE_UNDEFINED;
1759    case VK_FORMAT_R8_SINT:
1760    case VK_FORMAT_R8G8_SINT:
1761    case VK_FORMAT_R8G8B8_SINT:
1762    case VK_FORMAT_R8G8B8A8_SINT:
1763    case VK_FORMAT_R16_SINT:
1764    case VK_FORMAT_R16G16_SINT:
1765    case VK_FORMAT_R16G16B16_SINT:
1766    case VK_FORMAT_R16G16B16A16_SINT:
1767    case VK_FORMAT_R32_SINT:
1768    case VK_FORMAT_R32G32_SINT:
1769    case VK_FORMAT_R32G32B32_SINT:
1770    case VK_FORMAT_R32G32B32A32_SINT:
1771    case VK_FORMAT_R64_SINT:
1772    case VK_FORMAT_R64G64_SINT:
1773    case VK_FORMAT_R64G64B64_SINT:
1774    case VK_FORMAT_R64G64B64A64_SINT:
1775    case VK_FORMAT_B8G8R8_SINT:
1776    case VK_FORMAT_B8G8R8A8_SINT:
1777    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1778    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1779    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1780        return FORMAT_TYPE_SINT;
1781    case VK_FORMAT_R8_UINT:
1782    case VK_FORMAT_R8G8_UINT:
1783    case VK_FORMAT_R8G8B8_UINT:
1784    case VK_FORMAT_R8G8B8A8_UINT:
1785    case VK_FORMAT_R16_UINT:
1786    case VK_FORMAT_R16G16_UINT:
1787    case VK_FORMAT_R16G16B16_UINT:
1788    case VK_FORMAT_R16G16B16A16_UINT:
1789    case VK_FORMAT_R32_UINT:
1790    case VK_FORMAT_R32G32_UINT:
1791    case VK_FORMAT_R32G32B32_UINT:
1792    case VK_FORMAT_R32G32B32A32_UINT:
1793    case VK_FORMAT_R64_UINT:
1794    case VK_FORMAT_R64G64_UINT:
1795    case VK_FORMAT_R64G64B64_UINT:
1796    case VK_FORMAT_R64G64B64A64_UINT:
1797    case VK_FORMAT_B8G8R8_UINT:
1798    case VK_FORMAT_B8G8R8A8_UINT:
1799    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1800    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1801    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1802        return FORMAT_TYPE_UINT;
1803    default:
1804        return FORMAT_TYPE_FLOAT;
1805    }
1806}
1807
1808/* characterizes a SPIR-V type appearing in an interface to a FF stage,
1809 * for comparison to a VkFormat's characterization above. */
1810static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1811    auto insn = src->get_def(type);
1812    assert(insn != src->end());
1813
1814    switch (insn.opcode()) {
1815    case spv::OpTypeInt:
1816        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1817    case spv::OpTypeFloat:
1818        return FORMAT_TYPE_FLOAT;
1819    case spv::OpTypeVector:
1820        return get_fundamental_type(src, insn.word(2));
1821    case spv::OpTypeMatrix:
1822        return get_fundamental_type(src, insn.word(2));
1823    case spv::OpTypeArray:
1824        return get_fundamental_type(src, insn.word(2));
1825    case spv::OpTypePointer:
1826        return get_fundamental_type(src, insn.word(3));
1827    case spv::OpTypeImage:
1828        return get_fundamental_type(src, insn.word(2));
1829
1830    default:
1831        return FORMAT_TYPE_UNDEFINED;
1832    }
1833}
1834
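/* Convert a single-bit VkShaderStageFlagBits value to a zero-based stage
 * index, e.g. VK_SHADER_STAGE_FRAGMENT_BIT (0x10) -> 4. u_ffs (a
 * vk_layer_utils helper) returns the 1-based position of the lowest set bit,
 * matching ffs(). */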
1835static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1836    uint32_t bit_pos = u_ffs(stage);
1837    return bit_pos - 1;
1838}
1839
1840static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1841    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1842     * each binding should be specified only once.
1843     */
1844    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1845    bool pass = true;
1846
1847    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1848        auto desc = &vi->pVertexBindingDescriptions[i];
1849        auto &binding = bindings[desc->binding];
1850        if (binding) {
1851            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1852                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1853                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1854                pass = false;
1855            }
1856        } else {
1857            binding = desc;
1858        }
1859    }
1860
1861    return pass;
1862}
1863
1864static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1865                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1866    bool pass = true;
1867
1868    auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);
1869
1870    /* Build index by location */
1871    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1872    if (vi) {
1873        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1874            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1875            for (auto j = 0u; j < num_locations; j++) {
1876                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1877            }
1878        }
1879    }
1880
1881    auto it_a = attribs.begin();
1882    auto it_b = inputs.begin();
1883    bool used = false;
1884
1885    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1886        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1887        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1888        auto a_first = a_at_end ? 0 : it_a->first;
1889        auto b_first = b_at_end ? 0 : it_b->first.first;
1890        if (!a_at_end && (b_at_end || a_first < b_first)) {
1891            if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1892                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1893                        "Vertex attribute at location %d not consumed by vertex shader", a_first)) {
1894                pass = false;
1895            }
1896            used = false;
1897            it_a++;
1898        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1899            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
                        "Vertex shader consumes input at location %d which is not provided by a vertex attribute",
1901                        b_first)) {
1902                pass = false;
1903            }
1904            it_b++;
1905        } else {
1906            unsigned attrib_type = get_format_type(it_a->second->format);
1907            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1908
1909            /* type checking */
1910            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1911                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1912                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1913                            "Attribute type of `%s` at location %d does not match vertex shader input type of `%s`",
1914                            string_VkFormat(it_a->second->format), a_first,
1915                            describe_type(vs, it_b->second.type_id).c_str())) {
1916                    pass = false;
1917                }
1918            }
1919
1920            /* OK! */
1921            used = true;
1922            it_b++;
1923        }
1924    }
1925
1926    return pass;
1927}
1928
1929static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
1930                                                    spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci,
1931                                                    uint32_t subpass_index) {
1932    std::map<uint32_t, VkFormat> color_attachments;
1933    auto subpass = rpci->pSubpasses[subpass_index];
1934    for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
1935        uint32_t attachment = subpass.pColorAttachments[i].attachment;
1936        if (attachment == VK_ATTACHMENT_UNUSED)
1937            continue;
1938        if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
1939            color_attachments[i] = rpci->pAttachments[attachment].format;
1940        }
1941    }
1942
1943    bool pass = true;
1944
    /* TODO: dual source blend index (spv::DecorationIndex, zero if not provided) */
1946
1947    auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);
1948
1949    auto it_a = outputs.begin();
1950    auto it_b = color_attachments.begin();
1951
1952    /* Walk attachment list and outputs together */
1953
1954    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1955        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1956        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1957
1958        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
1959            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1960                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1961                        "fragment shader writes to output location %d with no matching attachment", it_a->first.first)) {
1962                pass = false;
1963            }
1964            it_a++;
1965        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
1966            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1967                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by fragment shader",
1968                        it_b->first)) {
1969                pass = false;
1970            }
1971            it_b++;
1972        } else {
1973            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
1974            unsigned att_type = get_format_type(it_b->second);
1975
1976            /* type checking */
1977            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1978                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1979                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1980                            "Attachment %d of type `%s` does not match fragment shader output type of `%s`", it_b->first,
1981                            string_VkFormat(it_b->second),
1982                            describe_type(fs, it_a->second.type_id).c_str())) {
1983                    pass = false;
1984                }
1985            }
1986
1987            /* OK! */
1988            it_a++;
1989            it_b++;
1990        }
1991    }
1992
1993    return pass;
1994}
1995
1996/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1997 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1998 * for example.
 * Note: we only explore parts of the module which might actually contain ids we care about for the above analyses.
2000 *  - NOT the shader input/output interfaces.
2001 *
2002 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
2003 * converting parts of this to be generated from the machine-readable spec instead.
2004 */
2005static std::unordered_set<uint32_t> mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint) {
2006    std::unordered_set<uint32_t> ids;
2007    std::unordered_set<uint32_t> worklist;
2008    worklist.insert(entrypoint.word(2));
2009
2010    while (!worklist.empty()) {
2011        auto id_iter = worklist.begin();
2012        auto id = *id_iter;
2013        worklist.erase(id_iter);
2014
2015        auto insn = src->get_def(id);
2016        if (insn == src->end()) {
2017            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
2018             * across all kinds of things here that we may not care about. */
2019            continue;
2020        }
2021
2022        /* try to add to the output set */
2023        if (!ids.insert(id).second) {
2024            continue; /* if we already saw this id, we don't want to walk it again. */
2025        }
2026
2027        switch (insn.opcode()) {
2028        case spv::OpFunction:
2029            /* scan whole body of the function, enlisting anything interesting */
2030            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
2031                switch (insn.opcode()) {
2032                case spv::OpLoad:
2033                case spv::OpAtomicLoad:
2034                case spv::OpAtomicExchange:
2035                case spv::OpAtomicCompareExchange:
2036                case spv::OpAtomicCompareExchangeWeak:
2037                case spv::OpAtomicIIncrement:
2038                case spv::OpAtomicIDecrement:
2039                case spv::OpAtomicIAdd:
2040                case spv::OpAtomicISub:
2041                case spv::OpAtomicSMin:
2042                case spv::OpAtomicUMin:
2043                case spv::OpAtomicSMax:
2044                case spv::OpAtomicUMax:
2045                case spv::OpAtomicAnd:
2046                case spv::OpAtomicOr:
2047                case spv::OpAtomicXor:
2048                    worklist.insert(insn.word(3)); /* ptr */
2049                    break;
2050                case spv::OpStore:
2051                case spv::OpAtomicStore:
2052                    worklist.insert(insn.word(1)); /* ptr */
2053                    break;
2054                case spv::OpAccessChain:
2055                case spv::OpInBoundsAccessChain:
2056                    worklist.insert(insn.word(3)); /* base ptr */
2057                    break;
2058                case spv::OpSampledImage:
2059                case spv::OpImageSampleImplicitLod:
2060                case spv::OpImageSampleExplicitLod:
2061                case spv::OpImageSampleDrefImplicitLod:
2062                case spv::OpImageSampleDrefExplicitLod:
2063                case spv::OpImageSampleProjImplicitLod:
2064                case spv::OpImageSampleProjExplicitLod:
2065                case spv::OpImageSampleProjDrefImplicitLod:
2066                case spv::OpImageSampleProjDrefExplicitLod:
2067                case spv::OpImageFetch:
2068                case spv::OpImageGather:
2069                case spv::OpImageDrefGather:
2070                case spv::OpImageRead:
2071                case spv::OpImage:
2072                case spv::OpImageQueryFormat:
2073                case spv::OpImageQueryOrder:
2074                case spv::OpImageQuerySizeLod:
2075                case spv::OpImageQuerySize:
2076                case spv::OpImageQueryLod:
2077                case spv::OpImageQueryLevels:
2078                case spv::OpImageQuerySamples:
2079                case spv::OpImageSparseSampleImplicitLod:
2080                case spv::OpImageSparseSampleExplicitLod:
2081                case spv::OpImageSparseSampleDrefImplicitLod:
2082                case spv::OpImageSparseSampleDrefExplicitLod:
2083                case spv::OpImageSparseSampleProjImplicitLod:
2084                case spv::OpImageSparseSampleProjExplicitLod:
2085                case spv::OpImageSparseSampleProjDrefImplicitLod:
2086                case spv::OpImageSparseSampleProjDrefExplicitLod:
2087                case spv::OpImageSparseFetch:
2088                case spv::OpImageSparseGather:
2089                case spv::OpImageSparseDrefGather:
2090                case spv::OpImageTexelPointer:
2091                    worklist.insert(insn.word(3)); /* image or sampled image */
2092                    break;
2093                case spv::OpImageWrite:
                    worklist.insert(insn.word(1)); /* image -- different operand order from the cases above */
2095                    break;
2096                case spv::OpFunctionCall:
2097                    for (uint32_t i = 3; i < insn.len(); i++) {
2098                        worklist.insert(insn.word(i)); /* fn itself, and all args */
2099                    }
2100                    break;
2101
2102                case spv::OpExtInst:
2103                    for (uint32_t i = 5; i < insn.len(); i++) {
2104                        worklist.insert(insn.word(i)); /* operands to ext inst */
2105                    }
2106                    break;
2107                }
2108            }
2109            break;
2110        }
2111    }
2112
2113    return ids;
2114}
2115
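/* Check that every member of a push-constant block falls inside some
 * VkPushConstantRange declared in the pipeline layout, and that the covering
 * range is visible to the shader stage being validated. */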
2116static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
2117                                                          std::vector<VkPushConstantRange> const *push_constant_ranges,
2118                                                          shader_module const *src, spirv_inst_iter type,
2119                                                          VkShaderStageFlagBits stage) {
2120    bool pass = true;
2121
2122    /* strip off ptrs etc */
2123    type = get_struct_type(src, type, false);
2124    assert(type != src->end());
2125
2126    /* validate directly off the offsets. this isn't quite correct for arrays
2127     * and matrices, but is a good first step. TODO: arrays, matrices, weird
2128     * sizes */
2129    for (auto insn : *src) {
2130        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
2131
2132            if (insn.word(3) == spv::DecorationOffset) {
2133                unsigned offset = insn.word(4);
2134                auto size = 4; /* bytes; TODO: calculate this based on the type */
2135
2136                bool found_range = false;
2137                for (auto const &range : *push_constant_ranges) {
2138                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
2139                        found_range = true;
2140
2141                        if ((range.stageFlags & stage) == 0) {
2142                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2143                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2144                                        "Push constant range covering variable starting at "
2145                                        "offset %u not accessible from stage %s",
2146                                        offset, string_VkShaderStageFlagBits(stage))) {
2147                                pass = false;
2148                            }
2149                        }
2150
2151                        break;
2152                    }
2153                }
2154
2155                if (!found_range) {
2156                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2157                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
2158                                "Push constant range covering variable starting at "
2159                                "offset %u not declared in layout",
2160                                offset)) {
2161                        pass = false;
2162                    }
2163                }
2164            }
2165        }
2166    }
2167
2168    return pass;
2169}
2170
2171static bool validate_push_constant_usage(debug_report_data *report_data,
2172                                         std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
2173                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
2174    bool pass = true;
2175
2176    for (auto id : accessible_ids) {
2177        auto def_insn = src->get_def(id);
2178        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
2179            pass &= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
2180                                                                  src->get_def(def_insn.word(1)), stage);
2181        }
2182    }
2183
2184    return pass;
2185}
2186
2187// For given pipelineLayout verify that the set_layout_node at slot.first
2188//  has the requested binding at slot.second and return ptr to that binding
2189static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {
2190
2191    if (!pipelineLayout)
2192        return nullptr;
2193
2194    if (slot.first >= pipelineLayout->set_layouts.size())
2195        return nullptr;
2196
2197    return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
2198}
2199
2200// Block of code at start here for managing/tracking Pipeline state that this layer cares about
2201
2202static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2203
// TODO : Should be tracking lastBound per commandBuffer and, when draws occur, reporting based on that cmd buffer's lastBound
//   state. Accesses then need to be synchronized per cmd buffer so that state being read on one cmd buffer is not
//   changed from underneath us by updates to that same cmd buffer from a separate thread
2207// Track the last cmd buffer touched by this thread
2208
2209static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2210    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2211        if (pCB->drawCount[i])
2212            return true;
2213    }
2214    return false;
2215}
2216
2217// Check object status for selected flag state
2218static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2219                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
2220    if (!(pNode->status & status_mask)) {
2221        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2222                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
2223                       "command buffer object 0x%" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer),
2224                       fail_msg);
2225    }
2226    return false;
2227}
2228
2229// Retrieve pipeline node ptr for given pipeline object
2230static PIPELINE_STATE *getPipelineState(layer_data const *my_data, VkPipeline pipeline) {
2231    auto it = my_data->pipelineMap.find(pipeline);
2232    if (it == my_data->pipelineMap.end()) {
2233        return nullptr;
2234    }
2235    return it->second;
2236}
2237
2238static RENDER_PASS_STATE *getRenderPassState(layer_data const *my_data, VkRenderPass renderpass) {
2239    auto it = my_data->renderPassMap.find(renderpass);
2240    if (it == my_data->renderPassMap.end()) {
2241        return nullptr;
2242    }
2243    return it->second.get();
2244}
2245
2246static FRAMEBUFFER_STATE *getFramebufferState(const layer_data *my_data, VkFramebuffer framebuffer) {
2247    auto it = my_data->frameBufferMap.find(framebuffer);
2248    if (it == my_data->frameBufferMap.end()) {
2249        return nullptr;
2250    }
2251    return it->second.get();
2252}
2253
2254cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
2255    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
2256    if (it == my_data->descriptorSetLayoutMap.end()) {
2257        return nullptr;
2258    }
2259    return it->second;
2260}
2261
2262static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
2263    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
2264    if (it == my_data->pipelineLayoutMap.end()) {
2265        return nullptr;
2266    }
2267    return &it->second;
2268}
2269
2270// Return true if for a given PSO, the given state enum is dynamic, else return false
2271static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
2272    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2273        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2274            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2275                return true;
2276        }
2277    }
2278    return false;
2279}
2280
2281// Validate state stored as flags at time of draw call
2282static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexedDraw) {
2283    bool result = false;
2284    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2285        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2286         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
2287        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2288                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2289    }
2290    if (pPipe->graphicsPipelineCI.pRasterizationState &&
2291        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2292        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2293                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2294    }
2295    if (pPipe->blendConstantsEnabled) {
2296        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2297                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2298    }
2299    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2300        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2301        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2302                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2303    }
2304    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2305        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2306        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2307                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2308        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2309                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2310        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2311                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2312    }
2313    if (indexedDraw) {
2314        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2315                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2316                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2317    }
2318    return result;
2319}
2320
2321// Verify attachment reference compatibility according to spec
//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED; the other array must match this
//  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
2324//   to make sure that format and samples counts match.
2325//  If not, they are not compatible.
2326static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2327                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2328                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2329                                             const VkAttachmentDescription *pSecondaryAttachments) {
2330    // Check potential NULL cases first to avoid nullptr issues later
2331    if (pPrimary == nullptr) {
2332        if (pSecondary == nullptr) {
2333            return true;
2334        }
2335        return false;
2336    } else if (pSecondary == nullptr) {
2337        return false;
2338    }
2339    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2340        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2341            return true;
2342    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2343        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2344            return true;
2345    } else { // Format and sample count must match
2346        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2347            return true;
2348        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2349            return false;
2350        }
2351        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2352             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2353            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2354             pSecondaryAttachments[pSecondary[index].attachment].samples))
2355            return true;
2356    }
2357    // Format and sample counts didn't match
2358    return false;
2359}
2360// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
// For the given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
2362static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPassCreateInfo *primaryRPCI,
2363                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
2364    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2365        stringstream errorStr;
2366        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2367                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2368        errorMsg = errorStr.str();
2369        return false;
2370    }
2371    uint32_t spIndex = 0;
2372    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2373        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2374        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2375        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2376        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2377        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2378            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2379                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2380                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2381                stringstream errorStr;
2382                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2383                errorMsg = errorStr.str();
2384                return false;
2385            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2386                                                         primaryColorCount, primaryRPCI->pAttachments,
2387                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2388                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2389                stringstream errorStr;
2390                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2391                errorMsg = errorStr.str();
2392                return false;
2393            }
2394        }
2395
2396        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2397                                              1, primaryRPCI->pAttachments,
2398                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2399                                              1, secondaryRPCI->pAttachments)) {
2400            stringstream errorStr;
2401            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2402            errorMsg = errorStr.str();
2403            return false;
2404        }
2405
2406        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2407        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2408        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2409        for (uint32_t i = 0; i < inputMax; ++i) {
2410            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2411                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2412                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2413                stringstream errorStr;
2414                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2415                errorMsg = errorStr.str();
2416                return false;
2417            }
2418        }
2419    }
2420    return true;
2421}
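
// Illustrative sketch (hypothetical, never called by the layer): per the checks
// above, a render pass create info is trivially compatible with itself, since
// every attachment reference resolves to the same format and sample count.
// Kept under '#if 0' so it is not compiled into the layer.
#if 0
static void example_renderpass_compatibility(const layer_data *dev_data) {
    VkAttachmentDescription att = {};
    att.format = VK_FORMAT_B8G8R8A8_UNORM;
    att.samples = VK_SAMPLE_COUNT_1_BIT;

    VkAttachmentReference color_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};

    VkSubpassDescription subpass = {};
    subpass.colorAttachmentCount = 1;
    subpass.pColorAttachments = &color_ref;

    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO};
    rpci.attachmentCount = 1;
    rpci.pAttachments = &att;
    rpci.subpassCount = 1;
    rpci.pSubpasses = &subpass;

    std::string err;
    // Identical create infos satisfy every per-subpass attachment check.
    assert(verify_renderpass_compatibility(dev_data, &rpci, &rpci, err));
}
#endif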
2422
2423// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2424// pipelineLayout[layoutIndex]
2425static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
2426                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
2427                                            string &errorMsg) {
2428    auto num_sets = pipeline_layout->set_layouts.size();
2429    if (layoutIndex >= num_sets) {
2430        stringstream errorStr;
2431        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
2432                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
2433                 << layoutIndex;
2434        errorMsg = errorStr.str();
2435        return false;
2436    }
2437    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
2438    return pSet->IsCompatible(layout_node, &errorMsg);
2439}
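
// Note (summarizing the Vulkan spec rule the check above relies on): two
// descriptor set layouts are compatible when they were created with identically
// defined bindings -- same binding numbers, descriptor types, descriptor
// counts, stage flags, and immutable sampler usage. IsCompatible()
// encapsulates that comparison and fills errorMsg on mismatch.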
2440
2441// Validate that data for each specialization entry is fully contained within the buffer.
2442static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2443    bool pass = true;
2444
2445    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2446
2447    if (spec) {
2448        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2449            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2450                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2451                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2452                            "Specialization entry %u (for constant id %u) references memory outside provided "
2453                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2454                            " bytes provided)",
2455                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2456                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2457
2458                    pass = false;
2459                }
2460            }
2461        }
2462    }
2463
2464    return pass;
2465}
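
// Illustrative sketch (hypothetical data, never called by the layer): a map
// entry whose offset + size runs past dataSize is exactly what the check above
// reports. Kept under '#if 0' so it is not compiled into the layer.
#if 0
static void example_bad_specialization_entry() {
    uint32_t data = 0;
    VkSpecializationMapEntry entry = {};
    entry.constantID = 7;
    entry.offset = 2;               // entry starts inside the buffer...
    entry.size = sizeof(uint32_t);  // ...but bytes 2..5 overrun dataSize == 4

    VkSpecializationInfo spec = {};
    spec.mapEntryCount = 1;
    spec.pMapEntries = &entry;
    spec.dataSize = sizeof(data);
    spec.pData = &data;
    // A VkPipelineShaderStageCreateInfo carrying this VkSpecializationInfo
    // would trigger SHADER_CHECKER_BAD_SPECIALIZATION above.
}
#endif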
2466
2467static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
2468                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2469    auto type = module->get_def(type_id);
2470
2471    descriptor_count = 1;
2472
2473    /* Strip off any array or ptrs. Where we remove array levels, adjust the
2474     * descriptor count for each dimension. */
2475    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2476        if (type.opcode() == spv::OpTypeArray) {
2477            descriptor_count *= get_constant_value(module, type.word(3));
2478            type = module->get_def(type.word(2));
2479        }
2480        else {
2481            type = module->get_def(type.word(3));
2482        }
2483    }
2484
2485    switch (type.opcode()) {
2486    case spv::OpTypeStruct: {
2487        for (auto insn : *module) {
2488            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2489                if (insn.word(2) == spv::DecorationBlock) {
2490                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2491                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2492                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2493                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2494                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2495                }
2496            }
2497        }
2498
2499        /* Invalid */
2500        return false;
2501    }
2502
2503    case spv::OpTypeSampler:
2504        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
2505            descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2506
2507    case spv::OpTypeSampledImage:
2508        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2509            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2510             * doesn't really have a sampler, and a texel buffer descriptor
2511             * doesn't really provide one. Allow this slight mismatch.
2512             */
2513            auto image_type = module->get_def(type.word(2));
2514            auto dim = image_type.word(3);
2515            auto sampled = image_type.word(7);
2516            return dim == spv::DimBuffer && sampled == 1;
2517        }
2518        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2519
2520    case spv::OpTypeImage: {
2521        /* Many descriptor types can back image types -- the match depends on
2522         * the dimension and whether the image will be used with a sampler.
2523         * SPIR-V for Vulkan requires that sampled be 1 or 2 -- leaving the
2524         * decision to runtime is unacceptable.
2525         */
2526        auto dim = type.word(3);
2527        auto sampled = type.word(7);
2528
2529        if (dim == spv::DimSubpassData) {
2530            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2531        } else if (dim == spv::DimBuffer) {
2532            if (sampled == 1) {
2533                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2534            } else {
2535                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2536            }
2537        } else if (sampled == 1) {
2538            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
2539                descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2540        } else {
2541            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2542        }
2543    }
2544
2545    /* We shouldn't really see any other junk types -- but if we do, they're
2546     * a mismatch.
2547     */
2548    default:
2549        return false; /* Mismatch */
2550    }
2551}
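
// Worked example for the stripping loop above (hypothetical SPIR-V):
//   %ptr  = OpTypePointer UniformConstant %arr
//   %arr  = OpTypeArray %simg %four      ; multiplies descriptor_count by 4
//   %simg = OpTypeSampledImage %img2d
// For a shader variable of type %ptr, descriptor_type_match() requires a
// VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER binding and reports a
// descriptor_count of 4 to the caller.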
2552
2553static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2554    if (!feature) {
2555        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2556                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2557                    "Shader requires VkPhysicalDeviceFeatures::%s but is not "
2558                    "enabled on the device",
2559                    feature_name)) {
2560            return false;
2561        }
2562    }
2563
2564    return true;
2565}
2566
2567static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2568                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
2569    bool pass = true;
2570
2571
2572    for (auto insn : *src) {
2573        if (insn.opcode() == spv::OpCapability) {
2574            switch (insn.word(1)) {
2575            case spv::CapabilityMatrix:
2576            case spv::CapabilityShader:
2577            case spv::CapabilityInputAttachment:
2578            case spv::CapabilitySampled1D:
2579            case spv::CapabilityImage1D:
2580            case spv::CapabilitySampledBuffer:
2581            case spv::CapabilityImageBuffer:
2582            case spv::CapabilityImageQuery:
2583            case spv::CapabilityDerivativeControl:
2584                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2585                break;
2586
2587            case spv::CapabilityGeometry:
2588                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
2589                break;
2590
2591            case spv::CapabilityTessellation:
2592                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
2593                break;
2594
2595            case spv::CapabilityFloat64:
2596                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2597                break;
2598
2599            case spv::CapabilityInt64:
2600                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
2601                break;
2602
2603            case spv::CapabilityTessellationPointSize:
2604            case spv::CapabilityGeometryPointSize:
2605                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2606                                        "shaderTessellationAndGeometryPointSize");
2607                break;
2608
2609            case spv::CapabilityImageGatherExtended:
2610                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2611                break;
2612
2613            case spv::CapabilityStorageImageMultisample:
2614                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2615                break;
2616
2617            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2618                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2619                                        "shaderUniformBufferArrayDynamicIndexing");
2620                break;
2621
2622            case spv::CapabilitySampledImageArrayDynamicIndexing:
2623                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2624                                        "shaderSampledImageArrayDynamicIndexing");
2625                break;
2626
2627            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2628                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2629                                        "shaderStorageBufferArrayDynamicIndexing");
2630                break;
2631
2632            case spv::CapabilityStorageImageArrayDynamicIndexing:
2633                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2634                                        "shaderStorageImageArrayDynamicIndexing");
2635                break;
2636
2637            case spv::CapabilityClipDistance:
2638                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2639                break;
2640
2641            case spv::CapabilityCullDistance:
2642                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2643                break;
2644
2645            case spv::CapabilityImageCubeArray:
2646                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2647                break;
2648
2649            case spv::CapabilitySampleRateShading:
2650                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2651                break;
2652
2653            case spv::CapabilitySparseResidency:
2654                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2655                break;
2656
2657            case spv::CapabilityMinLod:
2658                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2659                break;
2660
2661            case spv::CapabilitySampledCubeArray:
2662                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2663                break;
2664
2665            case spv::CapabilityImageMSArray:
2666                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2667                break;
2668
2669            case spv::CapabilityStorageImageExtendedFormats:
2670                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
2671                                        "shaderStorageImageExtendedFormats");
2672                break;
2673
2674            case spv::CapabilityInterpolationFunction:
2675                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2676                break;
2677
2678            case spv::CapabilityStorageImageReadWithoutFormat:
2679                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2680                                        "shaderStorageImageReadWithoutFormat");
2681                break;
2682
2683            case spv::CapabilityStorageImageWriteWithoutFormat:
2684                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2685                                        "shaderStorageImageWriteWithoutFormat");
2686                break;
2687
2688            case spv::CapabilityMultiViewport:
2689                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
2690                break;
2691
2692            default:
2693                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2694                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2695                            "Shader declares capability %u, not supported in Vulkan.",
2696                            insn.word(1)))
2697                    pass = false;
2698                break;
2699            }
2700        }
2701    }
2702
2703    return pass;
2704}
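
// Worked example of the capability mapping above (hypothetical module): a
// SPIR-V module declaring 'OpCapability Geometry' passes only when the device
// was created with VkPhysicalDeviceFeatures::geometryShader enabled; otherwise
// require_feature() reports SHADER_CHECKER_FEATURE_NOT_ENABLED.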
2705
2706
2707static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
2708    auto type = module->get_def(type_id);
2709
2710    while (true) {
2711        switch (type.opcode()) {
2712        case spv::OpTypeArray:
2713        case spv::OpTypeSampledImage:
2714            type = module->get_def(type.word(2));
2715            break;
2716        case spv::OpTypePointer:
2717            type = module->get_def(type.word(3));
2718            break;
2719        case spv::OpTypeImage: {
2720            auto dim = type.word(3);
2721            auto arrayed = type.word(5);
2722            auto msaa = type.word(6);
2723
2724            switch (dim) {
2725            case spv::Dim1D:
2726                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
2727            case spv::Dim2D:
2728                return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
2729                    (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
2730            case spv::Dim3D:
2731                return DESCRIPTOR_REQ_VIEW_TYPE_3D;
2732            case spv::DimCube:
2733                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
2734            case spv::DimSubpassData:
2735                return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
2736            default:  // buffer, etc.
2737                return 0;
2738            }
2739        }
2740        default:
2741            return 0;
2742        }
2743    }
2744}
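
// Worked example for the mapping above (hypothetical shader variable): an
// image declared as 'OpTypeImage %float 2D ... arrayed=1 ms=0 ...' yields
// DESCRIPTOR_REQ_SINGLE_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY, i.e. the
// bound descriptor must be a single-sample VK_IMAGE_VIEW_TYPE_2D_ARRAY view.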
2745
2746static bool
2747validate_pipeline_shader_stage(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *pStage,
2748                               PIPELINE_STATE *pipeline, shader_module **out_module, spirv_inst_iter *out_entrypoint,
2749                               VkPhysicalDeviceFeatures const *enabledFeatures,
2750                               std::unordered_map<VkShaderModule, std::unique_ptr<shader_module>> const &shaderModuleMap) {
2751    bool pass = true;
2752    auto module_it = shaderModuleMap.find(pStage->module);
2753    auto module = *out_module = module_it->second.get();
2754
2755    /* find the entrypoint */
2756    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2757    if (entrypoint == module->end()) {
2758        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2759                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2760                    "No entrypoint found named `%s` for stage %s", pStage->pName,
2761                    string_VkShaderStageFlagBits(pStage->stage))) {
2762            return false;   // no point continuing beyond here, any analysis is just going to be garbage.
2763        }
2764    }
2765
2766    /* validate shader capabilities against enabled device features */
2767    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
2768
2769    /* mark accessible ids */
2770    auto accessible_ids = mark_accessible_ids(module, entrypoint);
2771
2772    /* validate descriptor set layout against what the entrypoint actually uses */
2773    auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);
2774
2775    auto pipelineLayout = pipeline->pipeline_layout;
2776
2777    pass &= validate_specialization_offsets(report_data, pStage);
2778    pass &= validate_push_constant_usage(report_data, &pipelineLayout.push_constant_ranges, module, accessible_ids, pStage->stage);
2779
2780    /* validate descriptor use */
2781    for (auto use : descriptor_uses) {
2782        // While validating shaders capture which slots are used by the pipeline
2783        auto & reqs = pipeline->active_slots[use.first.first][use.first.second];
2784        reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));
2785
2786        /* verify given pipelineLayout has requested setLayout with requested binding */
2787        const auto &binding = get_descriptor_binding(&pipelineLayout, use.first);
2788        unsigned required_descriptor_count;
2789
2790        if (!binding) {
2791            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2792                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2793                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2794                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2795                pass = false;
2796            }
2797        } else if (~binding->stageFlags & pStage->stage) {
2798            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2799                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2800                        "Shader uses descriptor slot %u.%u (used "
2801                        "as type `%s`) but descriptor not "
2802                        "accessible from stage %s",
2803                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2804                        string_VkShaderStageFlagBits(pStage->stage))) {
2805                pass = false;
2806            }
2807        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
2808                                          /*out*/ required_descriptor_count)) {
2809            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2810                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
2811                                                                       "%u.%u (used as type `%s`) but "
2812                                                                       "bound descriptor is of type %s",
2813                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2814                        string_VkDescriptorType(binding->descriptorType))) {
2815                pass = false;
2816            }
2817        } else if (binding->descriptorCount < required_descriptor_count) {
2818            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2819                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2820                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2821                        required_descriptor_count, use.first.first, use.first.second,
2822                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
2823                pass = false;
2824            }
2825        }
2826    }
2827
2828    /* validate use of input attachments against subpass structure */
2829    if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
2830        auto input_attachment_uses = collect_interface_by_input_attachment_index(report_data, module, accessible_ids);
2831
2832        auto rpci = pipeline->render_pass_ci.ptr();
2833        auto subpass = pipeline->graphicsPipelineCI.subpass;
2834
2835        for (auto use : input_attachment_uses) {
2836            auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
2837            auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount) ?
2838                    input_attachments[use.first].attachment : VK_ATTACHMENT_UNUSED;
2839
2840            if (index == VK_ATTACHMENT_UNUSED) {
2841                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2842                            SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
2843                            "Shader consumes input attachment index %d but not provided in subpass",
2844                            use.first)) {
2845                    pass = false;
2846                }
2847            }
2848            else if (get_format_type(rpci->pAttachments[index].format) !=
2849                    get_fundamental_type(module, use.second.type_id)) {
2850                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2851                            SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
2852                            "Subpass input attachment %u format of %s does not match type used in shader `%s`",
2853                            use.first, string_VkFormat(rpci->pAttachments[index].format),
2854                            describe_type(module, use.second.type_id).c_str())) {
2855                    pass = false;
2856                }
2857            }
2858        }
2859    }
2860
2861    return pass;
2862}
2863
2864
2865// Validate the shaders used by the given pipeline and store the active_slots
2866//  that they actually use into pPipeline->active_slots
2867static bool
2868validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
2869                                           VkPhysicalDeviceFeatures const *enabledFeatures,
2870                                           std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
2871    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
2872    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2873    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2874
2875    shader_module *shaders[5];
2876    memset(shaders, 0, sizeof(shaders));
2877    spirv_inst_iter entrypoints[5];
2878    memset(entrypoints, 0, sizeof(entrypoints));
2879    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2880    bool pass = true;
2881
2882    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2883        auto pStage = &pCreateInfo->pStages[i];
2884        auto stage_id = get_shader_stage_id(pStage->stage);
2885        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
2886                                               &shaders[stage_id], &entrypoints[stage_id],
2887                                               enabledFeatures, shaderModuleMap);
2888    }
2889
2890    // If the shader stages are no good individually, cross-stage validation is pointless.
2891    if (!pass)
2892        return false;
2893
2894    vi = pCreateInfo->pVertexInputState;
2895
2896    if (vi) {
2897        pass &= validate_vi_consistency(report_data, vi);
2898    }
2899
2900    if (shaders[vertex_stage]) {
2901        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2902    }
2903
2904    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2905    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2906
2907    while (!shaders[producer] && producer != fragment_stage) {
2908        producer++;
2909        consumer++;
2910    }
2911
2912    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2913        assert(shaders[producer]);
2914        if (shaders[consumer]) {
2915            pass &= validate_interface_between_stages(report_data,
2916                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2917                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
2918
2919            producer = consumer;
2920        }
2921    }
2922
2923    if (shaders[fragment_stage]) {
2924        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
2925                                                        pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass);
2926    }
2927
2928    return pass;
2929}
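
// Worked example of the producer/consumer walk above (hypothetical pipeline
// with only VS, GS and FS, assuming the usual VS/TCS/TES/GS/FS ordering from
// get_shader_stage_id()): producer starts at VS; the empty TCS and TES slots
// advance the consumer index until it reaches GS, the VS->GS interface is
// validated and producer becomes GS, and finally GS->FS is checked.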
2930
2931static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
2932                                      VkPhysicalDeviceFeatures const *enabledFeatures,
2933                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
2934    auto pCreateInfo = pPipeline->computePipelineCI.ptr();
2935
2936    shader_module *module;
2937    spirv_inst_iter entrypoint;
2938
2939    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
2940                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
2941}
2942// Return Set node ptr for specified set or else NULL
2943cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
2944    auto set_it = my_data->setMap.find(set);
2945    if (set_it == my_data->setMap.end()) {
2946        return NULL;
2947    }
2948    return set_it->second;
2949}
2950// For the given command buffer, verify and update the state for activeSetBindingsPairs
2951//  This includes:
2952//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
2953//     To be valid, the dynamic offset combined with the offset and range from its
2954//     descriptor update must not overflow the size of its buffer being updated
2955//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
2956//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
2957static bool validate_and_update_drawtime_descriptor_state(
2958    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
2959    const vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
2960        &activeSetBindingsPairs,
2961    const char *function) {
2962    bool result = false;
2963    for (auto set_bindings_pair : activeSetBindingsPairs) {
2964        cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
2965        std::string err_str;
2966        if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair),
2967                                         &err_str)) {
2968            // Report error here
2969            auto set = set_node->GetSet();
2970            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2971                              reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2972                              "Descriptor set 0x%" PRIxLEAST64 " encountered the following validation error at %s() time: %s",
2973                              reinterpret_cast<const uint64_t &>(set), function, err_str.c_str());
2974        }
2975        set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages);
2976    }
2977    return result;
2978}
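
// Shape of one activeSetBindingsPairs element consumed above, for reference:
//   std::get<0> -- the bound cvdescriptorset::DescriptorSet
//   std::get<1> -- map of binding number to the descriptor_req bits the shaders use
//   std::get<2> -- the dynamic offsets recorded for that set at bind time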
2979
2980// For the given pipeline, return its number of MSAA samples, or VK_SAMPLE_COUNT_1_BIT if MSAA is disabled
2981static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
2982    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
2983        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
2984        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
2985    }
2986    return VK_SAMPLE_COUNT_1_BIT;
2987}
2988
2989static void list_bits(std::ostream& s, uint32_t bits) {
2990    for (int i = 0; i < 32 && bits; i++) {
2991        if (bits & (1 << i)) {
2992            s << i;
2993            bits &= ~(1 << i);
2994            if (bits) {
2995                s << ",";
2996            }
2997        }
2998    }
2999}
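
// Illustrative sketch (hypothetical, never called by the layer): list_bits()
// writes the indices of the set bits as a comma-separated run. Kept under
// '#if 0' so it is not compiled into the layer.
#if 0
static void example_list_bits() {
    std::stringstream s;
    list_bits(s, 0x15);  // bits 0, 2 and 4 are set
    assert(s.str() == "0,2,4");
}
#endif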
3000
3001// Validate draw-time state related to the PSO
3002static bool validatePipelineDrawtimeState(layer_data const *my_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
3003                                          PIPELINE_STATE const *pPipeline) {
3004    bool skip_call = false;
3005
3006    // Verify vertex binding
3007    if (pPipeline->vertexBindingDescriptions.size() > 0) {
3008        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
3009            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
3010            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
3011                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
3012                skip_call |= log_msg(
3013                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3014                    DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
3015                    "The Pipeline State Object (0x%" PRIxLEAST64 ") expects that this Command Buffer's vertex binding Index %u "
3016                    "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
3017                    "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
3018                    (uint64_t)state.pipeline_state->pipeline, vertex_binding, i, vertex_binding);
3019            }
3020        }
3021    } else {
3022        if (!pCB->currentDrawData.buffers.empty()) {
3023            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
3024                                 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
3025                                 "Vertex buffers are bound to command buffer (0x%" PRIxLEAST64
3026                                 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
3027                                 (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline_state->pipeline);
3028        }
3029    }
3030    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
3031    // Skip check if rasterization is disabled or there is no viewport.
3032    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
3033         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
3034        pPipeline->graphicsPipelineCI.pViewportState) {
3035        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3036        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3037
3038        if (dynViewport) {
3039            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
3040            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
3041            if (missingViewportMask) {
3042                std::stringstream ss;
3043                ss << "Dynamic viewport(s) ";
3044                list_bits(ss, missingViewportMask);
3045                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
3046                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3047                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3048                                     "%s", ss.str().c_str());
3049            }
3050        }
3051
3052        if (dynScissor) {
3053            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
3054            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
3055            if (missingScissorMask) {
3056                std::stringstream ss;
3057                ss << "Dynamic scissor(s) ";
3058                list_bits(ss, missingScissorMask);
3059                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
3060                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3061                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3062                                     "%s", ss.str().c_str());
3063            }
3064        }
3065    }
3066
3067    // Verify that any MSAA request in PSO matches sample# in bound FB
3068    // Skip the check if rasterization is disabled.
3069    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3070        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3071        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
3072        if (pCB->activeRenderPass) {
3073            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
3074            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
3075            uint32_t i;
3076
3077            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
3078            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
3079                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
3080                skip_call |=
3081                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3082                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
3083                                "Render pass subpass %u mismatch with blending state defined and blend state attachment "
3084                                "count %u while subpass color attachment count %u in Pipeline (0x%" PRIxLEAST64 ")!  These "
3085                                "must be the same at draw-time.",
3086                                pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
3087                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
3088            }
3089
3090            unsigned subpass_num_samples = 0;
3091
3092            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
3093                auto attachment = subpass_desc->pColorAttachments[i].attachment;
3094                if (attachment != VK_ATTACHMENT_UNUSED)
3095                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
3096            }
3097
3098            if (subpass_desc->pDepthStencilAttachment &&
3099                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3100                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
3101                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
3102            }
3103
3104            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
3105                skip_call |=
3106                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3107                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3108                                "Num samples mismatch! At draw-time in Pipeline (0x%" PRIxLEAST64
3109                                ") with %u samples while current RenderPass (0x%" PRIxLEAST64 ") w/ %u samples!",
3110                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
3111                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
3112            }
3113        } else {
3114            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3115                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3116                                 "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
3117                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
3118        }
3119    }
3120    // Verify that PSO creation renderPass is compatible with active renderPass
3121    if (pCB->activeRenderPass) {
3122        std::string err_string;
3123        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
3124            !verify_renderpass_compatibility(my_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
3125                                             err_string)) {
3126            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
3127            skip_call |=
3128                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3129                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
3130                        "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline "
3131                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
3132                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass),
3133                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
3134                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
3135        }
3136
3137        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
3138            skip_call |=
3139                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3140                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
3141                        "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
3142                        pCB->activeSubpass);
3143        }
3144    }
3145    // TODO : Add more checks here
3146
3147    return skip_call;
3148}
3149
3150// Validate overall state at the time of a draw call
3151static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *cb_node, const bool indexedDraw,
3152                                           const VkPipelineBindPoint bindPoint, const char *function) {
3153    bool result = false;
3154    auto const &state = cb_node->lastBound[bindPoint];
3155    PIPELINE_STATE *pPipe = state.pipeline_state;
3156    if (nullptr == pPipe) {
3157        result |= log_msg(
3158            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
3159            DRAWSTATE_INVALID_PIPELINE, "DS",
3160            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
3161        // Early return regardless: every check below dereferences pPipe, so
3162        // continuing without a bound pipeline would crash the layer.
3163        return result;
3164    }
3165    // First check flag states
3166    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
3167        result = validate_draw_state_flags(my_data, cb_node, pPipe, indexedDraw);
3168
3169    // Now complete other state checks
3170    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
3171        string errorString;
3172        auto pipeline_layout = pPipe->pipeline_layout;
3173
3174        // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
3175        vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
3176            activeSetBindingsPairs;
3177        for (auto & setBindingPair : pPipe->active_slots) {
3178            uint32_t setIndex = setBindingPair.first;
3179            // If valid set is not bound throw an error
3180            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
3181                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3182                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
3183                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
3184                                  setIndex);
3185            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
3186                                                        errorString)) {
3187                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
3188                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
3189                result |=
3190                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3191                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
3192                            "VkDescriptorSet (0x%" PRIxLEAST64
3193                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
3194                            reinterpret_cast<uint64_t &>(setHandle), setIndex, reinterpret_cast<uint64_t &>(pipeline_layout.layout),
3195                            errorString.c_str());
3196            } else { // Valid set is bound and layout compatible, validate that it's updated
3197                // Pull the set node
3198                cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex];
3199                // Gather active bindings
3200                std::unordered_set<uint32_t> bindings;
3201                for (auto binding : setBindingPair.second) {
3202                    bindings.insert(binding.first);
3203                }
3204                // Bind this set and its active descriptor resources to the command buffer
3205                pSet->BindCommandBuffer(cb_node, bindings);
3206                // Save vector of all active sets to verify dynamicOffsets below
3207                activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second, &state.dynamicOffsets[setIndex]));
3208                // Make sure set has been updated if it has no immutable samplers
3209                //  If it has immutable samplers, we'll flag error later as needed depending on binding
3210                if (!pSet->IsUpdated()) {
3211                    for (auto binding : bindings) {
3212                        if (!pSet->GetImmutableSamplerPtrFromBinding(binding)) {
3213                            result |= log_msg(
3214                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3215                                (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
3216                                "Descriptor Set 0x%" PRIxLEAST64 " bound but was never updated. It is now being used to draw so "
3217                                "this will result in undefined behavior.",
3218                                (uint64_t)pSet->GetSet());
3219                        }
3220                    }
3221                }
3222            }
3223        }
3224        // For given active slots, verify any dynamic descriptors and record updated images & buffers
3225        result |= validate_and_update_drawtime_descriptor_state(my_data, cb_node, activeSetBindingsPairs, function);
3226    }
3227
3228    // Check general pipeline state that needs to be validated at drawtime
3229    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
3230        result |= validatePipelineDrawtimeState(my_data, state, cb_node, pPipe);
3231
3232    return result;
3233}
3234
3235// Validate HW line width capabilities prior to setting requested line width.
3236static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
3237    bool skip_call = false;
3238
3239    // First check to see if the physical device supports wide lines.
3240    if ((VK_FALSE == my_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
3241        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
3242                             dsError, "DS", "Attempt to set lineWidth to %f, but the physical device wideLines feature "
3243                                            "is not enabled, so lineWidth must be 1.0f!",
3244                             lineWidth);
3245    } else {
3246        // Otherwise, make sure the width falls in the valid range.
3247        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
3248            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
3249            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
3250                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f, but the physical device limits "
3251                                                          "line width to the range [%f, %f]!",
3252                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
3253                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
3254        }
3255    }
3256
3257    return skip_call;
3258}
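
// Worked example for the check above (hypothetical values): with wideLines
// disabled, any lineWidth other than 1.0f is reported; with wideLines enabled
// and lineWidthRange = [1.0, 8.0], a lineWidth of 10.0f is reported as being
// outside the device limits.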
3259
3260// Verify that create state for a pipeline is valid
3261static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_STATE *> pPipelines,
3262                                      int pipelineIndex) {
3263    bool skip_call = false;
3264
3265    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];
3266
3267    // If create derivative bit is set, check that we've specified a base
3268    // pipeline correctly, and that the base pipeline was created to allow
3269    // derivatives.
3270    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3271        PIPELINE_STATE *pBasePipeline = nullptr;
3272        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3273              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3274            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3275                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3276                                 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3277        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3278            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3279                skip_call |=
3280                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3281                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3282                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3283            } else {
3284                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3285            }
3286        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3287            pBasePipeline = getPipelineState(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3288        }
3289
3290        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3291            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3292                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3293                                 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3294        }
3295    }
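
    // Worked example (hypothetical pCreateInfos pair) satisfying the checks
    // above: infos[0].flags = VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT (the
    // base), infos[1].flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT with
    // basePipelineIndex = 0 and basePipelineHandle = VK_NULL_HANDLE -- exactly
    // one of handle/index is set, and the base precedes the derivative.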
3296
3297    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3298        if (!my_data->enabled_features.independentBlend) {
3299            if (pPipeline->attachments.size() > 1) {
3300                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3301                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3302                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
3303                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
3304                    // only attachment state, so memcmp is best suited for the comparison
3305                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
3306                               sizeof(pAttachments[0]))) {
3307                        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3308                                             __LINE__, DRAWSTATE_INDEPENDENT_BLEND, "DS",
3309                                             "Invalid Pipeline CreateInfo: If the independent blend feature is not "
3310                                             "enabled, all elements of pAttachments must be identical");
3311                        break;
3312                    }
3313                }
3314            }
3315        }
3316        if (!my_data->enabled_features.logicOp &&
3317            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3318            skip_call |=
3319                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3320                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3321                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
3322        }
3323    }
3324
3325    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3326    // produces nonsense errors that confuse users. Other layers should already
3327    // emit errors for renderpass being invalid.
3328    auto renderPass = getRenderPassState(my_data, pPipeline->graphicsPipelineCI.renderPass);
3329    if (renderPass && pPipeline->graphicsPipelineCI.subpass >= renderPass->createInfo.subpassCount) {
3330        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3331                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3332                                                                            "is out of range for this renderpass (0..%u)",
3333                             pPipeline->graphicsPipelineCI.subpass, renderPass->createInfo.subpassCount - 1);
3334    }
3335
3336    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->enabled_features,
3337                                                    my_data->shaderModuleMap)) {
3338        skip_call = true;
3339    }
3340    // Each shader's stage must be unique
3341    if (pPipeline->duplicate_shaders) {
3342        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
3343            if (pPipeline->duplicate_shaders & stage) {
3344                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3345                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3346                                     "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
3347                                     string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
3348            }
3349        }
3350    }
3351    // VS is required
3352    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3353        skip_call |=
3354            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3355                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vertex Shader required");
3356    }
3357    // Either both or neither TC/TE shaders should be defined
3358    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3359        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3360        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3361                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3362                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3363    }
    // Compute shaders should be specified independently of Gfx shaders
    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
        (pPipeline->active_shaders &
         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                             "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
    }
    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                            "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
                                                                            "topology for tessellation pipelines");
    }
    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                       "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                       "topology is only valid for tessellation pipelines");
        }
        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                 "Invalid Pipeline CreateInfo State: "
                                 "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                 "topology used. pTessellationState must not be NULL in this case.");
        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                                "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                                "topology used with patchControlPoints value %u."
                                                                                " patchControlPoints should be >0 and <=32.",
                                 pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
        }
    }
    // If a rasterization state is provided, make sure that the line width conforms to the HW.
    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
                                         reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
                                         pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
        }
    }
    // Viewport state must be included if rasterization is enabled.
    // If the viewport state is included, the viewport and scissor counts should always match.
    // NOTE : Even if these are flagged as dynamic, the counts need to be set correctly for the shader compiler
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        if (!pPipeline->graphicsPipelineCI.pViewportState) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
                                                                            "and scissors are dynamic PSO must include "
                                                                            "viewportCount and scissorCount in pViewportState.");
        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                 "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
                                 pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
                                 pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
        } else {
            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
            if (!dynViewport) {
                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
                    skip_call |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
                                "must either include pViewports data, or include viewport in pDynamicState and set it with "
                                "vkCmdSetViewport().",
                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
                }
            }
            if (!dynScissor) {
                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                         "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
                                         "must either include pScissors data, or include scissor in pDynamicState and set it with "
                                         "vkCmdSetScissor().",
                                         pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
                }
            }
        }

        // If rasterization is not disabled, and subpass uses a depth/stencil
        // attachment, pDepthStencilState must be a pointer to a valid structure
        auto subpass_desc = renderPass ? &renderPass->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;
        if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                     "Invalid Pipeline CreateInfo State: "
                                     "pDepthStencilState is NULL when rasterization is enabled and subpass uses a "
                                     "depth/stencil attachment");
            }
        }
    }
    return skip_call;
}

// Free the Pipeline nodes
static void deletePipelines(layer_data *my_data) {
    if (my_data->pipelineMap.empty())
        return;
    for (auto &pipe_map_pair : my_data->pipelineMap) {
        delete pipe_map_pair.second;
    }
    my_data->pipelineMap.clear();
}

// The block of code that starts here is specifically for managing/tracking descriptor sets (DSs)

// Return Pool node ptr for specified pool or else NULL
DESCRIPTOR_POOL_STATE *getDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
    auto pool_it = dev_data->descriptorPoolMap.find(pool);
    if (pool_it == dev_data->descriptorPoolMap.end()) {
        return NULL;
    }
    return pool_it->second;
}

// Return false if the update struct is of a valid type, otherwise flag an error and return the callback's result
static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        return false;
    default:
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
}

// Return the descriptor count for the given update struct
static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        // TODO : Need to understand this case better and make sure code is correct
        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
    default:
        return 0;
    }
}

// For given layout and update, return the first overall index of the layout that is updated
static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    return binding_start_index + arrayIndex;
}
// For given layout and update, return the last overall index of the layout that is updated
static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
    return binding_start_index + arrayIndex + count - 1;
}
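
// Worked example (hypothetical numbers, for illustration only): for a binding
// whose first overall layout index is 5, a write update with arrayIndex 2 and
// descriptorCount 3 gives getUpdateStartIndex() == 5 + 2 == 7 and
// getUpdateEndIndex() == 5 + 2 + 3 - 1 == 9, so overall indices 7..9
// (inclusive) of the layout are the ones written.
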
// Verify that the descriptor type in the update struct matches what's expected by the layout
static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
    // First get actual type of update
    bool skip_call = false;
    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
        break;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        /* no need to validate */
        return false;
    default:
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                             "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                             string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
    if (!skip_call) {
        if (layout_type != actualType) {
            skip_call |= log_msg(
                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
        }
    }
    return skip_call;
}
// TODO: Consolidate functions
bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
    if (imgsubIt == pCB->imageLayoutMap.end()) {
        return false;
    }
    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
    }
    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
    }
    node = imgsubIt->second;
    return true;
}

bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
    if (imgsubIt == my_data->imageLayoutMap.end()) {
        return false;
    }
    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
    }
    layout = imgsubIt->second.layout;
    return true;
}

// find layout(s) on the cmd buf level
bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    ImageSubresourcePair imgpair = {image, true, range};
    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {image, false, VkImageSubresource()};
        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
        if (imgsubIt == pCB->imageLayoutMap.end())
            return false;
        node = imgsubIt->second;
    }
    return true;
}
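
// Usage sketch (hypothetical caller, illustration only) of the cmd-buf-level
// query above:
//     IMAGE_CMD_BUF_LAYOUT_NODE node;
//     VkImageSubresource sub = {VK_IMAGE_ASPECT_COLOR_BIT, 0 /*mipLevel*/, 0 /*arrayLayer*/};
//     if (FindLayout(pCB, image, sub, node)) {
//         // node.initialLayout is the layout this CB first saw for the
//         // subresource; node.layout is the layout it most recently set.
//     }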

// find layout(s) on the global level
bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {imgpair.image, false, VkImageSubresource()};
        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
        if (imgsubIt == my_data->imageLayoutMap.end())
            return false;
        layout = imgsubIt->second.layout;
    }
    return true;
}

bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    return FindLayout(my_data, imgpair, layout);
}

bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
    auto sub_data = my_data->imageSubresourceMap.find(image);
    if (sub_data == my_data->imageSubresourceMap.end())
        return false;
    auto image_state = getImageState(my_data, image);
    if (!image_state)
        return false;
    bool ignoreGlobal = false;
    // TODO: Make this robust for >1 aspect mask. For now it will just ignore
    // potential errors in this case.
    if (sub_data->second.size() >= (image_state->createInfo.arrayLayers * image_state->createInfo.mipLevels + 1)) {
        ignoreGlobal = true;
    }
    for (auto imgsubpair : sub_data->second) {
        if (ignoreGlobal && !imgsubpair.hasSubresource)
            continue;
        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
        if (img_data != my_data->imageLayoutMap.end()) {
            layouts.push_back(img_data->second.layout);
        }
    }
    return true;
}

// Set the layout on the global level
void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    VkImage &image = imgpair.image;
    // TODO (mlentine): Maybe set format if new? Not used atm.
    my_data->imageLayoutMap[imgpair].layout = layout;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
    if (subresource == my_data->imageSubresourceMap[image].end()) {
        my_data->imageSubresourceMap[image].push_back(imgpair);
    }
}

// Set the layout on the cmdbuf level
void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    pCB->imageLayoutMap[imgpair] = node;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource =
        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
    }
}

void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    // TODO (mlentine): Maybe make vector a set?
    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
        pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageLayoutMap[imgpair].layout = layout;
    } else {
        // TODO (mlentine): Could be expensive and might need to be removed.
        assert(imgpair.hasSubresource);
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
            node.initialLayout = layout;
        }
        SetLayout(pCB, imgpair, {node.initialLayout, layout});
    }
}
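
// Explanatory note on the else-branch above (an inference from the code, not a
// spec statement): the recorded pair {node.initialLayout, layout} preserves
// both the layout the command buffer first assumed for the subresource and the
// one it is now setting; when FindLayout() found no prior per-aspect record,
// the assumed-on-entry value simply defaults to the new layout itself.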

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
    if (imgpair.subresource.aspectMask & aspectMask) {
        imgpair.subresource.aspectMask = aspectMask;
        SetLayout(pObject, imgpair, layout);
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
}
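
// Example (illustration only): for a range whose aspectMask is
// DEPTH | STENCIL, the four calls above reduce to two effective updates, one
// with aspectMask == DEPTH and one with aspectMask == STENCIL; the COLOR and
// METADATA calls fail the aspect test in the overload above and are no-ops.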

template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
    SetLayout(pObject, imgpair, layout);
}

void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
    auto view_state = getImageViewState(dev_data, imageView);
    assert(view_state);
    auto image = view_state->create_info.image;
    const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
    // TODO: Do not iterate over every possibility - consolidate where possible
    for (uint32_t j = 0; j < subRange.levelCount; j++) {
        uint32_t level = subRange.baseMipLevel + j;
        for (uint32_t k = 0; k < subRange.layerCount; k++) {
            uint32_t layer = subRange.baseArrayLayer + k;
            VkImageSubresource sub = {subRange.aspectMask, level, layer};
            // TODO: If ImageView was created with depth or stencil, transition both layouts as
            // the aspectMask is ignored and both are used. Verify that the extra implicit layout
            // is OK for descriptor set layout validation
            if (subRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (vk_format_is_depth_and_stencil(view_state->create_info.format)) {
                    sub.aspectMask |= (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
                }
            }
            SetLayout(pCB, image, sub, layout);
        }
    }
}
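
// Worked example (hypothetical values, illustration only): a view whose
// subresourceRange is {baseMipLevel = 1, levelCount = 2, baseArrayLayer = 0,
// layerCount = 3} touches the (mip, layer) pairs {1,2} x {0,1,2}, so the
// loops above issue six SetLayout() calls.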

// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
    if (dev_data->instance_data->disabled.idle_descriptor_set)
        return false;
    bool skip_call = false;
    auto set_node = dev_data->setMap.find(set);
    if (set_node == dev_data->setMap.end()) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
                             (uint64_t)(set));
    } else {
        // TODO : This covers various error cases, so we should pass the error enum into this function and use the passed-in enum here
        if (set_node->second->in_use.load()) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)(set), __LINE__, VALIDATION_ERROR_00919, "DS",
                        "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
                        func_str.c_str(), (uint64_t)(set), validation_error_map[VALIDATION_ERROR_00919]);
        }
    }
    return skip_call;
}
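
// Usage sketch (hypothetical caller, illustration only): an API entry point
// that retires descriptor sets would typically do
//     skip_call |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
// for each set before actually releasing it down the chain.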

// Remove set from setMap and delete the set
static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
    dev_data->setMap.erase(descriptor_set->GetSet());
    delete descriptor_set;
}
// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
static void deletePools(layer_data *my_data) {
    if (my_data->descriptorPoolMap.empty())
        return;
    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
        // Remove this pool's sets from setMap and delete them
        for (auto ds : (*ii).second->sets) {
            freeDescriptorSet(my_data, ds);
        }
        (*ii).second->sets.clear();
    }
    my_data->descriptorPoolMap.clear();
}

static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
                                VkDescriptorPoolResetFlags flags) {
    DESCRIPTOR_POOL_STATE *pPool = getDescriptorPoolState(my_data, pool);
    // TODO: validate flags
    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
    for (auto ds : pPool->sets) {
        freeDescriptorSet(my_data, ds);
    }
    pPool->sets.clear();
    // Reset available count for each type and available sets for this pool
    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
    }
    pPool->availableSets = pPool->maxSets;
}
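
// Worked example (hypothetical numbers, illustration only): a pool created
// with maxSets == 4 and maxDescriptorTypeCount[UNIFORM_BUFFER] == 8 that
// currently has 3 sets and 6 uniform-buffer descriptors outstanding returns
// to availableSets == 4 and availableDescriptorTypeCount[UNIFORM_BUFFER] == 8
// after the reset above.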

// For given CB object, fetch associated CB Node from map
static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
    auto it = my_data->commandBufferMap.find(cb);
    if (it == my_data->commandBufferMap.end()) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                "Attempt to use CommandBuffer 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
        return NULL;
    }
    return it->second;
}
// Free all CB Nodes
// NOTE : Calls to this function should be wrapped in mutex
static void deleteCommandBuffers(layer_data *my_data) {
    if (my_data->commandBufferMap.empty()) {
        return;
    }
    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
        delete (*ii).second;
    }
    my_data->commandBufferMap.clear();
}

static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
}

bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass)
        return false;
    bool skip_call = false;
    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip_call;
}
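
// Summary of the rule enforced above (illustrative restatement): in a subpass
// begun with VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS, only
// vkCmdExecuteCommands(), vkCmdNextSubpass() and vkCmdEndRenderPass() are
// accepted; in a VK_SUBPASS_CONTENTS_INLINE subpass, everything except
// vkCmdExecuteCommands() is.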

static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
    return false;
}

static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_COMPUTE_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
    return false;
}

static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
    return false;
}

// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
//  in the recording state or if there's an issue with the Cmd ordering
static bool addCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
    bool skip_call = false;
    auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
    if (pPool) {
        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
        switch (cmd) {
        case CMD_BINDPIPELINE:
        case CMD_BINDPIPELINEDELTA:
        case CMD_BINDDESCRIPTORSETS:
        case CMD_FILLBUFFER:
        case CMD_CLEARCOLORIMAGE:
        case CMD_SETEVENT:
        case CMD_RESETEVENT:
        case CMD_WAITEVENTS:
        case CMD_BEGINQUERY:
        case CMD_ENDQUERY:
        case CMD_RESETQUERYPOOL:
        case CMD_COPYQUERYPOOLRESULTS:
        case CMD_WRITETIMESTAMP:
            skip_call |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_SETVIEWPORTSTATE:
        case CMD_SETSCISSORSTATE:
        case CMD_SETLINEWIDTHSTATE:
        case CMD_SETDEPTHBIASSTATE:
        case CMD_SETBLENDSTATE:
        case CMD_SETDEPTHBOUNDSSTATE:
        case CMD_SETSTENCILREADMASKSTATE:
        case CMD_SETSTENCILWRITEMASKSTATE:
        case CMD_SETSTENCILREFERENCESTATE:
        case CMD_BINDINDEXBUFFER:
        case CMD_BINDVERTEXBUFFER:
        case CMD_DRAW:
        case CMD_DRAWINDEXED:
        case CMD_DRAWINDIRECT:
        case CMD_DRAWINDEXEDINDIRECT:
        case CMD_BLITIMAGE:
        case CMD_CLEARATTACHMENTS:
        case CMD_CLEARDEPTHSTENCILIMAGE:
        case CMD_RESOLVEIMAGE:
        case CMD_BEGINRENDERPASS:
        case CMD_NEXTSUBPASS:
        case CMD_ENDRENDERPASS:
            skip_call |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_DISPATCH:
        case CMD_DISPATCHINDIRECT:
            skip_call |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_COPYBUFFER:
        case CMD_COPYIMAGE:
        case CMD_COPYBUFFERTOIMAGE:
        case CMD_COPYIMAGETOBUFFER:
        case CMD_CLONEIMAGEDATA:
        case CMD_UPDATEBUFFER:
        case CMD_PIPELINEBARRIER:
        case CMD_EXECUTECOMMANDS:
        case CMD_END:
            break;
        default:
            break;
        }
    }
    if (pCB->state != CB_RECORDING) {
        skip_call |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
    } else {
        skip_call |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
        CMD_NODE cmdNode = {};
        // Initialize the cmd node and append it to the end of the CB's command list
        cmdNode.cmdNumber = ++pCB->numCmds;
        cmdNode.type = cmd;
        pCB->cmds.push_back(cmdNode);
    }
    return skip_call;
}
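
// Usage sketch (hypothetical caller, illustration only): a vkCmd* entry point
// would typically do
//     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
//     if (pCB) skip_call |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
// before passing the call down the dispatch chain.
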
// For given object struct return a ptr of BASE_NODE type for its wrapping struct
BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
    BASE_NODE *base_ptr = nullptr;
    switch (object_struct.type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
        base_ptr = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
        base_ptr = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
        base_ptr = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
        base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        base_ptr = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
        base_ptr = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        base_ptr = getImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
        base_ptr = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
        base_ptr = getEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
        base_ptr = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
        base_ptr = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
        base_ptr = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
        base_ptr = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
        base_ptr = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
        break;
    }
    default:
        // TODO : Any other objects to be handled here?
        assert(0);
        break;
    }
    return base_ptr;
}

// Tie the VK_OBJECT to the cmd buffer which includes:
//  Add object_binding to cmd buffer
//  Add cb_binding to object
static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
    cb_bindings->insert(cb_node);
    cb_node->object_bindings.insert(obj);
}
// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
    if (base_obj)
        base_obj->cb_bindings.erase(cb_node);
}
// Reset the command buffer state
//  Maintain the createInfo and set state to CB_NEW, but clear all other state
static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
    if (pCB) {
        pCB->in_use.store(0);
        pCB->cmds.clear();
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->numCmds = 0;
        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->status = 0;
        pCB->viewportMask = 0;
        pCB->scissorMask = 0;

        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
            pCB->lastBound[i].reset();
        }

        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->broken_bindings.clear();
        pCB->waitedEvents.clear();
        pCB->events.clear();
        pCB->writeEventsBeforeWait.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageSubresourceMap.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->drawData.clear();
        pCB->currentDrawData.buffers.clear();
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        // Make sure any secondaryCommandBuffers are removed from globalInFlight
        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
        }
        pCB->secondaryCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        clear_cmd_buf_and_mem_references(dev_data, pCB);
        pCB->eventUpdates.clear();
        pCB->queryUpdates.clear();

        // Remove object bindings
        for (auto obj : pCB->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, pCB);
        }
        pCB->object_bindings.clear();
        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
        for (auto framebuffer : pCB->framebuffers) {
            auto fb_state = getFramebufferState(dev_data, framebuffer);
            if (fb_state)
                fb_state->cb_bindings.erase(pCB);
        }
        pCB->framebuffers.clear();
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}

// Set PSO-related status bits for CB, including dynamic state set via PSO
static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe) {
    // Account for any dynamic state not set via this PSO
    if (!pPipe->graphicsPipelineCI.pDynamicState ||
        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
        pCB->status |= CBSTATUS_ALL_STATE_SET;
    } else {
        // First consider all state on
        // Then unset any state that's noted as dynamic in PSO
        // Finally OR that into CB statemask
        CBStatusFlags psoDynStateMask = CBSTATUS_ALL_STATE_SET;
        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
            case VK_DYNAMIC_STATE_LINE_WIDTH:
                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BIAS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
                break;
            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
                break;
            default:
                // TODO : Flag error here
                break;
            }
        }
        pCB->status |= psoDynStateMask;
    }
}
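
// Worked example (illustration only): a PSO listing only
// VK_DYNAMIC_STATE_LINE_WIDTH and VK_DYNAMIC_STATE_DEPTH_BIAS as dynamic
// leaves psoDynStateMask == CBSTATUS_ALL_STATE_SET with the
// CBSTATUS_LINE_WIDTH_SET and CBSTATUS_DEPTH_BIAS_SET bits cleared, so those
// two bits are expected to be set later by vkCmdSetLineWidth()/vkCmdSetDepthBias().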

// Print the last bound Gfx Pipeline
static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
    bool skip_call = false;
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB) {
        PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
        if (!pPipeTrav) {
            // nothing to print
        } else {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                 __LINE__, DRAWSTATE_NONE, "DS", "%s",
                                 vk_print_vkgraphicspipelinecreateinfo(
                                     reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
                                     .c_str());
        }
    }
    return skip_call;
}

static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB && !pCB->cmds.empty()) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_NONE, "DS", "Cmds in command buffer 0x%p", (void *)cb);
        vector<CMD_NODE> cmds = pCB->cmds;
        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
            // TODO : Need to pass cmdbuffer as srcObj here
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD 0x%" PRIx64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
        }
    } else {
        // Nothing to print
    }
}

static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
    bool skip_call = false;
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return skip_call;
    }
    skip_call |= printPipeline(my_data, cb);
    return skip_call;
}

// Flags validation error if the associated call is made inside a render pass. The apiName
// routine should ONLY be called outside a render pass.
static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
    bool inside = false;
    if (pCB->activeRenderPass) {
        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 ")", apiName,
                         (uint64_t)pCB->activeRenderPass->renderPass);
    }
    return inside;
}

// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
    bool outside = false;
    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
                          "%s: This call must be issued inside an active render pass.", apiName);
    }
    return outside;
}
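
// Example pairing (illustration only): a command that must be recorded outside
// a render pass, such as vkCmdCopyBuffer(), would be checked with
// insideRenderPass(), while one that must be recorded inside a render pass,
// such as vkCmdDraw(), would be checked with outsideRenderPass().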

static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
}

static void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, instance_layer_data *instance_data) {
    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME))
            instance_data->surfaceExtensionEnabled = true;
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_EXTENSION_NAME))
            instance_data->displayExtensionEnabled = true;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME))
            instance_data->androidSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME))
            instance_data->mirSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME))
            instance_data->waylandSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME))
            instance_data->win32SurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME))
            instance_data->xcbSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME))
            instance_data->xlibSurfaceExtensionEnabled = true;
#endif
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL)
        return VK_ERROR_INITIALIZATION_FAILED;

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS)
        return result;

    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), instance_layer_data_map);
    instance_data->instance = *pInstance;
    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);

    instance_data->report_data = debug_report_create_instance(
        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
    checkInstanceRegisterExtensions(pCreateInfo, instance_data);
    init_core_validation(instance_data, pAllocator);

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

/* hook DestroyInstance to remove the instance_layer_data_map entry */
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(instance);
    // TBD: Need any locking this early, in case this function is called at the
    // same time by more than one thread?
    instance_layer_data *instance_data = get_my_data_ptr(key, instance_layer_data_map);
    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);

    std::lock_guard<std::mutex> lock(global_lock);
    // Clean up logging callback, if any
    while (instance_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
        instance_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(instance_data->report_data);
    instance_layer_data_map.erase(key);
}

static void checkDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    uint32_t i;
    // TBD: Need any locking, in case this function is called at the same time
    // by more than one thread?
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_extensions.wsi_enabled = false;
    dev_data->device_extensions.wsi_display_swapchain_enabled = false;

    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_enabled = true;
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_display_swapchain_enabled = true;
    }
}

// Verify that queue family has been properly requested
bool ValidateRequestedQueueFamilyProperties(instance_layer_data *instance_data, VkPhysicalDevice gpu, const VkDeviceCreateInfo *create_info) {
    bool skip_call = false;
    auto physical_device_state = getPhysicalDeviceState(instance_data, gpu);
    // First check whether the app has actually requested queueFamilyProperties
    if (!physical_device_state) {
        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                             0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                             "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
    } else if (QUERY_DETAILS != physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
        // TODO: This is not called out as an invalid use in the spec, so make a more informative recommendation.
4357        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
4358                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
4359                             "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
4360    } else {
4361        // Check that the requested queue properties are valid
4362        for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
4363            uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
4364            if (requestedIndex >= physical_device_state->queue_family_properties.size()) {
4365                skip_call |= log_msg(
4366                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
4367                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
4368                    "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
4369            } else if (create_info->pQueueCreateInfos[i].queueCount >
4370                       physical_device_state->queue_family_properties[requestedIndex].queueCount) {
4371                skip_call |=
4372                    log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
4373                            0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
4374                            "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
4375                            "requested queueCount is %u.",
4376                            requestedIndex, physical_device_state->queue_family_properties[requestedIndex].queueCount,
4377                            create_info->pQueueCreateInfos[i].queueCount);
4378            }
4379        }
4380    }
4381    return skip_call;
4382}
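
// Illustrative sketch (not part of the layer): the call sequence this check
// expects from an application; app_* names are hypothetical.
//
//     uint32_t app_count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &app_count, nullptr);
//     std::vector<VkQueueFamilyProperties> app_props(app_count);
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &app_count, app_props.data());
//     // Any VkDeviceQueueCreateInfo must then use queueFamilyIndex < app_count
//     // and queueCount <= app_props[queueFamilyIndex].queueCount.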
4383
4384// Verify that features have been queried and that they are available
4385static bool ValidateRequestedFeatures(instance_layer_data *dev_data, VkPhysicalDevice phys, const VkPhysicalDeviceFeatures *requested_features) {
4386    bool skip_call = false;
4387
4388    auto phys_device_state = getPhysicalDeviceState(dev_data, phys);
4389    const VkBool32 *actual = reinterpret_cast<VkBool32 *>(&phys_device_state->features);
4390    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
4391    // TODO : This is a nice, compact way to loop through the struct, but a bad way to report issues:
4392    //  we need to provide the struct member name with the issue. To do that we'll likely
4393    //  have to loop through each struct member, which should be done w/ codegen to keep in sync.
4394    uint32_t errors = 0;
4395    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
4396    for (uint32_t i = 0; i < total_bools; i++) {
4397        if (requested[i] > actual[i]) {
4398            // TODO: Add index to struct member name helper to be able to include a feature name
4399            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4400                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
4401                "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
4402                "which is not available on this device.",
4403                i);
4404            errors++;
4405        }
4406    }
4407    if (errors && (UNCALLED == phys_device_state->vkGetPhysicalDeviceFeaturesState)) {
4408        // If user didn't request features, notify them that they should
4409        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
4410        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4411                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
4412                             "DL", "You requested features that are unavailable on this device. You should first query feature "
4413                                   "availability by calling vkGetPhysicalDeviceFeatures().");
4414    }
4415    return skip_call;
4416}
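
// Illustrative sketch (not part of the layer): querying features first keeps
// this check quiet; app_* names are hypothetical.
//
//     VkPhysicalDeviceFeatures app_supported = {};
//     vkGetPhysicalDeviceFeatures(gpu, &app_supported);
//     VkPhysicalDeviceFeatures app_enabled = {};
//     if (app_supported.samplerAnisotropy) app_enabled.samplerAnisotropy = VK_TRUE;
//     // Pointing pCreateInfo->pEnabledFeatures at app_enabled requests only
//     // features the device actually exposes.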
4417
4418VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
4419                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4420    instance_layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), instance_layer_data_map);
4421    bool skip_call = false;
4422
4423    // Check that any requested features are available
4424    if (pCreateInfo->pEnabledFeatures) {
4425        skip_call |= ValidateRequestedFeatures(my_instance_data, gpu, pCreateInfo->pEnabledFeatures);
4426    }
4427    skip_call |= ValidateRequestedQueueFamilyProperties(my_instance_data, gpu, pCreateInfo);
4428
4429    if (skip_call) {
4430        return VK_ERROR_VALIDATION_FAILED_EXT;
4431    }
4432
4433    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4434
4435    assert(chain_info->u.pLayerInfo);
4436    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4437    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4438    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
4439    if (fpCreateDevice == NULL) {
4440        return VK_ERROR_INITIALIZATION_FAILED;
4441    }
4442
4443    // Advance the link info for the next element on the chain
4444    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4445
4446    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4447    if (result != VK_SUCCESS) {
4448        return result;
4449    }
4450
4451    std::unique_lock<std::mutex> lock(global_lock);
4452    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4453
4454    my_device_data->instance_data = my_instance_data;
4455    // Setup device dispatch table
4456    layer_init_device_dispatch_table(*pDevice, &my_device_data->dispatch_table, fpGetDeviceProcAddr);
4457    my_device_data->device = *pDevice;
4458
4459    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4460    checkDeviceRegisterExtensions(pCreateInfo, *pDevice);
4461    // Get physical device limits for this device
4462    my_instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
4463    uint32_t count;
4464    my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4465    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
4466    my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
4467        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
4468    // TODO: device limits should make sure these are compatible
4469    if (pCreateInfo->pEnabledFeatures) {
4470        my_device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
4471    } else {
4472        memset(&my_device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
4473    }
4474    // Store physical device mem limits into device layer_data struct
4475    my_instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
4476    lock.unlock();
4477
4478    ValidateLayerOrdering(*pCreateInfo);
4479
4480    return result;
4481}
4482
4484VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4485    // TODOSC : Shouldn't need any customization here
4486    bool skip = false;
4487    dispatch_key key = get_dispatch_key(device);
4488    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4489    // Free all the memory
4490    std::unique_lock<std::mutex> lock(global_lock);
4491    deletePipelines(dev_data);
4492    dev_data->renderPassMap.clear();
4493    deleteCommandBuffers(dev_data);
4494    // This will also delete all sets in the pool & remove them from setMap
4495    deletePools(dev_data);
4496    // All sets should be removed
4497    assert(dev_data->setMap.empty());
4498    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
4499        delete del_layout.second;
4500    }
4501    dev_data->descriptorSetLayoutMap.clear();
4502    dev_data->imageViewMap.clear();
4503    dev_data->imageMap.clear();
4504    dev_data->imageSubresourceMap.clear();
4505    dev_data->imageLayoutMap.clear();
4506    dev_data->bufferViewMap.clear();
4507    dev_data->bufferMap.clear();
4508    // Queues persist until device is destroyed
4509    dev_data->queueMap.clear();
4510    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4511            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4512    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4513            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4514    print_mem_list(dev_data);
4515    printCBList(dev_data);
4516    // Report any memory leaks
4517    DEVICE_MEM_INFO *pInfo = NULL;
4518    if (!dev_data->memObjMap.empty()) {
4519        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4520            pInfo = (*ii).second.get();
4521            if (pInfo->alloc_info.allocationSize != 0) {
4522                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4523                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4524                                (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK, "MEM",
4525                                "Mem Object 0x%" PRIx64 " has not been freed. You should clean up this memory by calling "
4526                                "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().",
4527                                (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4528            }
4529        }
4530    }
4531    layer_debug_report_destroy_device(device);
4532    lock.unlock();
4533
4534#if DISPATCH_MAP_DEBUG
4535    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
4536#endif
4537    if (!skip) {
4538        dev_data->dispatch_table.DestroyDevice(device, pAllocator);
4539        layer_data_map.erase(key);
4540    }
4541}
4542
4543static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4544
4545// This validates that, for each IMAGE referenced by the command buffer,
4546// the initial layout specified in the command buffer is the same
4547// as the globally tracked IMAGE layout
4548static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4549    bool skip_call = false;
4550    for (auto cb_image_data : pCB->imageLayoutMap) {
4551        VkImageLayout imageLayout;
4552        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4553            skip_call |=
4554                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4555                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
4556                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4557        } else {
4558            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4559                // TODO: Set memory invalid which is in mem_tracker currently
4560            } else if (imageLayout != cb_image_data.second.initialLayout) {
4561                if (cb_image_data.first.hasSubresource) {
4562                    skip_call |= log_msg(
4563                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4564                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4565                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
4566                        "with layout %s when first use is %s.",
4567                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
4568                        cb_image_data.first.subresource.arrayLayer, cb_image_data.first.subresource.mipLevel,
4569                        string_VkImageLayout(imageLayout),
4570                        string_VkImageLayout(cb_image_data.second.initialLayout));
4571                } else {
4572                    skip_call |= log_msg(
4573                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4574                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4575                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
4576                        "first use is %s.",
4577                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4578                        string_VkImageLayout(cb_image_data.second.initialLayout));
4579                }
4580            }
4581            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4582        }
4583    }
4584    return skip_call;
4585}
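
// Illustrative sketch (not part of the layer): transitioning an image so its
// layout at first use matches what the command buffer expects; app_* names
// are hypothetical.
//
//     VkImageMemoryBarrier app_barrier = {};
//     app_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     app_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     app_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     app_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     app_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     app_barrier.image = app_image;
//     app_barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(app_cmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
//                          VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr,
//                          1, &app_barrier);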
4586
4587// Loop through bound objects and increment their in_use counts
4588//  For any unknown objects, flag an error
4589static bool ValidateAndIncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
4590    bool skip = false;
4591    DRAW_STATE_ERROR error_code = DRAWSTATE_NONE;
4592    BASE_NODE *base_obj = nullptr;
4593    for (auto obj : cb_node->object_bindings) {
4594        switch (obj.type) {
4595        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
4596            base_obj = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(obj.handle));
4597            error_code = DRAWSTATE_INVALID_DESCRIPTOR_SET;
4598            break;
4599        }
4600        case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
4601            base_obj = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(obj.handle));
4602            error_code = DRAWSTATE_INVALID_SAMPLER;
4603            break;
4604        }
4605        case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
4606            base_obj = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(obj.handle));
4607            error_code = DRAWSTATE_INVALID_QUERY_POOL;
4608            break;
4609        }
4610        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
4611            base_obj = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(obj.handle));
4612            error_code = DRAWSTATE_INVALID_PIPELINE;
4613            break;
4614        }
4615        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
4616            base_obj = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
4617            error_code = DRAWSTATE_INVALID_BUFFER;
4618            break;
4619        }
4620        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
4621            base_obj = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(obj.handle));
4622            error_code = DRAWSTATE_INVALID_BUFFER_VIEW;
4623            break;
4624        }
4625        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
4626            base_obj = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
4627            error_code = DRAWSTATE_INVALID_IMAGE;
4628            break;
4629        }
4630        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
4631            base_obj = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(obj.handle));
4632            error_code = DRAWSTATE_INVALID_IMAGE_VIEW;
4633            break;
4634        }
4635        case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
4636            base_obj = getEventNode(dev_data, reinterpret_cast<VkEvent &>(obj.handle));
4637            error_code = DRAWSTATE_INVALID_EVENT;
4638            break;
4639        }
4640        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
4641            base_obj = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(obj.handle));
4642            error_code = DRAWSTATE_INVALID_DESCRIPTOR_POOL;
4643            break;
4644        }
4645        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
4646            base_obj = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(obj.handle));
4647            error_code = DRAWSTATE_INVALID_COMMAND_POOL;
4648            break;
4649        }
4650        case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
4651            base_obj = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(obj.handle));
4652            error_code = DRAWSTATE_INVALID_FRAMEBUFFER;
4653            break;
4654        }
4655        case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
4656            base_obj = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(obj.handle));
4657            error_code = DRAWSTATE_INVALID_RENDERPASS;
4658            break;
4659        }
4660        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
4661            base_obj = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(obj.handle));
4662            error_code = DRAWSTATE_INVALID_DEVICE_MEMORY;
4663            break;
4664        }
4665        default:
4666            // TODO : Merge handling of other objects types into this code
4667            break;
4668        }
4669        if (!base_obj) {
4670            skip |=
4671                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj.type, obj.handle, __LINE__, error_code, "DS",
4672                        "Cannot submit cmd buffer using deleted %s 0x%" PRIx64 ".", object_type_to_string(obj.type), obj.handle);
4673        } else {
4674            base_obj->in_use.fetch_add(1);
4675        }
4676    }
4677    return skip;
4678}
4679
4680// Track which resources are in-flight by atomically incrementing their "in_use" count
4681static bool validateAndIncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
4682    bool skip_call = false;
4683
4684    cb_node->in_use.fetch_add(1);
4685    dev_data->globalInFlightCmdBuffers.insert(cb_node->commandBuffer);
4686
4687    // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
4688    skip_call |= ValidateAndIncrementBoundObjects(dev_data, cb_node);
4689    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
4690    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
4691    //  should then be flagged prior to calling this function
4692    for (auto drawDataElement : cb_node->drawData) {
4693        for (auto buffer : drawDataElement.buffers) {
4694            auto buffer_node = getBufferNode(dev_data, buffer);
4695            if (!buffer_node) {
4696                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4697                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4698                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
4699            } else {
4700                buffer_node->in_use.fetch_add(1);
4701            }
4702        }
4703    }
4704    for (auto event : cb_node->writeEventsBeforeWait) {
4705        auto event_state = getEventNode(dev_data, event);
4706        if (event_state)
4707            event_state->write_in_use++;
4708    }
4709    return skip_call;
4710}
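
// The in_use counts bumped here pair with the fetch_sub calls in
// DecrementBoundResources()/RetireWorkOnQueue() below. Lifecycle for one
// buffer (illustrative only):
//
//     vkQueueSubmit           -> buffer_node->in_use: 0 -> 1  (this function)
//     queue/fence retirement  -> buffer_node->in_use: 1 -> 0  (RetireWorkOnQueue)
//     vkDestroyBuffer         -> ValidateObjectNotInUse() now sees 0 and allows it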
4711
4712// Note: This function assumes that the global lock is held by the calling
4713// thread.
4714// TODO: untangle this.
4715static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4716    bool skip_call = false;
4717    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4718    if (pCB) {
4719        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
4720            for (auto event : queryEventsPair.second) {
4721                if (my_data->eventMap[event].needsSignaled) {
4722                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4723                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
4724                                         "Cannot get query results on queryPool 0x%" PRIx64
4725                                         " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
4726                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4727                }
4728            }
4729        }
4730    }
4731    return skip_call;
4732}
4733
4734// TODO: nuke this completely.
4735// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4736static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
4737    // Decrement the CB's in_use count; once nothing still references it, drop it from the global in-flight set
4738    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
4739    pCB->in_use.fetch_sub(1);
4740    if (!pCB->in_use.load()) {
4741        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4742    }
4743}
4744
4745// Decrement in-use count for objects bound to command buffer
4746static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
4747    BASE_NODE *base_obj = nullptr;
4748    for (auto obj : cb_node->object_bindings) {
4749        base_obj = GetStateStructPtrFromObject(dev_data, obj);
4750        if (base_obj) {
4751            base_obj->in_use.fetch_sub(1);
4752        }
4753    }
4754}
4755
4756static bool RetireWorkOnQueue(layer_data *dev_data, QUEUE_NODE *pQueue, uint64_t seq)
4757{
4758    bool skip_call = false; // TODO: extract everything that might fail to precheck
4759    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
4760
4761    // Roll this queue forward, one submission at a time.
4762    while (pQueue->seq < seq) {
4763        auto & submission = pQueue->submissions.front();
4764
4765        for (auto & wait : submission.waitSemaphores) {
4766            auto pSemaphore = getSemaphoreNode(dev_data, wait.semaphore);
4767            if (pSemaphore) {
4768                pSemaphore->in_use.fetch_sub(1);
4769            }
4770            auto & lastSeq = otherQueueSeqs[wait.queue];
4771            lastSeq = std::max(lastSeq, wait.seq);
4772        }
4773
4774        for (auto & semaphore : submission.signalSemaphores) {
4775            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4776            if (pSemaphore) {
4777                pSemaphore->in_use.fetch_sub(1);
4778            }
4779        }
4780
4781        for (auto cb : submission.cbs) {
4782            auto cb_node = getCBNode(dev_data, cb);
4783            if (!cb_node) {
4784                continue;
4785            }
4786            // First perform decrement on general case bound objects
4787            DecrementBoundResources(dev_data, cb_node);
4788            for (auto drawDataElement : cb_node->drawData) {
4789                for (auto buffer : drawDataElement.buffers) {
4790                    auto buffer_node = getBufferNode(dev_data, buffer);
4791                    if (buffer_node) {
4792                        buffer_node->in_use.fetch_sub(1);
4793                    }
4794                }
4795            }
4796            for (auto event : cb_node->writeEventsBeforeWait) {
4797                auto eventNode = dev_data->eventMap.find(event);
4798                if (eventNode != dev_data->eventMap.end()) {
4799                    eventNode->second.write_in_use--;
4800                }
4801            }
4802            for (auto queryStatePair : cb_node->queryToStateMap) {
4803                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4804            }
4805            for (auto eventStagePair : cb_node->eventToStageMap) {
4806                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4807            }
4808
4809            skip_call |= cleanInFlightCmdBuffer(dev_data, cb);
4810            removeInFlightCmdBuffer(dev_data, cb);
4811        }
4812
4813        auto pFence = getFenceNode(dev_data, submission.fence);
4814        if (pFence) {
4815            pFence->state = FENCE_RETIRED;
4816        }
4817
4818        pQueue->submissions.pop_front();
4819        pQueue->seq++;
4820    }
4821
4822    // Roll other queues forward to the highest seq we saw a wait for
4823    for (auto qs : otherQueueSeqs) {
4824        skip_call |= RetireWorkOnQueue(dev_data, getQueueNode(dev_data, qs.first), qs.second);
4825    }
4826
4827    return skip_call;
4828}
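
// Worked example of the seq bookkeeping (illustrative numbers): if this queue
// has already retired 5 submissions (pQueue->seq == 5) and 3 more are pending,
// the fence in the last pending submission was recorded with signaler.second ==
// 5 + 3 == 8, so RetireWorkOnQueue(dev_data, pQueue, 8) pops all 3 pending
// submissions and leaves pQueue->seq == 8.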
4829
4830
4831// Submit a fence to a queue, delimiting previous fences and previous untracked
4832// work by it.
4833static void
4834SubmitFence(QUEUE_NODE *pQueue, FENCE_NODE *pFence, uint64_t submitCount)
4835{
4836    pFence->state = FENCE_INFLIGHT;
4837    pFence->signaler.first = pQueue->queue;
4838    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
4839}
4840
4841static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4842    bool skip_call = false;
4843    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
4844        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4845        skip_call |=
4846            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4847                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
4848                    "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
4849                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
4850    }
4851    return skip_call;
4852}
4853
4854static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *call_source) {
4855    bool skip = false;
4856    if (dev_data->instance_data->disabled.command_buffer_state)
4857        return skip;
4858    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
4859    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
4860        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4861                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4862                        "Command buffer 0x%" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
4863                        "set, but has been submitted 0x%" PRIxLEAST64 " times.",
4864                        (uint64_t)(pCB->commandBuffer), pCB->submitCount);
4865    }
4866    // Validate that cmd buffers have been updated
4867    if (CB_RECORDED != pCB->state) {
4868        if (CB_INVALID == pCB->state) {
4869            // Inform app of reason CB invalid
4870            for (auto obj : pCB->broken_bindings) {
4871                const char *type_str = object_type_to_string(obj.type);
4872                // Descriptor sets are a special case that can be either destroyed or updated to invalidate a CB
4873                const char *cause_str =
4874                    (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ? "destroyed or updated" : "destroyed";
4875
4876                skip |=
4877                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4878                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4879                            "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid because bound %s 0x%" PRIxLEAST64
4880                            " was %s.",
4881                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), type_str, obj.handle, cause_str);
4882            }
4883        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
4884            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4885                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
4886                            "You must call vkEndCommandBuffer() on command buffer 0x%" PRIxLEAST64 " before this call to %s!",
4887                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), call_source);
4888        }
4889    }
4890    return skip;
4891}
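
// Illustrative sketch (not part of the layer): begin-info flags that keep the
// checks above quiet; app_* names are hypothetical.
//
//     VkCommandBufferBeginInfo app_begin = {};
//     app_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     // Omit ONE_TIME_SUBMIT for a re-submittable CB; SIMULTANEOUS_USE also
//     // permits overlapping submissions of the same CB:
//     app_begin.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
//     vkBeginCommandBuffer(app_cmd, &app_begin);
//     // ... record commands ...
//     vkEndCommandBuffer(app_cmd);  // must precede vkQueueSubmit()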
4892
4893// Validate that queueFamilyIndices of primary command buffers match this queue
4894// Secondary command buffers were previously validated in vkCmdExecuteCommands().
4895static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
4896    bool skip_call = false;
4897    auto pPool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
4898    auto queue_node = getQueueNode(dev_data, queue);
4899
4900    if (pPool && queue_node && (pPool->queueFamilyIndex != queue_node->queueFamilyIndex)) {
4901        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4902            reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
4903            "vkQueueSubmit: Primary command buffer 0x%" PRIxLEAST64
4904            " created in queue family %d is being submitted on queue 0x%" PRIxLEAST64 " from queue family %d.",
4905            reinterpret_cast<uint64_t>(pCB->commandBuffer), pPool->queueFamilyIndex,
4906            reinterpret_cast<uint64_t>(queue), queue_node->queueFamilyIndex);
4907    }
4908
4909    return skip_call;
4910}
4911
4912static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4913    // Track in-use for resources off of primary and any secondary CBs
4914    bool skip_call = false;
4915
4916    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
4917    // on device
4918    skip_call |= validateCommandBufferSimultaneousUse(dev_data, pCB);
4919
4920    skip_call |= validateAndIncrementResources(dev_data, pCB);
4921
4922    if (!pCB->secondaryCommandBuffers.empty()) {
4923        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
4924            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
4925            skip_call |= validateAndIncrementResources(dev_data, pSubCB);
4926            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
4927                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4928                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4929                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4930                        "Command buffer 0x%" PRIxLEAST64 " was submitted with secondary buffer 0x%" PRIxLEAST64
4931                        " but that buffer has subsequently been bound to "
4932                        "primary cmd buffer 0x%" PRIxLEAST64
4933                        " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
4934                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
4935                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
4936            }
4937        }
4938    }
4939
4940    skip_call |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()");
4941
4942    return skip_call;
4943}
4944
4945static bool
4946ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence)
4947{
4948    bool skip_call = false;
4949
4950    if (pFence) {
4951        if (pFence->state == FENCE_INFLIGHT) {
4952            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4953                                 (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4954                                 "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
4955        }
4956
4957        else if (pFence->state == FENCE_RETIRED) {
4958            skip_call |=
4959                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4960                        reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4961                        "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
4962                        reinterpret_cast<uint64_t &>(pFence->fence));
4963        }
4964    }
4965
4966    return skip_call;
4967}
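
// Illustrative sketch (not part of the layer): reusing a fence without
// triggering either error above; app_* names are hypothetical.
//
//     vkWaitForFences(device, 1, &app_fence, VK_TRUE, UINT64_MAX);
//     vkResetFences(device, 1, &app_fence);  // RETIRED -> UNSIGNALED again
//     vkQueueSubmit(app_queue, 1, &app_submit, app_fence);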
4968
4969
4970VKAPI_ATTR VkResult VKAPI_CALL
4971QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
4972    bool skip_call = false;
4973    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
4974    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4975    std::unique_lock<std::mutex> lock(global_lock);
4976
4977    auto pQueue = getQueueNode(dev_data, queue);
4978    auto pFence = getFenceNode(dev_data, fence);
4979    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
4980
4981    if (skip_call) {
4982        return VK_ERROR_VALIDATION_FAILED_EXT;
4983    }
4984
4985    // TODO : Review these old print functions and clean up as appropriate
4986    print_mem_list(dev_data);
4987    printCBList(dev_data);
4988
4989    // Mark the fence in-use.
4990    if (pFence) {
4991        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
4992    }
4993
4994    // Now verify each individual submit
4995    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4996        const VkSubmitInfo *submit = &pSubmits[submit_idx];
4997        vector<SEMAPHORE_WAIT> semaphore_waits;
4998        vector<VkSemaphore> semaphore_signals;
4999        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
5000            VkSemaphore semaphore = submit->pWaitSemaphores[i];
5001            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
5002            if (pSemaphore) {
5003                if (pSemaphore->signaled) {
5004                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
5005                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
5006                        pSemaphore->in_use.fetch_add(1);
5007                    }
5008                    pSemaphore->signaler.first = VK_NULL_HANDLE;
5009                    pSemaphore->signaled = false;
5010                } else {
5011                    skip_call |=
5012                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5013                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
5014                                "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
5015                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
5016                }
5017            }
5018        }
5019        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
5020            VkSemaphore semaphore = submit->pSignalSemaphores[i];
5021            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
5022            if (pSemaphore) {
5023                if (pSemaphore->signaled) {
5024                    skip_call |=
5025                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5026                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
5027                                "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
5028                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
5029                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
5030                                reinterpret_cast<uint64_t &>(pSemaphore->signaler.first));
5031                } else {
5032                    pSemaphore->signaler.first = queue;
5033                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
5034                    pSemaphore->signaled = true;
5035                    pSemaphore->in_use.fetch_add(1);
5036                    semaphore_signals.push_back(semaphore);
5037                }
5038            }
5039        }
5040
5041        std::vector<VkCommandBuffer> cbs;
5042
5043        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5044            auto cb_node = getCBNode(dev_data, submit->pCommandBuffers[i]);
5045            if (cb_node) {
5046                skip_call |= ValidateCmdBufImageLayouts(dev_data, cb_node);
5047                cbs.push_back(submit->pCommandBuffers[i]);
5048                for (auto secondaryCmdBuffer : cb_node->secondaryCommandBuffers) {
5049                    cbs.push_back(secondaryCmdBuffer);
5050                }
5051
5052                cb_node->submitCount++; // increment submit count
5053                skip_call |= validatePrimaryCommandBufferState(dev_data, cb_node);
5054                skip_call |= validateQueueFamilyIndices(dev_data, cb_node, queue);
5055                // Potential early exit here as bad object state may crash in delayed function calls
5056                if (skip_call)
5057                    return result;
5058                // Call submit-time functions to validate/update state
5059                for (auto &function : cb_node->validate_functions) {
5060                    skip_call |= function();
5061                }
5062                for (auto &function : cb_node->eventUpdates) {
5063                    skip_call |= function(queue);
5064                }
5065                for (auto &function : cb_node->queryUpdates) {
5066                    skip_call |= function(queue);
5067                }
5068            }
5069        }
5070
5071        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
5072                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
5073    }
5074
5075    if (pFence && !submitCount) {
5076        // If no submissions, but just dropping a fence on the end of the queue,
5077        // record an empty submission with just the fence, so we can determine
5078        // its completion.
5079        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
5080                                         std::vector<SEMAPHORE_WAIT>(),
5081                                         std::vector<VkSemaphore>(),
5082                                         fence);
5083    }
5084
5085    lock.unlock();
5086    if (!skip_call)
5087        result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
5088
5089    return result;
5090}
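
// Illustrative sketch (not part of the layer): chaining two submits with a
// semaphore so the wait in the loop above always has a registered signaler;
// app_* names are hypothetical.
//
//     VkPipelineStageFlags app_stage = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
//     VkSubmitInfo app_first = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
//     app_first.signalSemaphoreCount = 1;
//     app_first.pSignalSemaphores = &app_sem;
//     VkSubmitInfo app_second = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
//     app_second.waitSemaphoreCount = 1;
//     app_second.pWaitSemaphores = &app_sem;
//     app_second.pWaitDstStageMask = &app_stage;
//     vkQueueSubmit(app_queue, 1, &app_first, VK_NULL_HANDLE);
//     vkQueueSubmit(app_queue, 1, &app_second, app_fence);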
5091
5092static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
5093    bool skip = false;
5094    if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
5095        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
5096                        reinterpret_cast<const uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_00611, "MEM",
5097                        "Number of currently valid memory objects is not less than the maximum allowed (%u). %s",
5098                        dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount,
5099                        validation_error_map[VALIDATION_ERROR_00611]);
5100    }
5101    return skip;
5102}
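
// Illustrative note (not part of the layer): the spec-guaranteed minimum for
// maxMemoryAllocationCount is 4096, so applications typically make a few large
// vkAllocateMemory calls and sub-allocate within them at offsets, rather than
// one allocation per buffer or image.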
5103
5104static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
5105    add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
5106    print_mem_list(dev_data);
5107    return;
5108}
5109
5110VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
5111                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
5112    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5113    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5114    std::unique_lock<std::mutex> lock(global_lock);
5115    bool skip = PreCallValidateAllocateMemory(dev_data);
5116    if (!skip) {
5117        lock.unlock();
5118        result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
5119        lock.lock();
5120        if (VK_SUCCESS == result) {
5121            PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
5122        }
5123    }
5124    return result;
5125}
5126
5127// For the given object node: if it is in use, flag a validation error and return the callback result; else return false
5128bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
5129                            UNIQUE_VALIDATION_ERROR_CODE error_code) {
5130    if (dev_data->instance_data->disabled.object_in_use)
5131        return false;
5132    bool skip = false;
5133    if (obj_node->in_use.load()) {
5134        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_struct.type, obj_struct.handle, __LINE__,
5135                        error_code, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer. %s",
5136                        object_type_to_string(obj_struct.type), obj_struct.handle, validation_error_map[error_code]);
5137    }
5138    return skip;
5139}
5140
5141static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
5142    *mem_info = getMemObjInfo(dev_data, mem);
5143    *obj_struct = {reinterpret_cast<uint64_t &>(mem), VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT};
5144    if (dev_data->instance_data->disabled.free_memory)
5145        return false;
5146    bool skip = false;
5147    if (*mem_info) {
5148        skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, VALIDATION_ERROR_00620);
5149    }
5150    return skip;
5151}
5152
5153static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
5154    // Clear mem binding for any bound objects
5155    for (auto obj : mem_info->obj_bindings) {
5156        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__, MEMTRACK_FREED_MEM_REF,
5157                "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64, obj.handle,
5158                (uint64_t)mem_info->mem);
5159        switch (obj.type) {
5160        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
5161            auto image_state = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
5162            assert(image_state); // Any destroyed images should already be removed from bindings
5163            image_state->binding.mem = MEMORY_UNBOUND;
5164            break;
5165        }
5166        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
5167            auto buff_node = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
5168            assert(buff_node); // Any destroyed buffers should already be removed from bindings
5169            buff_node->binding.mem = MEMORY_UNBOUND;
5170            break;
5171        }
5172        default:
5173            // Should only have buffer or image objects bound to memory
5174            assert(0);
5175        }
5176    }
5177    // Any bound cmd buffers are now invalid
5178    invalidateCommandBuffers(mem_info->cb_bindings, obj_struct);
5179    dev_data->memObjMap.erase(mem);
5180}
5181
5182VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
5183    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5184    DEVICE_MEM_INFO *mem_info = nullptr;
5185    VK_OBJECT obj_struct;
5186    std::unique_lock<std::mutex> lock(global_lock);
5187    bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
5188    if (!skip) {
5189        lock.unlock();
5190        dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
5191        lock.lock();
5192        PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
5193    }
5194}
5195
5196// Validate that the given map-memory range is valid. This means that the memory should not already be mapped,
5197//  and that the size of the map range should be:
5198//  1. Not zero
5199//  2. Within the size of the memory allocation
5200static bool ValidateMapMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5201    bool skip_call = false;
5202
5203    if (size == 0) {
5204        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5205                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5206                            "VkMapMemory: Attempting to map memory range of size zero");
5207    }
5208
5209    auto mem_element = my_data->memObjMap.find(mem);
5210    if (mem_element != my_data->memObjMap.end()) {
5211        auto mem_info = mem_element->second.get();
5212        // It is an application error to call VkMapMemory on an object that is already mapped
5213        if (mem_info->mem_range.size != 0) {
5214            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5215                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5216                                "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
5217        }
5218
5219        // Validate that offset + size is within object's allocationSize
5220        if (size == VK_WHOLE_SIZE) {
5221            if (offset >= mem_info->alloc_info.allocationSize) {
5222                skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5223                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5224                                    "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
5225                                           " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
5226                                    offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
5227            }
5228        } else {
5229            if ((offset + size) > mem_info->alloc_info.allocationSize) {
5230                skip_call =
5231                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5232                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5233                            "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64, offset,
5234                            size + offset, mem_info->alloc_info.allocationSize);
5235            }
5236        }
5237    }
5238    return skip_call;
5239}
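
// Illustrative sketch (not part of the layer): a map request that satisfies
// the rules above; app_* names are hypothetical, allocationSize is 1024.
//
//     void *app_ptr = nullptr;
//     vkMapMemory(device, app_mem, 256 /*offset*/, 512 /*size*/, 0, &app_ptr);
//     // size != 0, offset(256) + size(512) <= 1024, and the object was not
//     // already mapped, so ValidateMapMemRange() stays quiet.
//     vkUnmapMemory(device, app_mem);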
5240
5241static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5242    auto mem_info = getMemObjInfo(my_data, mem);
5243    if (mem_info) {
5244        mem_info->mem_range.offset = offset;
5245        mem_info->mem_range.size = size;
5246    }
5247}
5248
5249static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
5250    bool skip_call = false;
5251    auto mem_info = getMemObjInfo(my_data, mem);
5252    if (mem_info) {
5253        if (!mem_info->mem_range.size) {
5254            // Valid Usage: memory must currently be mapped
5255            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5256                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5257                                "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
5258        }
5259        mem_info->mem_range.size = 0;
5260        if (mem_info->shadow_copy) {
5261            free(mem_info->shadow_copy_base);
5262            mem_info->shadow_copy_base = 0;
5263            mem_info->shadow_copy = 0;
5264        }
5265    }
5266    return skip_call;
5267}
5268
5269// Guard value for pad data
5270static char NoncoherentMemoryFillValue = 0xb;
5271
5272static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
5273                                     void **ppData) {
5274    auto mem_info = getMemObjInfo(dev_data, mem);
5275    if (mem_info) {
5276        mem_info->p_driver_data = *ppData;
5277        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
5278        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
5279            mem_info->shadow_copy = 0;
5280        } else {
5281            if (size == VK_WHOLE_SIZE) {
5282                size = mem_info->alloc_info.allocationSize - offset;
5283            }
5284            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
5285            assert(vk_safe_modulo(mem_info->shadow_pad_size,
5286                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
5287            // Ensure start of mapped region reflects hardware alignment constraints
5288            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
5289
5290            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
5291            uint64_t start_offset = offset % map_alignment;
5292            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
5293            mem_info->shadow_copy_base = malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
5294
5295            mem_info->shadow_copy =
5296                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
5297                                         ~(map_alignment - 1)) + start_offset;
5298            assert(vk_safe_modulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
5299                                  map_alignment) == 0);
5300
5301            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
5302            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
5303        }
5304    }
5305}
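
// Worked example of the shadow-copy math above (illustrative numbers): with
// minMemoryMapAlignment == 64, offset == 100, and size == 200, we get
// map_alignment == shadow_pad_size == 64 and start_offset == 100 % 64 == 36.
// malloc receives 2*64 + 200 + 64 + 36 bytes; shadow_copy is the malloc'd base
// rounded up to a 64-byte boundary plus 36, so (shadow_copy + shadow_pad_size
// - start_offset) is 64-aligned, satisfying the spec rule that (ppData -
// offset) be aligned to minMemoryMapAlignment.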
5306
5307// Verify that state for fence being waited on is appropriate. That is,
5308//  a fence being waited on should not already be signaled and
5309//  it should have been submitted on a queue or during acquire next image
5310static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
5311    bool skip_call = false;
5312
5313    auto pFence = getFenceNode(dev_data, fence);
5314    if (pFence) {
5315        if (pFence->state == FENCE_UNSIGNALED) {
5316            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5317                                 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5318                                 "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
5319                                 "acquire next image.",
5320                                 apiCall, reinterpret_cast<uint64_t &>(fence));
5321        }
5322    }
5323    return skip_call;
5324}
5325
5326static bool RetireFence(layer_data *dev_data, VkFence fence) {
5327    auto pFence = getFenceNode(dev_data, fence);
5328    if (pFence->signaler.first != VK_NULL_HANDLE) {
5329        /* Fence signaller is a queue -- use this as proof that prior operations
5330         * on that queue have completed.
5331         */
5332        return RetireWorkOnQueue(dev_data,
5333                                 getQueueNode(dev_data, pFence->signaler.first),
5334                                 pFence->signaler.second);
5335    }
5336    else {
5337        /* Fence signaller is the WSI. We're not tracking what the WSI op
5338         * actually /was/ in CV yet, but we need to mark the fence as retired.
5339         */
5340        pFence->state = FENCE_RETIRED;
5341        return false;
5342    }
5343}
5344
5345VKAPI_ATTR VkResult VKAPI_CALL
5346WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
5347    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5348    bool skip_call = false;
5349    // Verify fence status of submitted fences
5350    std::unique_lock<std::mutex> lock(global_lock);
5351    for (uint32_t i = 0; i < fenceCount; i++) {
5352        skip_call |= verifyWaitFenceState(dev_data, pFences[i], "vkWaitForFences");
5353    }
5354    lock.unlock();
5355    if (skip_call)
5356        return VK_ERROR_VALIDATION_FAILED_EXT;
5357
5358    VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
5359
5360    if (result == VK_SUCCESS) {
5361        lock.lock();
5362        // When we know that all fences are complete we can clean/remove their CBs
5363        if (waitAll || fenceCount == 1) {
5364            for (uint32_t i = 0; i < fenceCount; i++) {
5365                skip_call |= RetireFence(dev_data, pFences[i]);
5366            }
5367        }
5368        // NOTE : Alternate case not handled here is when some fences have completed. In
5369        //  this case for app to guarantee which fences completed it will have to call
5370        //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
5371        lock.unlock();
5372    }
5373    if (skip_call)
5374        return VK_ERROR_VALIDATION_FAILED_EXT;
5375    return result;
5376}
5377
5378VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
5379    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5380    bool skip_call = false;
5381    std::unique_lock<std::mutex> lock(global_lock);
5382    skip_call = verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
5383    lock.unlock();
5384
5385    if (skip_call)
5386        return VK_ERROR_VALIDATION_FAILED_EXT;
5387
5388    VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
5389    lock.lock();
5390    if (result == VK_SUCCESS) {
5391        skip_call |= RetireFence(dev_data, fence);
5392    }
5393    lock.unlock();
5394    if (skip_call)
5395        return VK_ERROR_VALIDATION_FAILED_EXT;
5396    return result;
5397}
5398
5399VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
5400                                                            VkQueue *pQueue) {
5401    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5402    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
5403    std::lock_guard<std::mutex> lock(global_lock);
5404
5405    // Add queue to tracking set only if it is new
5406    auto result = dev_data->queues.emplace(*pQueue);
5407    if (result.second) {
5408        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
5409        pQNode->queue = *pQueue;
5410        pQNode->queueFamilyIndex = queueFamilyIndex;
5411        pQNode->seq = 0;
5412    }
5413}
5414
5415VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
5416    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5417    bool skip_call = false;
5418    std::unique_lock<std::mutex> lock(global_lock);
5419    auto pQueue = getQueueNode(dev_data, queue);
5420    skip_call |= RetireWorkOnQueue(dev_data, pQueue, pQueue->seq + pQueue->submissions.size());
5421    lock.unlock();
5422    if (skip_call)
5423        return VK_ERROR_VALIDATION_FAILED_EXT;
5424    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
5425    return result;
5426}
5427
5428VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
5429    bool skip_call = false;
5430    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5431    std::unique_lock<std::mutex> lock(global_lock);
5432    for (auto &queue : dev_data->queueMap) {
5433        skip_call |= RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
5434    }
5435    lock.unlock();
5436    if (skip_call)
5437        return VK_ERROR_VALIDATION_FAILED_EXT;
5438    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
5439    return result;
5440}
5441
5442VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
5443    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5444    bool skip_call = false;
5445    std::unique_lock<std::mutex> lock(global_lock);
5446    auto fence_pair = dev_data->fenceMap.find(fence);
5447    if (fence_pair != dev_data->fenceMap.end()) {
5448        if (fence_pair->second.state == FENCE_INFLIGHT) {
5449            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5450                                 (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Fence 0x%" PRIx64 " is in use.",
5451                                 (uint64_t)(fence));
5452        }
5453        dev_data->fenceMap.erase(fence_pair);
5454    }
5455    lock.unlock();
5456
5457    if (!skip_call)
5458        dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
5459}
5460
5461VKAPI_ATTR void VKAPI_CALL
5462DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5463    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5464    bool skip = false;
5465    std::unique_lock<std::mutex> lock(global_lock);
5466    auto sema_node = getSemaphoreNode(dev_data, semaphore);
5467    if (sema_node) {
5468        skip |= ValidateObjectNotInUse(dev_data, sema_node,
5469                                       {reinterpret_cast<uint64_t &>(semaphore), VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT},
5470                                       VALIDATION_ERROR_00199);
5471    }
5472    if (!skip) {
5473        dev_data->semaphoreMap.erase(semaphore);
5474        lock.unlock();
5475        dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
5476    }
5477}
5478
5479static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
5480    *event_state = getEventNode(dev_data, event);
5481    *obj_struct = {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT};
5482    if (dev_data->instance_data->disabled.destroy_event)
5483        return false;
5484    bool skip = false;
5485    if (*event_state) {
5486        skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, VALIDATION_ERROR_00213);
5487    }
5488    return skip;
5489}
5490
5491static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
5492    invalidateCommandBuffers(event_state->cb_bindings, obj_struct);
5493    dev_data->eventMap.erase(event);
5494}
5495
5496VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5497    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5498    EVENT_STATE *event_state = nullptr;
5499    VK_OBJECT obj_struct;
5500    std::unique_lock<std::mutex> lock(global_lock);
5501    bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
5502    if (!skip) {
5503        lock.unlock();
5504        dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
5505        lock.lock();
5506        PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
5507    }
5508}
5509
5510VKAPI_ATTR void VKAPI_CALL
5511DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5512    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5513    bool skip = false;
5514    std::unique_lock<std::mutex> lock(global_lock);
5515    auto qp_node = getQueryPoolNode(dev_data, queryPool);
5516    if (qp_node) {
5517        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT};
5518        skip |= ValidateObjectNotInUse(dev_data, qp_node, obj_struct, VALIDATION_ERROR_01012);
5519        // Any bound cmd buffers are now invalid
5520        invalidateCommandBuffers(qp_node->cb_bindings, obj_struct);
5521    }
5522    if (!skip) {
5523        dev_data->queryPoolMap.erase(queryPool);
5524        lock.unlock();
5525        dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
5526    }
5527}
5528
5529VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5530                                                   uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5531                                                   VkQueryResultFlags flags) {
5532    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5533    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5534    std::unique_lock<std::mutex> lock(global_lock);
5535    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5536        auto pCB = getCBNode(dev_data, cmdBuffer);
5537        for (auto queryStatePair : pCB->queryToStateMap) {
5538            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5539        }
5540    }
5541    bool skip_call = false;
5542    for (uint32_t i = 0; i < queryCount; ++i) {
5543        QueryObject query = {queryPool, firstQuery + i};
5544        auto queryElement = queriesInFlight.find(query);
5545        auto queryToStateElement = dev_data->queryToStateMap.find(query);
5546        if (queryToStateElement != dev_data->queryToStateMap.end()) {
5547            // Available and in flight
5548            if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5549                queryToStateElement->second) {
5550                for (auto cmdBuffer : queryElement->second) {
5551                    auto pCB = getCBNode(dev_data, cmdBuffer);
5552                    auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
5553                    if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
5554                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5555                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5556                                             "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
5557                                             (uint64_t)(queryPool), firstQuery + i);
5558                    } else {
5559                        for (auto event : queryEventElement->second) {
5560                            dev_data->eventMap[event].needsSignaled = true;
5561                        }
5562                    }
5563                }
5564                // Unavailable and in flight
5565            } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5566                       !queryToStateElement->second) {
5567                // TODO : Can there be the same query in use by multiple command buffers in flight?
5568                bool make_available = false;
5569                for (auto cmdBuffer : queryElement->second) {
5570                    auto pCB = getCBNode(dev_data, cmdBuffer);
5571                    make_available |= pCB->queryToStateMap[query];
5572                }
5573                if (!((flags & (VK_QUERY_RESULT_PARTIAL_BIT | VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5574                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5575                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5576                                         "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5577                                         (uint64_t)(queryPool), firstQuery + i);
5578                }
5579                // Unavailable
5580            } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
5581                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5582                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5583                                     "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5584                                     (uint64_t)(queryPool), firstQuery + i);
5585            }
5586        } else {
5587            // Uninitialized: no state has ever been recorded for this query
5588            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5589                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5590                                 "Cannot get query results on queryPool 0x%" PRIx64
5591                                 " with index %d as data has not been collected for this index.",
5592                                 (uint64_t)(queryPool), firstQuery + i);
5593        }
5594    }
5595    lock.unlock();
5596    if (skip_call)
5597        return VK_ERROR_VALIDATION_FAILED_EXT;
5598    return dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
5599}
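
// Example (illustrative only): passing VK_QUERY_RESULT_WAIT_BIT (or
// VK_QUERY_RESULT_PARTIAL_BIT) avoids the "unavailable and in flight" error
// above, since the driver will wait for (or partially return) the data.
// `device` and `queryPool` are assumed valid; `query_count` is hypothetical:
//
//     std::vector<uint64_t> results(query_count);
//     vkGetQueryPoolResults(device, queryPool, 0, query_count,
//                           results.size() * sizeof(uint64_t), results.data(),
//                           sizeof(uint64_t),
//                           VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);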
5600
5601static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5602    bool skip_call = false;
5603    auto buffer_node = getBufferNode(my_data, buffer);
5604    if (!buffer_node) {
5605        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5606                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5607                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5608    } else {
5609        if (buffer_node->in_use.load()) {
5610            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5611                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5612                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5613        }
5614    }
5615    return skip_call;
5616}
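
// Illustrative scenario for the check above: calling vkDestroyBuffer on a
// buffer referenced by a submitted, still-executing command buffer leaves
// in_use non-zero, so DRAWSTATE_OBJECT_INUSE fires and the destroy is skipped.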
5617
5618// Return true if the given ranges intersect, else false
5619// Prereq : For both ranges, range->end - range->start > 0. A violation of this should already have
5620//  resulted in an error, so it is not re-checked here
5621// When one range is linear and the other is not, the comparison is padded out to bufferImageGranularity
5622// In that padded case an alias is a validation error: skip_call may be set by the report callback, so
5623//  the caller should merge in the skip_call value whenever a linear/non-linear overlap is possible.
5624static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip_call) {
5625    *skip_call = false;
5626    auto r1_start = range1->start;
5627    auto r1_end = range1->end;
5628    auto r2_start = range2->start;
5629    auto r2_end = range2->end;
5630    VkDeviceSize pad_align = 1;
5631    if (range1->linear != range2->linear) {
5632        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
5633    }
5634    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1)))
5635        return false;
5636    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1)))
5637        return false;
5638
5639    if (range1->linear != range2->linear) {
5640        // In linear vs. non-linear case, it's an error to alias
5641        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
5642        const char *r1_type_str = range1->image ? "image" : "buffer";
5643        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
5644        const char *r2_type_str = range2->image ? "image" : "buffer";
5645        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
5646        *skip_call |=
5647            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, range1->handle, 0, MEMTRACK_INVALID_ALIASING,
5648                    "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
5649                           " which is in violation of the Buffer-Image Granularity section of the Vulkan specification.",
5650                    r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
5651    }
5652    // Ranges intersect
5653    return true;
5654}
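
// Worked example (illustrative, assuming bufferImageGranularity == 0x400): a
// linear buffer range [0x0, 0x3FF] and a non-linear image range starting at
// 0x400 fall on disjoint 0x400-aligned pages, so they don't intersect; had the
// image started at 0x3FF instead, both ranges would touch page 0 and the
// MEMTRACK_INVALID_ALIASING error above would fire.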
5655// Simplified rangesIntersect that calls the above function to check range1 against the [offset, end] span
5656static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
5657    // Create a local MEMORY_RANGE struct to wrap offset/size
5658    MEMORY_RANGE range_wrap;
5659    // Match range1's linear setting to avoid padding and the potential validation-error case
5660    range_wrap.linear = range1->linear;
5661    range_wrap.start = offset;
5662    range_wrap.end = end;
5663    bool tmp_bool;
5664    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool);
5665}
5666// For given mem_info, set all ranges valid that intersect the [offset, end] range
5667// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
5668static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
5669    bool tmp_bool = false;
5670    MEMORY_RANGE map_range;
5671    map_range.linear = true;
5672    map_range.start = offset;
5673    map_range.end = end;
5674    for (auto &handle_range_pair : mem_info->bound_ranges) {
5675        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool)) {
5676            // TODO : WARN here if tmp_bool true?
5677            handle_range_pair.second.valid = true;
5678        }
5679    }
5680}
5681// Object with given handle is being bound to memory w/ given mem_info struct.
5682//  Track the newly bound memory range with given memoryOffset
5683//  Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
5684//  and non-linear range incorrectly overlap.
5685// Return true if an error is flagged and the user callback returns "true", otherwise false
5686// is_image indicates an image object, otherwise handle is for a buffer
5687// is_linear indicates a buffer or linear image
5688static bool InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
5689                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
5690    bool skip_call = false;
5691    MEMORY_RANGE range;
5692
5693    range.image = is_image;
5694    range.handle = handle;
5695    range.linear = is_linear;
5696    range.valid = mem_info->global_valid;
5697    range.memory = mem_info->mem;
5698    range.start = memoryOffset;
5699    range.size = memRequirements.size;
5700    range.end = memoryOffset + memRequirements.size - 1;
5701    range.aliases.clear();
5702    // Update memory aliasing
5703    // Save aliased ranges so they can be copied into the final map entry below. This can't be done inside the
5704    //  loop (the final pointer doesn't exist yet), and inserting first would make the loop check the range against itself
5705    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
5706    for (auto &obj_range_pair : mem_info->bound_ranges) {
5707        auto check_range = &obj_range_pair.second;
5708        bool intersection_error = false;
5709        if (rangesIntersect(dev_data, &range, check_range, &intersection_error)) {
5710            skip_call |= intersection_error;
5711            range.aliases.insert(check_range);
5712            tmp_alias_ranges.insert(check_range);
5713        }
5714    }
5715    mem_info->bound_ranges[handle] = std::move(range);
5716    for (auto tmp_range : tmp_alias_ranges) {
5717        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
5718    }
5719    if (is_image)
5720        mem_info->bound_images.insert(handle);
5721    else
5722        mem_info->bound_buffers.insert(handle);
5723
5724    return skip_call;
5725}
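
// Illustrative sketch: binding a buffer at offset 0x0 with size 0x1000 and
// then an image at offset 0x800 into the same VkDeviceMemory yields two
// MEMORY_RANGE entries with overlapping [start, end] spans, so each records
// the other in its aliases set; if exactly one of them is linear, the
// insertion above also reports the granularity violation.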
5726
5727static bool InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5728                                   VkMemoryRequirements mem_reqs, bool is_linear) {
5729    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear);
5730}
5731
5732static bool InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5733                                    VkMemoryRequirements mem_reqs) {
5734    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true);
5735}
5736
5737// Remove the MEMORY_RANGE struct for the given handle from mem_info's bound_ranges
5738//  is_image indicates if handle is for image or buffer
5739//  This function also removes the handle from the appropriate bound_images or
5740//  bound_buffers set and cleans up any aliases of the range being removed.
5741static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
5742    auto erase_range = &mem_info->bound_ranges[handle];
5743    for (auto alias_range : erase_range->aliases) {
5744        alias_range->aliases.erase(erase_range);
5745    }
5746    erase_range->aliases.clear();
5747    mem_info->bound_ranges.erase(handle);
5748    if (is_image) {
5749        mem_info->bound_images.erase(handle);
5750    } else {
5751        mem_info->bound_buffers.erase(handle);
5752    }
5753}
5754
5755static void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
5756
5757static void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
5758
5759VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
5760                                         const VkAllocationCallbacks *pAllocator) {
5761    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5762    std::unique_lock<std::mutex> lock(global_lock);
5763    if (!validateIdleBuffer(dev_data, buffer)) {
5764        // Clean up memory binding and range information for buffer
5765        auto buff_node = getBufferNode(dev_data, buffer);
5766        if (buff_node) {
5767            // Any bound cmd buffers are now invalid
5768            invalidateCommandBuffers(buff_node->cb_bindings,
5769                                     {reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
5770            auto mem_info = getMemObjInfo(dev_data, buff_node->binding.mem);
5771            if (mem_info) {
5772                RemoveBufferMemoryRange(reinterpret_cast<uint64_t &>(buffer), mem_info);
5773            }
5774            ClearMemoryObjectBindings(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5775            dev_data->bufferMap.erase(buff_node->buffer);
5776        }
5777        lock.unlock();
5778        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
5779    }
5780}
5781
5782static bool PreCallValidateDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE **buffer_view_state,
5783                                             VK_OBJECT *obj_struct) {
5784    *buffer_view_state = getBufferViewState(dev_data, buffer_view);
5785    *obj_struct = {reinterpret_cast<uint64_t &>(buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT};
5786    if (dev_data->instance_data->disabled.destroy_buffer_view)
5787        return false;
5788    bool skip = false;
5789    if (*buffer_view_state) {
5790        skip |= ValidateObjectNotInUse(dev_data, *buffer_view_state, *obj_struct, VALIDATION_ERROR_00701);
5791    }
5792    return skip;
5793}
5794
5795static void PostCallRecordDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE *buffer_view_state,
5796                                            VK_OBJECT obj_struct) {
5797    // Any bound cmd buffers are now invalid
5798    invalidateCommandBuffers(buffer_view_state->cb_bindings, obj_struct);
5799    dev_data->bufferViewMap.erase(buffer_view);
5800}
5801
5802VKAPI_ATTR void VKAPI_CALL
5803DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5804    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5805    // Common data objects used pre & post call
5806    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
5807    VK_OBJECT obj_struct;
5808    std::unique_lock<std::mutex> lock(global_lock);
5809    // Validate state before calling down chain, update common data if we'll be calling down chain
5810    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
5811    if (!skip) {
5812        lock.unlock();
5813        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
5814        lock.lock();
5815        PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
5816    }
5817}
5818
5819static bool PreCallValidateDestroyImage(layer_data *dev_data, VkImage image, IMAGE_STATE **image_state, VK_OBJECT *obj_struct) {
5820    *image_state = getImageState(dev_data, image);
5821    *obj_struct = {reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT};
5822    if (dev_data->instance_data->disabled.destroy_image)
5823        return false;
5824    bool skip = false;
5825    if (*image_state) {
5826        skip |= ValidateObjectNotInUse(dev_data, *image_state, *obj_struct, VALIDATION_ERROR_00743);
5827    }
5828    return skip;
5829}
5830
5831static void PostCallRecordDestroyImage(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VK_OBJECT obj_struct) {
5832    invalidateCommandBuffers(image_state->cb_bindings, obj_struct);
5833    // Clean up memory mapping, bindings and range references for image
5834    auto mem_info = getMemObjInfo(dev_data, image_state->binding.mem);
5835    if (mem_info) {
5836        RemoveImageMemoryRange(obj_struct.handle, mem_info);
5837    }
5838    ClearMemoryObjectBindings(dev_data, obj_struct.handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
5839    // Remove image from imageMap
5840    dev_data->imageMap.erase(image);
5841
5842    const auto &sub_entry = dev_data->imageSubresourceMap.find(image);
5843    if (sub_entry != dev_data->imageSubresourceMap.end()) {
5844        for (const auto &pair : sub_entry->second) {
5845            dev_data->imageLayoutMap.erase(pair);
5846        }
5847        dev_data->imageSubresourceMap.erase(sub_entry);
5848    }
5849}
5850
5851VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5852    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5853    IMAGE_STATE *image_state = nullptr;
5854    VK_OBJECT obj_struct;
5855    std::unique_lock<std::mutex> lock(global_lock);
5856    bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
5857    if (!skip) {
5858        lock.unlock();
5859        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
5860        lock.lock();
5861        PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
5862    }
5863}
5864
5865static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
5866                                const char *funcName) {
5867    bool skip_call = false;
5868    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
5869        skip_call = log_msg(
5870            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5871            reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, MEMTRACK_INVALID_MEM_TYPE, "MT",
5872            "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
5873            "type (0x%X) of this memory object 0x%" PRIx64 ".",
5874            funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, reinterpret_cast<const uint64_t &>(mem_info->mem));
5875    }
5876    return skip_call;
5877}
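
// Worked example (illustrative): if vkGetBufferMemoryRequirements reports
// memoryTypeBits == 0x6 (types 1 and 2 allowed) but the memory object was
// allocated with memoryTypeIndex == 0, then ((1 << 0) & 0x6) == 0 and the
// MEMTRACK_INVALID_MEM_TYPE error above is reported.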
5878
5879VKAPI_ATTR VkResult VKAPI_CALL
5880BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5881    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5882    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5883    std::unique_lock<std::mutex> lock(global_lock);
5884    // Track objects tied to memory
5885    uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
5886    bool skip_call = SetMemBinding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5887    auto buffer_node = getBufferNode(dev_data, buffer);
5888    if (buffer_node) {
5889        VkMemoryRequirements memRequirements;
5890        dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, &memRequirements);
5891        buffer_node->binding.mem = mem;
5892        buffer_node->binding.offset = memoryOffset;
5893        buffer_node->binding.size = memRequirements.size;
5894
5895        // Track and validate bound memory range information
5896        auto mem_info = getMemObjInfo(dev_data, mem);
5897        if (mem_info) {
5898            skip_call |= InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, memRequirements);
5899            skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "BindBufferMemory");
5900        }
5901
5902        // Validate memory requirements alignment
5903        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
5904            skip_call |=
5905                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
5906                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
5907                        "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
5908                        "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
5909                        ", returned from a call to vkGetBufferMemoryRequirements with buffer",
5910                        memoryOffset, memRequirements.alignment);
5911        }
5912
5913        // Validate device limits alignments
5914        static const VkBufferUsageFlagBits usage_list[3] = {
5915            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
5916            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
5917            VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
5918        static const char *memory_type[3] = {"texel",
5919                                             "uniform",
5920                                             "storage"};
5921        static const char *offset_name[3] = {
5922            "minTexelBufferOffsetAlignment",
5923            "minUniformBufferOffsetAlignment",
5924            "minStorageBufferOffsetAlignment"
5925        };
5926
5927        // Keep this array in sync with usage_list, memory_type, and offset_name above
5928        const VkDeviceSize offset_requirement[3] = {
5929            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
5930            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
5931            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment
5932        };
5933        VkBufferUsageFlags usage = buffer_node->createInfo.usage;
5934
5935        for (int i = 0; i < 3; i++) {
5936            if (usage & usage_list[i]) {
5937                if (vk_safe_modulo(memoryOffset, offset_requirement[i]) != 0) {
5938                    skip_call |=
5939                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5940                                0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
5941                                "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5942                                "device limit %s 0x%" PRIxLEAST64,
5943                                memory_type[i], memoryOffset, offset_name[i], offset_requirement[i]);
5944                }
5945            }
5946        }
5947    }
5948    print_mem_list(dev_data);
5949    lock.unlock();
5950    if (!skip_call) {
5951        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
5952    }
5953    return result;
5954}
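
// Worked alignment example (illustrative): with VkMemoryRequirements::alignment
// == 0x100, a memoryOffset of 0x180 fails the modulo check above and triggers
// DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, while 0x200 passes; the same test is
// then repeated against each device limit (minTexelBufferOffsetAlignment, etc.)
// that applies to the buffer's usage flags.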
5955
5956VKAPI_ATTR void VKAPI_CALL
5957GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
5958    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5959    // TODO : What to track here?
5960    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
5961    my_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5962}
5963
5964VKAPI_ATTR void VKAPI_CALL
5965GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5966    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5967    // TODO : What to track here?
5968    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
5969    my_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
5970}
5971
5972static bool PreCallValidateDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE **image_view_state,
5973                                            VK_OBJECT *obj_struct) {
5974    *image_view_state = getImageViewState(dev_data, image_view);
5975    *obj_struct = {reinterpret_cast<uint64_t &>(image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT};
5976    if (dev_data->instance_data->disabled.destroy_image_view)
5977        return false;
5978    bool skip = false;
5979    if (*image_view_state) {
5980        skip |= ValidateObjectNotInUse(dev_data, *image_view_state, *obj_struct, VALIDATION_ERROR_00776);
5981    }
5982    return skip;
5983}
5984
5985static void PostCallRecordDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE *image_view_state,
5986                                           VK_OBJECT obj_struct) {
5987    // Any bound cmd buffers are now invalid
5988    invalidateCommandBuffers(image_view_state->cb_bindings, obj_struct);
5989    dev_data->imageViewMap.erase(image_view);
5990}
5991
5992VKAPI_ATTR void VKAPI_CALL
5993DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5994    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5995    // Common data objects used pre & post call
5996    IMAGE_VIEW_STATE *image_view_state = nullptr;
5997    VK_OBJECT obj_struct;
5998    std::unique_lock<std::mutex> lock(global_lock);
5999    bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
6000    if (!skip) {
6001        lock.unlock();
6002        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
6003        lock.lock();
6004        PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
6005    }
6006}
6007
6008VKAPI_ATTR void VKAPI_CALL
6009DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
6010    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6011
6012    std::unique_lock<std::mutex> lock(global_lock);
6013    my_data->shaderModuleMap.erase(shaderModule);
6014    lock.unlock();
6015
6016    my_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
6017}
6018
6019static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
6020                                           VK_OBJECT *obj_struct) {
6021    *pipeline_state = getPipelineState(dev_data, pipeline);
6022    *obj_struct = {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT};
6023    if (dev_data->instance_data->disabled.destroy_pipeline)
6024        return false;
6025    bool skip = false;
6026    if (*pipeline_state) {
6027        skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, VALIDATION_ERROR_00555);
6028    }
6029    return skip;
6030}
6031
6032static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
6033                                          VK_OBJECT obj_struct) {
6034    // Any bound cmd buffers are now invalid
6035    invalidateCommandBuffers(pipeline_state->cb_bindings, obj_struct);
6036    dev_data->pipelineMap.erase(pipeline);
6037}
6038
6039VKAPI_ATTR void VKAPI_CALL
6040DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
6041    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6042    PIPELINE_STATE *pipeline_state = nullptr;
6043    VK_OBJECT obj_struct;
6044    std::unique_lock<std::mutex> lock(global_lock);
6045    bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
6046    if (!skip) {
6047        lock.unlock();
6048        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
6049        lock.lock();
6050        PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
6051    }
6052}
6053
6054VKAPI_ATTR void VKAPI_CALL
6055DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
6056    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6057    std::unique_lock<std::mutex> lock(global_lock);
6058    dev_data->pipelineLayoutMap.erase(pipelineLayout);
6059    lock.unlock();
6060
6061    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
6062}
6063
6064static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
6065                                          VK_OBJECT *obj_struct) {
6066    *sampler_state = getSamplerState(dev_data, sampler);
6067    *obj_struct = {reinterpret_cast<uint64_t &>(sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT};
6068    if (dev_data->instance_data->disabled.destroy_sampler)
6069        return false;
6070    bool skip = false;
6071    if (*sampler_state) {
6072        skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, VALIDATION_ERROR_00837);
6073    }
6074    return skip;
6075}
6076
6077static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
6078                                         VK_OBJECT obj_struct) {
6079    // Any bound cmd buffers are now invalid
6080    if (sampler_state)
6081        invalidateCommandBuffers(sampler_state->cb_bindings, obj_struct);
6082    dev_data->samplerMap.erase(sampler);
6083}
6084
6085VKAPI_ATTR void VKAPI_CALL
6086DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
6087    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6088    SAMPLER_STATE *sampler_state = nullptr;
6089    VK_OBJECT obj_struct;
6090    std::unique_lock<std::mutex> lock(global_lock);
6091    bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
6092    if (!skip) {
6093        lock.unlock();
6094        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
6095        lock.lock();
6096        PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
6097    }
6098}
6099
6100VKAPI_ATTR void VKAPI_CALL
6101DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
6102    // TODO : Clean up any internal data structures using this obj.
6103    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6104        ->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
6105}
6106
6107static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
6108                                                 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
6109    *desc_pool_state = getDescriptorPoolState(dev_data, pool);
6110    *obj_struct = {reinterpret_cast<uint64_t &>(pool), VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT};
6111    if (dev_data->instance_data->disabled.destroy_descriptor_pool)
6112        return false;
6113    bool skip = false;
6114    if (*desc_pool_state) {
6115        skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, VALIDATION_ERROR_00901);
6116    }
6117    return skip;
6118}
6119
6120static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
6121                                                DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
6122    // Any bound cmd buffers are now invalid
6123    invalidateCommandBuffers(desc_pool_state->cb_bindings, obj_struct);
6124    // Free sets that were in this pool
6125    for (auto ds : desc_pool_state->sets) {
6126        freeDescriptorSet(dev_data, ds);
6127    }
6128    dev_data->descriptorPoolMap.erase(descriptorPool);
6129}
6130
6131VKAPI_ATTR void VKAPI_CALL
6132DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
6133    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6134    DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
6135    VK_OBJECT obj_struct;
6136    std::unique_lock<std::mutex> lock(global_lock);
6137    bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
6138    if (!skip) {
6139        lock.unlock();
6140        dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
6141        lock.lock();
6142        PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
6143    }
6144}
6145// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result
6146//  A secondary command buffer is only flagged as in use if its primary is also
6147//  in-flight; otherwise the secondary is not considered busy
6148// This function is only valid at a point when cmdBuffer is being reset or freed
6149static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
6150                                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
6151    bool skip_call = false;
6152    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
6153        // Primary CB or secondary where primary is also in-flight is an error
6154        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
6155            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
6156            skip_call |=
6157                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6158                        reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, error_code, "DS",
6159                        "Attempt to %s command buffer (0x%" PRIxLEAST64 ") which is in use. %s", action,
6160                        reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), validation_error_map[error_code]);
6161        }
6162    }
6163    return skip_call;
6164}
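
// Illustrative scenario: freeing a secondary command buffer whose primary has
// been submitted and is still executing trips the error above; the secondary
// is only considered free once its primary leaves the global in-flight set.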
6165
6166// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
6167static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
6168                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
6169    bool skip_call = false;
6170    for (auto cmd_buffer : pPool->commandBuffers) {
6171        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
6172            skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action, error_code);
6173        }
6174    }
6175    return skip_call;
6176}
6177
6178static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
6179    for (auto cmd_buffer : pPool->commandBuffers) {
6180        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
6181    }
6182}
6183
6184VKAPI_ATTR void VKAPI_CALL
6185FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
6186    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6187    bool skip_call = false;
6188    std::unique_lock<std::mutex> lock(global_lock);
6189
6190    for (uint32_t i = 0; i < commandBufferCount; i++) {
6191        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
6192        // Verify that command buffer is not in flight before it can be freed below
6193        if (cb_node) {
6194            skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_00096);
6195        }
6196    }
6197
6198    if (skip_call)
6199        return;
6200
6201    auto pPool = getCommandPoolNode(dev_data, commandPool);
6202    for (uint32_t i = 0; i < commandBufferCount; i++) {
6203        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
6204        // Delete CB information structure, and remove from commandBufferMap
6205        if (cb_node) {
6206            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
6207            // reset prior to delete for data clean-up
6208            resetCB(dev_data, cb_node->commandBuffer);
6209            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
6210            delete cb_node;
6211        }
6212
6213        // Remove commandBuffer reference from commandPoolMap
6214        pPool->commandBuffers.remove(pCommandBuffers[i]);
6215    }
6216    printCBList(dev_data);
6217    lock.unlock();
6218
6219    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
6220}
6221
6222VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
6223                                                 const VkAllocationCallbacks *pAllocator,
6224                                                 VkCommandPool *pCommandPool) {
6225    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6226
6227    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
6228
6229    if (VK_SUCCESS == result) {
6230        std::lock_guard<std::mutex> lock(global_lock);
6231        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
6232        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
6233    }
6234    return result;
6235}
6236
6237VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
6238                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
6240    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6241    VkResult result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
6242    if (result == VK_SUCCESS) {
6243        std::lock_guard<std::mutex> lock(global_lock);
6244        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
6245        qp_node->createInfo = *pCreateInfo;
6246    }
6247    return result;
6248}
6249
6250static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE **cp_state) {
6251    *cp_state = getCommandPoolNode(dev_data, pool);
6252    if (dev_data->instance_data->disabled.destroy_command_pool)
6253        return false;
6254    bool skip = false;
6255    if (*cp_state) {
6256        // Verify that command buffers in pool are complete (not in-flight)
6257        skip |= checkCommandBuffersInFlight(dev_data, *cp_state, "destroy command pool with", VALIDATION_ERROR_00077);
6258    }
6259    return skip;
6260}
6261
6262static void PostCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE *cp_state) {
6263    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
6264    clearCommandBuffersInFlight(dev_data, cp_state);
6265    for (auto cb : cp_state->commandBuffers) {
6266        clear_cmd_buf_and_mem_references(dev_data, cb);
6267        auto cb_node = getCBNode(dev_data, cb);
6268        // Remove references to this cb_node prior to delete
6269        // TODO : Need better solution here, resetCB?
6270        for (auto obj : cb_node->object_bindings) {
6271            removeCommandBufferBinding(dev_data, &obj, cb_node);
6272        }
6273        for (auto framebuffer : cb_node->framebuffers) {
6274            auto fb_state = getFramebufferState(dev_data, framebuffer);
6275            if (fb_state)
6276                fb_state->cb_bindings.erase(cb_node);
6277        }
6278        dev_data->commandBufferMap.erase(cb); // Remove this command buffer
6279        delete cb_node;                       // delete CB info structure
6280    }
6281    dev_data->commandPoolMap.erase(pool);
6282}
6283
6284// Destroy commandPool along with all of the commandBuffers allocated from that pool
6285VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
6286    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6287    COMMAND_POOL_NODE *cp_state = nullptr;
6288    std::unique_lock<std::mutex> lock(global_lock);
6289    bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool, &cp_state);
6290    if (!skip) {
6291        lock.unlock();
6292        dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
6293        lock.lock();
6294        PostCallRecordDestroyCommandPool(dev_data, commandPool, cp_state);
6295    }
6296}
6297
6298VKAPI_ATTR VkResult VKAPI_CALL
6299ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
6300    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6301    bool skip_call = false;
6302
6303    std::unique_lock<std::mutex> lock(global_lock);
6304    auto pPool = getCommandPoolNode(dev_data, commandPool);
6305    skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_00072);
6306    lock.unlock();
6307
6308    if (skip_call)
6309        return VK_ERROR_VALIDATION_FAILED_EXT;
6310
6311    VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
6312
6313    // Reset all of the CBs allocated from this pool
6314    if (VK_SUCCESS == result) {
6315        lock.lock();
6316        clearCommandBuffersInFlight(dev_data, pPool);
6317        for (auto cmdBuffer : pPool->commandBuffers) {
6318            resetCB(dev_data, cmdBuffer);
6319        }
6320        lock.unlock();
6321    }
6322    return result;
6323}
6324
6325VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
6326    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6327    bool skip_call = false;
6328    std::unique_lock<std::mutex> lock(global_lock);
6329    for (uint32_t i = 0; i < fenceCount; ++i) {
6330        auto pFence = getFenceNode(dev_data, pFences[i]);
6331        if (pFence && pFence->state == FENCE_INFLIGHT) {
6332            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6333                                 reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
6334                                 "Fence 0x%" PRIx64 " is in use.", reinterpret_cast<const uint64_t &>(pFences[i]));
6335        }
6336    }
6337    lock.unlock();
6338
6339    if (skip_call)
6340        return VK_ERROR_VALIDATION_FAILED_EXT;
6341
6342    VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
6343
6344    if (result == VK_SUCCESS) {
6345        lock.lock();
6346        for (uint32_t i = 0; i < fenceCount; ++i) {
6347            auto pFence = getFenceNode(dev_data, pFences[i]);
6348            if (pFence) {
6349                pFence->state = FENCE_UNSIGNALED;
6350            }
6351        }
6352        lock.unlock();
6353    }
6354
6355    return result;
6356}
6357
6358// For given cb_nodes, invalidate them and track object causing invalidation
6359void invalidateCommandBuffers(std::unordered_set<GLOBAL_CB_NODE *> cb_nodes, VK_OBJECT obj) {
6360    for (auto cb_node : cb_nodes) {
6361        cb_node->state = CB_INVALID;
6362        cb_node->broken_bindings.push_back(obj);
6363    }
6364}
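
// Illustrative scenario: destroying a VkImageView referenced by a recorded
// command buffer routes through this helper, moving that command buffer to
// CB_INVALID so a later submission of it can report the broken binding
// recorded here.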
6365
6366static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
6367                                              FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
6368    *framebuffer_state = getFramebufferState(dev_data, framebuffer);
6369    *obj_struct = {reinterpret_cast<uint64_t &>(framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT};
6370    if (dev_data->instance_data->disabled.destroy_framebuffer)
6371        return false;
6372    bool skip = false;
6373    if (*framebuffer_state) {
6374        skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, VALIDATION_ERROR_00422);
6375    }
6376    return skip;
6377}
6378
6379static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
6380                                             VK_OBJECT obj_struct) {
6381    invalidateCommandBuffers(framebuffer_state->cb_bindings, obj_struct);
6382    dev_data->frameBufferMap.erase(framebuffer);
6383}
6384
6385VKAPI_ATTR void VKAPI_CALL
6386DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
6387    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6388    FRAMEBUFFER_STATE *framebuffer_state = nullptr;
6389    VK_OBJECT obj_struct;
6390    std::unique_lock<std::mutex> lock(global_lock);
6391    bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
6392    if (!skip) {
6393        lock.unlock();
6394        dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
6395        lock.lock();
6396        PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
6397    }
6398}
6399
6400static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
6401                                             VK_OBJECT *obj_struct) {
6402    *rp_state = getRenderPassState(dev_data, render_pass);
6403    *obj_struct = {reinterpret_cast<uint64_t &>(render_pass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT};
6404    if (dev_data->instance_data->disabled.destroy_renderpass)
6405        return false;
6406    bool skip = false;
6407    if (*rp_state) {
6408        skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, VALIDATION_ERROR_00393);
6409    }
6410    return skip;
6411}
6412
6413static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
6414                                            VK_OBJECT obj_struct) {
6415    invalidateCommandBuffers(rp_state->cb_bindings, obj_struct);
6416    dev_data->renderPassMap.erase(render_pass);
6417}
6418
6419VKAPI_ATTR void VKAPI_CALL
6420DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
6421    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6422    RENDER_PASS_STATE *rp_state = nullptr;
6423    VK_OBJECT obj_struct;
6424    std::unique_lock<std::mutex> lock(global_lock);
6425    bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
6426    if (!skip) {
6427        lock.unlock();
6428        dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
6429        lock.lock();
6430        PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
6431    }
6432}
6433
6434VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
6435                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
6436    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6437    // TODO: Add check for VALIDATION_ERROR_00658
6438    // TODO: Add check for VALIDATION_ERROR_00666
6439    // TODO: Add check for VALIDATION_ERROR_00667
6440    // TODO: Add check for VALIDATION_ERROR_00668
6441    // TODO: Add check for VALIDATION_ERROR_00669
6442    VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
6443
6444    if (VK_SUCCESS == result) {
6445        std::lock_guard<std::mutex> lock(global_lock);
6446        // TODO : This doesn't create a deep copy of pQueueFamilyIndices, so that must be fixed if/when we want that data to remain valid
6447        dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_NODE>(new BUFFER_NODE(*pBuffer, pCreateInfo))));
6448    }
6449    return result;
6450}
6451
6452static bool PreCallValidateCreateBufferView(layer_data *dev_data, const VkBufferViewCreateInfo *pCreateInfo) {
6453    bool skip_call = false;
6454    BUFFER_NODE *buf_node = getBufferNode(dev_data, pCreateInfo->buffer);
6455    // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
6456    if (buf_node) {
6457        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buf_node, "vkCreateBufferView()");
6458        // In order to create a valid buffer view, the buffer must have been created with at least one of the
6459        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
6460        skip_call |= ValidateBufferUsageFlags(
6461            dev_data, buf_node, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
6462            VALIDATION_ERROR_00694, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
6463    }
6464    return skip_call;
6465}
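
// Illustrative sketch (editor's example, not layer code): a buffer intended to
// back a buffer view must be created with a texel-buffer usage bit, e.g.:
//
//     VkBufferCreateInfo buf_ci = {};
//     buf_ci.sType       = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     buf_ci.size        = 4096;
//     buf_ci.usage       = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
//     buf_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
//
// A view created against a bound buffer lacking both texel-buffer bits trips
// the VALIDATION_ERROR_00694 check above.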
6466
6467VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
6468                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
6469    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6470    std::unique_lock<std::mutex> lock(global_lock);
6471    bool skip_call = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
6472    lock.unlock();
6473    if (skip_call)
6474        return VK_ERROR_VALIDATION_FAILED_EXT;
6475    VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
6476    if (VK_SUCCESS == result) {
6477        lock.lock();
6478        dev_data->bufferViewMap[*pView] = unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
6479        lock.unlock();
6480    }
6481    return result;
6482}
6483
6484VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
6485                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
6486    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6487
6488    VkResult result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
6489
6490    if (VK_SUCCESS == result) {
6491        std::lock_guard<std::mutex> lock(global_lock);
6492        IMAGE_LAYOUT_NODE image_state;
6493        image_state.layout = pCreateInfo->initialLayout;
6494        image_state.format = pCreateInfo->format;
6495        dev_data->imageMap.insert(std::make_pair(*pImage, unique_ptr<IMAGE_STATE>(new IMAGE_STATE(*pImage, pCreateInfo))));
6496        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
6497        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
6498        dev_data->imageLayoutMap[subpair] = image_state;
6499    }
6500    return result;
6501}
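
// Illustrative sketch (editor's example, hypothetical values): for an image
// created with, say,
//
//     VkImageCreateInfo image_ci = {};
//     image_ci.sType         = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
//     image_ci.imageType     = VK_IMAGE_TYPE_2D;
//     image_ci.format        = VK_FORMAT_R8G8B8A8_UNORM;
//     image_ci.extent        = {64, 64, 1};
//     image_ci.mipLevels     = 1;
//     image_ci.arrayLayers   = 1;
//     image_ci.samples       = VK_SAMPLE_COUNT_1_BIT;
//     image_ci.tiling        = VK_IMAGE_TILING_LINEAR;
//     image_ci.usage         = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
//     image_ci.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
//
// the record above stores layout = VK_IMAGE_LAYOUT_PREINITIALIZED under the
// whole-image ImageSubresourcePair, giving later layout-transition validation
// a known starting point.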
6502
6503static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
6504    /* expects global_lock to be held by caller */
6505
6506    auto image_state = getImageState(dev_data, image);
6507    if (image_state) {
6508        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
6509         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
6510         * the actual values.
6511         */
6512        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
6513            range->levelCount = image_state->createInfo.mipLevels - range->baseMipLevel;
6514        }
6515
6516        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
6517            range->layerCount = image_state->createInfo.arrayLayers - range->baseArrayLayer;
6518        }
6519    }
6520}
6521
6522// Return the correct layer/level counts if the caller used the special
6523// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
6524static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6525                                         VkImage image) {
6526    /* expects global_lock to be held by caller */
6527
6528    *levels = range.levelCount;
6529    *layers = range.layerCount;
6530    auto image_state = getImageState(dev_data, image);
6531    if (image_state) {
6532        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
6533            *levels = image_state->createInfo.mipLevels - range.baseMipLevel;
6534        }
6535        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
6536            *layers = image_state->createInfo.arrayLayers - range.baseArrayLayer;
6537        }
6538    }
6539}
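
// Illustrative sketch (editor's example, hypothetical values): for an image
// created with mipLevels = 10 and arrayLayers = 6, a range such as
//
//     VkImageSubresourceRange range = {};
//     range.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
//     range.baseMipLevel   = 3;
//     range.levelCount     = VK_REMAINING_MIP_LEVELS;   // resolves to 10 - 3 = 7
//     range.baseArrayLayer = 2;
//     range.layerCount     = VK_REMAINING_ARRAY_LAYERS; // resolves to 6 - 2 = 4
//
// is tracked internally with levelCount = 7 and layerCount = 4.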
6540
6541// For the given format verify that the aspect masks make sense
6542static bool ValidateImageAspectMask(layer_data *dev_data, VkImage image, VkFormat format, VkImageAspectFlags aspect_mask,
6543                                    const char *func_name) {
6544    bool skip = false;
6545    if (vk_format_is_color(format)) {
6546        if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
6547            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6548                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6549                            "%s: Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set. %s", func_name,
6550                            validation_error_map[VALIDATION_ERROR_00741]);
6551        }
6552        if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != aspect_mask) {
6553            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6554                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6555                            "%s: Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set. %s", func_name,
6556                            validation_error_map[VALIDATION_ERROR_00741]);
6557        }
6558    } else if (vk_format_is_depth_and_stencil(format)) {
6559        if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0) {
6560            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6561                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE", "%s: Depth/stencil image formats must have "
6562                                                                                        "at least one of VK_IMAGE_ASPECT_DEPTH_BIT "
6563                                                                                        "and VK_IMAGE_ASPECT_STENCIL_BIT set. %s",
6564                            func_name, validation_error_map[VALIDATION_ERROR_00741]);
6565        }
6566        if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != aspect_mask) {
6567            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6568                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6569                            "%s: Combination depth/stencil image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT and "
6570                            "VK_IMAGE_ASPECT_STENCIL_BIT set. %s",
6571                            func_name, validation_error_map[VALIDATION_ERROR_00741]);
6572        }
6573    } else if (vk_format_is_depth_only(format)) {
6574        if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) {
6575            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6576                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6577                            "%s: Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set. %s", func_name,
6578                            validation_error_map[VALIDATION_ERROR_00741]);
6579        }
6580        if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != aspect_mask) {
6581            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6582                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6583                            "%s: Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set. %s", func_name,
6584                            validation_error_map[VALIDATION_ERROR_00741]);
6585        }
6586    } else if (vk_format_is_stencil_only(format)) {
6587        if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT) {
6588            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6589                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6590                            "%s: Stencil-only image formats must have the VK_IMAGE_ASPECT_STENCIL_BIT set. %s", func_name,
6591                            validation_error_map[VALIDATION_ERROR_00741]);
6592        }
6593        if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != aspect_mask) {
6594            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6595                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6596                            "%s: Stencil-only image formats can have only the VK_IMAGE_ASPECT_STENCIL_BIT set. %s", func_name,
6597                            validation_error_map[VALIDATION_ERROR_00741]);
6598        }
6599    }
6600    return skip;
6601}
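
// Illustrative sketch (editor's examples) of the rules above:
//
//     VK_FORMAT_R8G8B8A8_UNORM    + COLOR_BIT               -> OK
//     VK_FORMAT_R8G8B8A8_UNORM    + DEPTH_BIT               -> error (color formats need exactly COLOR_BIT)
//     VK_FORMAT_D24_UNORM_S8_UINT + DEPTH_BIT               -> OK (at least one of depth/stencil)
//     VK_FORMAT_D24_UNORM_S8_UINT + (DEPTH_BIT | COLOR_BIT) -> error (extra bit set)
//     VK_FORMAT_S8_UINT           + STENCIL_BIT             -> OK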
6602
6603static bool PreCallValidateCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *create_info) {
6604    bool skip = false;
6605    IMAGE_STATE *image_state = getImageState(dev_data, create_info->image);
6606    if (image_state) {
6607        skip |= ValidateImageUsageFlags(
6608            dev_data, image_state, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
6609                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
6610            false, -1, "vkCreateImageView()",
6611            "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT]_BIT");
6612        // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
6613        skip |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCreateImageView()");
6614        // Checks imported from image layer
6615        if (create_info->subresourceRange.baseMipLevel >= image_state->createInfo.mipLevels) {
6616            std::stringstream ss;
6617            ss << "vkCreateImageView called with baseMipLevel " << create_info->subresourceRange.baseMipLevel << " for image "
6618               << create_info->image << " that only has " << image_state->createInfo.mipLevels << " mip levels.";
6619            skip |=
6620                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6621                        VALIDATION_ERROR_00768, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00768]);
6622        }
6623        if (create_info->subresourceRange.baseArrayLayer >= image_state->createInfo.arrayLayers) {
6624            std::stringstream ss;
6625            ss << "vkCreateImageView called with baseArrayLayer " << create_info->subresourceRange.baseArrayLayer << " for image "
6626               << create_info->image << " that only has " << image_state->createInfo.arrayLayers << " array layers.";
6627            skip |=
6628                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6629                        VALIDATION_ERROR_00769, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00769]);
6630        }
6631        // TODO: Need new valid usage language for levelCount == 0 & layerCount == 0
6632        if (!create_info->subresourceRange.levelCount) {
6633            std::stringstream ss;
6634            ss << "vkCreateImageView called with 0 in pCreateInfo->subresourceRange.levelCount.";
6635            skip |=
6636                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6637                        VALIDATION_ERROR_00768, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00768]);
6638        }
6639        if (!create_info->subresourceRange.layerCount) {
6640            std::stringstream ss;
6641            ss << "vkCreateImageView called with 0 in pCreateInfo->subresourceRange.layerCount.";
6642            skip |=
6643                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6644                        VALIDATION_ERROR_00769, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00769]);
6645        }
6646
6647        VkImageCreateFlags image_flags = image_state->createInfo.flags;
6648        VkFormat image_format = image_state->createInfo.format;
6649        VkFormat view_format = create_info->format;
6650        VkImageAspectFlags aspect_mask = create_info->subresourceRange.aspectMask;
6651
6652        // Validate VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT state
6653        if (image_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
6654            // Format MUST be compatible (in the same format compatibility class) as the format the image was created with
6655            if (vk_format_get_compatibility_class(image_format) != vk_format_get_compatibility_class(view_format)) {
6656                std::stringstream ss;
6657                ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
6658                   << " is not in the same format compatibility class as image (" << (uint64_t)create_info->image << ")  format "
6659                   << string_VkFormat(image_format) << ".  Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT "
6660                   << "can support ImageViews with differing formats but they must be in the same compatibility class.";
6661                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6662                                VALIDATION_ERROR_02171, "IMAGE", "%s %s", ss.str().c_str(),
6663                                validation_error_map[VALIDATION_ERROR_02171]);
6664            }
6665        } else {
6666            // Format MUST be IDENTICAL to the format the image was created with
6667            if (image_format != view_format) {
6668                std::stringstream ss;
6669                ss << "vkCreateImageView() format " << string_VkFormat(view_format) << " differs from image "
6670                   << (uint64_t)create_info->image << " format " << string_VkFormat(image_format)
6671                   << ".  Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT was set on image creation.";
6672                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6673                                VALIDATION_ERROR_02172, "IMAGE", "%s %s", ss.str().c_str(),
6674                                validation_error_map[VALIDATION_ERROR_02172]);
6675            }
6676        }
6677
6678        // Validate correct image aspect bits for desired formats and format consistency
6679        skip |= ValidateImageAspectMask(dev_data, image_state->image, image_format, aspect_mask, "vkCreateImageView()");
6680        if (vk_format_is_color(image_format) && !vk_format_is_color(view_format)) {
6681            std::stringstream ss;
6682            ss << "vkCreateImageView: The image view's format can differ from the parent image's format, but both must be "
6683               << "color formats.  ImageFormat is " << string_VkFormat(image_format) << " ImageViewFormat is "
6684               << string_VkFormat(view_format);
6685            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6686                            (uint64_t)create_info->image, __LINE__, VALIDATION_ERROR_02171, "IMAGE", "%s %s", ss.str().c_str(),
6687                            validation_error_map[VALIDATION_ERROR_02171]);
6688            // TODO:  Uncompressed formats are compatible if they occupy they same number of bits per pixel.
6689            // TODO:  Uncompressed formats are compatible if they occupy the same number of bits per pixel.
6690            //        the uncompressed pixels (e.g. signed vs. unsigned, or sRGB vs. UNORM encoding).
6691        }
6692    }
6693    return skip;
6694}
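
// Illustrative sketch (editor's example, hypothetical formats): an image
// created with VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT and VK_FORMAT_R8G8B8A8_UNORM
// may be viewed as VK_FORMAT_R8G8B8A8_SRGB (same 32-bit compatibility class),
// but not as VK_FORMAT_R16G16B16A16_UNORM (different class):
//
//     VkImageViewCreateInfo view_ci = {};
//     view_ci.sType            = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
//     view_ci.image            = image;  // created with MUTABLE_FORMAT_BIT, R8G8B8A8_UNORM
//     view_ci.viewType         = VK_IMAGE_VIEW_TYPE_2D;
//     view_ci.format           = VK_FORMAT_R8G8B8A8_SRGB;  // same class -> allowed
//     view_ci.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//
// Without the mutable bit, the view format must match the image format exactly.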
6695
6696static inline void PostCallRecordCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *create_info, VkImageView view) {
6697    dev_data->imageViewMap[view] = unique_ptr<IMAGE_VIEW_STATE>(new IMAGE_VIEW_STATE(view, create_info));
6698    ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[view].get()->create_info.subresourceRange, create_info->image);
6699}
6700
6701VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6702                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
6703    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6704    std::unique_lock<std::mutex> lock(global_lock);
6705    bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
6706    lock.unlock();
6707    if (skip)
6708        return VK_ERROR_VALIDATION_FAILED_EXT;
6709    VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
6710    if (VK_SUCCESS == result) {
6711        lock.lock();
6712        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
6713        lock.unlock();
6714    }
6715
6716    return result;
6717}
6718
6719VKAPI_ATTR VkResult VKAPI_CALL
6720CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
6721    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6722    VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
6723    if (VK_SUCCESS == result) {
6724        std::lock_guard<std::mutex> lock(global_lock);
6725        auto &fence_node = dev_data->fenceMap[*pFence];
6726        fence_node.fence = *pFence;
6727        fence_node.createInfo = *pCreateInfo;
6728        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
6729    }
6730    return result;
6731}
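
// Illustrative sketch (editor's example): a fence created pre-signaled starts
// life in the FENCE_RETIRED state rather than FENCE_UNSIGNALED:
//
//     VkFenceCreateInfo fence_ci = {};
//     fence_ci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
//     fence_ci.flags = VK_FENCE_CREATE_SIGNALED_BIT;  // tracked as FENCE_RETIRED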
6732
6733// TODO handle pipeline caches
6734VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6735                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6736    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6737    VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6738    return result;
6739}
6740
6741VKAPI_ATTR void VKAPI_CALL
6742DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
6743    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6744    dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
6745}
6746
6747VKAPI_ATTR VkResult VKAPI_CALL
6748GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
6749    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6750    VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6751    return result;
6752}
6753
6754VKAPI_ATTR VkResult VKAPI_CALL
6755MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
6756    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6757    VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6758    return result;
6759}
6760
6761// utility function to set collective state for pipeline
6762void set_pipeline_state(PIPELINE_STATE *pPipe) {
6763    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
6764    if (pPipe->graphicsPipelineCI.pColorBlendState) {
6765        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6766            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6767                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6768                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6769                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6770                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6771                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6772                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6773                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6774                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6775                    pPipe->blendConstantsEnabled = true;
6776                }
6777            }
6778        }
6779    }
6780}
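
// Illustrative sketch (editor's example, hypothetical state): an attachment like
//
//     VkPipelineColorBlendAttachmentState att = {};
//     att.blendEnable         = VK_TRUE;
//     att.srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;  // in [CONSTANT_COLOR, ONE_MINUS_CONSTANT_ALPHA]
//     att.dstColorBlendFactor = VK_BLEND_FACTOR_ONE;
//
// sets blendConstantsEnabled above, so draw-time validation can check that
// blend constants were actually bound for this pipeline.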
6781
6782VKAPI_ATTR VkResult VKAPI_CALL
6783CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6784                        const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6785                        VkPipeline *pPipelines) {
6786    VkResult result = VK_SUCCESS;
6787    // TODO What to do with pipelineCache?
6788    // The order of operations here is a little convoluted but gets the job done
6789    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
6790    //  2. Create state is then validated (which uses flags setup during shadowing)
6791    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
6792    bool skip_call = false;
6793    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6794    vector<PIPELINE_STATE *> pPipeState(count);
6795    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6796
6797    uint32_t i = 0;
6798    std::unique_lock<std::mutex> lock(global_lock);
6799
6800    for (i = 0; i < count; i++) {
6801        pPipeState[i] = new PIPELINE_STATE;
6802        pPipeState[i]->initGraphicsPipeline(&pCreateInfos[i]);
6803        pPipeState[i]->render_pass_ci.initialize(getRenderPassState(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
6804        pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6805
6806        skip_call |= verifyPipelineCreateState(dev_data, device, pPipeState, i);
6807    }
6808
6809    if (!skip_call) {
6810        lock.unlock();
6811        result =
6812            dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6813        lock.lock();
6814        for (i = 0; i < count; i++) {
6815            pPipeState[i]->pipeline = pPipelines[i];
6816            dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
6817        }
6818        lock.unlock();
6819    } else {
6820        for (i = 0; i < count; i++) {
6821            delete pPipeState[i];
6822        }
6823        lock.unlock();
6824        return VK_ERROR_VALIDATION_FAILED_EXT;
6825    }
6826    return result;
6827}
6828
6829VKAPI_ATTR VkResult VKAPI_CALL
6830CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6831                       const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6832                       VkPipeline *pPipelines) {
6833    VkResult result = VK_SUCCESS;
6834    bool skip_call = false;
6835
6836    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6837    vector<PIPELINE_STATE *> pPipeState(count);
6838    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6839
6840    uint32_t i = 0;
6841    std::unique_lock<std::mutex> lock(global_lock);
6842    for (i = 0; i < count; i++) {
6843        // TODO: Verify compute stage bits
6844
6845        // Create and initialize internal tracking data structure
6846        pPipeState[i] = new PIPELINE_STATE;
6847        pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
6848        pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6849        // memcpy(&pPipeState[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
6850
6851        // TODO: Add Compute Pipeline Verification
6852        skip_call |= !validate_compute_pipeline(dev_data->report_data, pPipeState[i], &dev_data->enabled_features,
6853                                                dev_data->shaderModuleMap);
6854        // skip_call |= verifyPipelineCreateState(dev_data, device, pPipeState[i]);
6855    }
6856
6857    if (!skip_call) {
6858        lock.unlock();
6859        result =
6860            dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6861        lock.lock();
6862        for (i = 0; i < count; i++) {
6863            pPipeState[i]->pipeline = pPipelines[i];
6864            dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
6865        }
6866        lock.unlock();
6867    } else {
6868        for (i = 0; i < count; i++) {
6869            // Clean up any locally allocated data structures
6870            delete pPipeState[i];
6871        }
6872        lock.unlock();
6873        return VK_ERROR_VALIDATION_FAILED_EXT;
6874    }
6875    return result;
6876}
6877
6878VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6879                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
6880    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6881    VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
6882    if (VK_SUCCESS == result) {
6883        std::lock_guard<std::mutex> lock(global_lock);
6884        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
6885    }
6886    return result;
6887}
6888
6889static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
6890    if (dev_data->instance_data->disabled.create_descriptor_set_layout)
6891        return false;
6892    return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info);
6893}
6894
6895static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
6896                                                    VkDescriptorSetLayout set_layout) {
6897    // TODO: Convert this to unique_ptr to avoid leaks
6898    dev_data->descriptorSetLayoutMap[set_layout] = new cvdescriptorset::DescriptorSetLayout(create_info, set_layout);
6899}
6900
6901VKAPI_ATTR VkResult VKAPI_CALL
6902CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6903                          const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
6904    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6905    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6906    std::unique_lock<std::mutex> lock(global_lock);
6907    bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
6908    if (!skip) {
6909        lock.unlock();
6910        result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6911        if (VK_SUCCESS == result) {
6912            lock.lock();
6913            PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
6914        }
6915    }
6916    return result;
6917}
6918
6919// Used by CreatePipelineLayout and CmdPushConstants.
6920// Note that the index argument is optional and only used by CreatePipelineLayout.
6921static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6922                                      const char *caller_name, uint32_t index = 0) {
6923    if (dev_data->instance_data->disabled.push_constant_range)
6924        return false;
6925    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
6926    bool skip_call = false;
6927    // Check that offset + size don't exceed the max.
6928    // Check that offset + size doesn't exceed the max.
6929    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
6930    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
6931        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
6932        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6933            skip_call |=
6934                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6935                        VALIDATION_ERROR_00877, "DS", "%s call has push constants index %u with offset %u and size %u that "
6936                                                      "exceeds this device's maxPushConstantsSize of %u. %s",
6937                        caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00877]);
6938        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6939            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6940                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
6941                                                                       "exceeds this device's maxPushConstantsSize of %u.",
6942                                 caller_name, offset, size, maxPushConstantsSize);
6943        } else {
6944            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6945                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6946        }
6947    }
6948    // size needs to be non-zero and a multiple of 4.
6949    // TODO : This check combines VALIDATION_ERROR_00878 & 879, need to break out separately
6950    if ((size == 0) || ((size & 0x3) != 0)) {
6951        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6952            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6953                                 VALIDATION_ERROR_00878, "DS", "%s call has push constants index %u with "
6954                                                               "size %u. Size must be greater than zero and a multiple of 4. %s",
6955                                 caller_name, index, size, validation_error_map[VALIDATION_ERROR_00878]);
6956        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6957            skip_call |=
6958                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6959                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
6960                                                              "size %u. Size must be greater than zero and a multiple of 4.",
6961                        caller_name, size);
6962        } else {
6963            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6964                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6965        }
6966    }
6967    // offset needs to be a multiple of 4.
6968    if ((offset & 0x3) != 0) {
6969        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6970            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6971                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
6972                                                                       "offset %u. Offset must be a multiple of 4.",
6973                                 caller_name, index, offset);
6974        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6975            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6976                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
6977                                                                       "offset %u. Offset must be a multiple of 4.",
6978                                 caller_name, offset);
6979        } else {
6980            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6981                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6982        }
6983    }
6984    return skip_call;
6985}
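
// Illustrative sketch (editor's examples), assuming a hypothetical device
// limit of maxPushConstantsSize = 128:
//
//     offset = 0,   size = 64  -> OK
//     offset = 120, size = 16  -> error: size exceeds 128 - 120 = 8
//     offset = 0,   size = 0   -> error: size must be non-zero
//     offset = 0,   size = 6   -> error: size must be a multiple of 4
//     offset = 2,   size = 4   -> error: offset must be a multiple of 4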
6986
6987VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
6988                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
6989    bool skip_call = false;
6990    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6991    // TODO : Add checks for VALIDATION_ERRORS 865-871
6992    // Push Constant Range checks
6993    uint32_t i, j;
6994    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6995        skip_call |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
6996                                               pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
6997        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
6998            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6999                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has no stageFlags set.");
7000        }
7001    }
7002    if (skip_call)
7003        return VK_ERROR_VALIDATION_FAILED_EXT;
7004
7005    // Each range has been validated; now check for overlap between ranges.
7006    // There's no explicit Valid Usage language against this, so issue a warning instead of an error.
7007    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
7008        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
7009            const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
7010            const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
7011            const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
7012            const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
7013            if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
7014                skip_call |=
7015                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7016                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
7017                                                                  "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
7018                            i, minA, maxA, j, minB, maxB);
7019            }
7020        }
7021    }
7022
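    // Illustrative sketch (editor's example): hypothetical ranges 0:[0, 16) and 1:[8, 24)
    // give minA=0, maxA=16, minB=8, maxB=24, so (minA <= minB && maxA > minB) holds and the
    // warning above fires; back-to-back ranges [0, 16) and [16, 32) do not overlap.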
7023    VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
7024    if (VK_SUCCESS == result) {
7025        std::lock_guard<std::mutex> lock(global_lock);
7026        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
7027        plNode.layout = *pPipelineLayout;
7028        plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
7029        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
7030            plNode.set_layouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
7031        }
7032        plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
7033        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
7034            plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
7035        }
7036    }
7037    return result;
7038}
7039
7040VKAPI_ATTR VkResult VKAPI_CALL
7041CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
7042                     VkDescriptorPool *pDescriptorPool) {
7043    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
7044    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
7045    if (VK_SUCCESS == result) {
7046        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
7047                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
7048                    (uint64_t)*pDescriptorPool))
7049            return VK_ERROR_VALIDATION_FAILED_EXT;
7050        DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
7051        if (NULL == pNewNode) {
7052            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
7053                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
7054                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
7055                return VK_ERROR_VALIDATION_FAILED_EXT;
7056        } else {
7057            std::lock_guard<std::mutex> lock(global_lock);
7058            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
7059        }
7060    } else {
7061        // TODO: Is any cleanup needed if pool creation fails?
7062    }
7063    return result;
7064}
7065
7066VKAPI_ATTR VkResult VKAPI_CALL
7067ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
7068    // TODO : Add checks for VALIDATION_ERROR_00928
7069    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
7070    VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
7071    if (VK_SUCCESS == result) {
7072        std::lock_guard<std::mutex> lock(global_lock);
7073        clearDescriptorPool(dev_data, device, descriptorPool, flags);
7074    }
7075    return result;
7076}
7077// Ensure the pool contains enough descriptors and descriptor sets to satisfy
7078// an allocation request. Fills common_data with the total number of descriptors of each type required,
7079// as well as DescriptorSetLayout ptrs used for later update.
7080static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
7081                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
7082    if (dev_data->instance_data->disabled.allocate_descriptor_sets)
7083        return false;
7084    // All state checks for AllocateDescriptorSets are done in a single function
7085    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
7086}
7087// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
7088static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
7089                                                 VkDescriptorSet *pDescriptorSets,
7090                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
7091    // All the updates are contained in a single cvdescriptorset function
7092    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
7093                                                   &dev_data->setMap, dev_data);
7094}
7095
7096VKAPI_ATTR VkResult VKAPI_CALL
7097AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
7098    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
7099    std::unique_lock<std::mutex> lock(global_lock);
7100    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
7101    bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
7102    lock.unlock();
7103
7104    if (skip_call)
7105        return VK_ERROR_VALIDATION_FAILED_EXT;
7106
7107    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
7108
7109    if (VK_SUCCESS == result) {
7110        lock.lock();
7111        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
7112        lock.unlock();
7113    }
7114    return result;
7115}
7116// Verify state before freeing DescriptorSets
7117static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
7118                                              const VkDescriptorSet *descriptor_sets) {
7119    if (dev_data->instance_data->disabled.free_descriptor_sets)
7120        return false;
7121    bool skip_call = false;
7122    // First make sure sets being destroyed are not currently in-use
7123    for (uint32_t i = 0; i < count; ++i)
7124        skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
7125
7126    DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
7127    if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
7128        // Can't Free from a NON_FREE pool
7129        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
7130                             reinterpret_cast<uint64_t &>(pool), __LINE__, VALIDATION_ERROR_00922, "DS",
7131                             "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
7132                             "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
7133                             validation_error_map[VALIDATION_ERROR_00922]);
7134    }
7135    return skip_call;
7136}
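
// Illustrative sketch (editor's example): freeing individual sets is only
// legal from a pool created with the free-descriptor-set flag, e.g.:
//
//     VkDescriptorPoolSize pool_size = {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 8};
//     VkDescriptorPoolCreateInfo pool_ci = {};
//     pool_ci.sType         = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
//     pool_ci.flags         = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
//     pool_ci.maxSets       = 8;
//     pool_ci.poolSizeCount = 1;
//     pool_ci.pPoolSizes    = &pool_size;
//
// Omitting that flag restricts the app to vkResetDescriptorPool() and makes
// any vkFreeDescriptorSets() call trip VALIDATION_ERROR_00922 above.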
7137// Sets have been removed from the pool so update underlying state
7138static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
7139                                             const VkDescriptorSet *descriptor_sets) {
7140    DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
7141    // Update available descriptor sets in pool
7142    pool_state->availableSets += count;
7143
7144    // For each freed descriptor set, add its resources back into the pool as available and remove it from the pool and setMap
7145    for (uint32_t i = 0; i < count; ++i) {
7146        auto set_state = dev_data->setMap[descriptor_sets[i]];
7147        uint32_t type_index = 0, descriptor_count = 0;
7148        for (uint32_t j = 0; j < set_state->GetBindingCount(); ++j) {
7149            type_index = static_cast<uint32_t>(set_state->GetTypeFromIndex(j));
7150            descriptor_count = set_state->GetDescriptorCountFromIndex(j);
7151            pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
7152        }
7153        freeDescriptorSet(dev_data, set_state);
7154        pool_state->sets.erase(set_state);
7155    }
7156}
7157
7158VKAPI_ATTR VkResult VKAPI_CALL
7159FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
7160    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
7161    // Make sure that no sets being destroyed are in-flight
7162    std::unique_lock<std::mutex> lock(global_lock);
7163    bool skip_call = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
7164    lock.unlock();
7165
7166    if (skip_call)
7167        return VK_ERROR_VALIDATION_FAILED_EXT;
7168    VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
7169    if (VK_SUCCESS == result) {
7170        lock.lock();
7171        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
7172        lock.unlock();
7173    }
7174    return result;
7175}
7176// TODO : This is a Proof-of-concept for core validation architecture
7177//  Really we'll want to break out these functions to separate files but
7178//  keeping it all together here to prove out design
7179// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
7180static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
7181                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
7182                                                const VkCopyDescriptorSet *pDescriptorCopies) {
7183    if (dev_data->instance_data->disabled.update_descriptor_sets)
7184        return false;
7185    // First thing to do is perform map look-ups.
7186    // NOTE : UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
7187    //  so we can't do a single map look-up up-front; instead the look-ups are done individually in the functions below
7188
7189    // Now make call(s) that validate state, but don't perform state updates in this function
7190    // Note: DescriptorSets is unique here in that we don't yet have an instance, so use a helper function in the
7191    //  namespace that parses the params and makes calls into the specific class instances
7192    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
7193                                                         descriptorCopyCount, pDescriptorCopies);
7194}
7195// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
7196static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
7197                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
7198                                               const VkCopyDescriptorSet *pDescriptorCopies) {
7199    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
7200                                                 pDescriptorCopies);
7201}
7202
7203VKAPI_ATTR void VKAPI_CALL
7204UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
7205                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
7206    // The only map look-up at the top level is for the device-level layer_data
7207    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
7208    std::unique_lock<std::mutex> lock(global_lock);
7209    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
7210                                                         pDescriptorCopies);
7211    lock.unlock();
7212    if (!skip_call) {
7213        dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
7214                                                      pDescriptorCopies);
7215        lock.lock();
7216        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
7217        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
7218                                           pDescriptorCopies);
7219    }
7220}
7221
7222VKAPI_ATTR VkResult VKAPI_CALL
7223AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
7224    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
7225    VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
7226    if (VK_SUCCESS == result) {
7227        std::unique_lock<std::mutex> lock(global_lock);
7228        auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool);
7229
7230        if (pPool) {
7231            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
7232                // Add command buffer to its commandPool map
7233                pPool->commandBuffers.push_back(pCommandBuffer[i]);
7234                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
7235                // Add command buffer to map
7236                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
7237                resetCB(dev_data, pCommandBuffer[i]);
7238                pCB->createInfo = *pCreateInfo;
7239                pCB->device = device;
7240            }
7241        }
7242        printCBList(dev_data);
7243        lock.unlock();
7244    }
7245    return result;
7246}
7247
7248// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
7249static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
7250    addCommandBufferBinding(&fb_state->cb_bindings,
7251                            {reinterpret_cast<uint64_t &>(fb_state->framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT},
7252                            cb_state);
7253    for (auto attachment : fb_state->attachments) {
7254        auto view_state = attachment.view_state;
7255        if (view_state) {
7256            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
7257        }
7258        auto rp_state = getRenderPassState(dev_data, fb_state->createInfo.renderPass);
7259        if (rp_state) {
7260            addCommandBufferBinding(
7261                &rp_state->cb_bindings,
7262                {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state);
7263        }
7264    }
7265}
7266
7267VKAPI_ATTR VkResult VKAPI_CALL
7268BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
7269    bool skip_call = false;
7270    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7271    std::unique_lock<std::mutex> lock(global_lock);
7272    // Validate command buffer level
7273    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
7274    if (cb_node) {
7275        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
7276        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
7277            skip_call |=
7278                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7279                        (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
7280                        "Calling vkBeginCommandBuffer() on active command buffer 0x%p before it has completed. "
7281                        "You must check command buffer fence before this call.",
7282                        commandBuffer);
7283        }
7284        clear_cmd_buf_and_mem_references(dev_data, cb_node);
7285        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
7286            // Secondary Command Buffer
7287            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
7288            if (!pInfo) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.",
                            reinterpret_cast<void *>(commandBuffer));
            } else {
                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
                        skip_call |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must specify a valid renderpass parameter.",
                            reinterpret_cast<void *>(commandBuffer));
                    }
                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
                        skip_call |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) may perform better if a "
                            "valid framebuffer parameter is specified.",
                            reinterpret_cast<void *>(commandBuffer));
                    } else {
                        string errorString = "";
                        auto framebuffer = getFramebufferState(dev_data, pInfo->framebuffer);
                        if (framebuffer) {
                            if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
                                !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
                                                                 getRenderPassState(dev_data, pInfo->renderPass)->createInfo.ptr(),
                                                                 errorString)) {
                                // renderPass that framebuffer was created with must be compatible with local renderPass
                                skip_call |= log_msg(
                                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
                                    __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                                    "vkBeginCommandBuffer(): Secondary Command "
                                    "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
                                    "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                                    reinterpret_cast<void *>(commandBuffer), reinterpret_cast<const uint64_t &>(pInfo->renderPass),
                                    reinterpret_cast<const uint64_t &>(pInfo->framebuffer),
                                    reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass), errorString.c_str());
                            }
                            // Connect this framebuffer and its children to this cmdBuffer
                            AddFramebufferBinding(dev_data, cb_node, framebuffer);
                        }
                    }
                }
                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
                     dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
                                         __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                                         "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
                                         "VK_QUERY_CONTROL_PRECISE_BIT if occlusion queries are disabled or the device does not "
                                         "support precise occlusion queries.",
                                         reinterpret_cast<void *>(commandBuffer));
                }
            }
            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
                auto renderPass = getRenderPassState(dev_data, pInfo->renderPass);
                if (renderPass) {
                    if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
                        skip_call |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have a subpass index (%d) "
                            "that is less than the number of subpasses (%d).",
                            (void *)commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount);
                    }
                }
            }
        }
        if (CB_RECORDING == cb_node->state) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%" PRIxLEAST64
                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
                        (uint64_t)commandBuffer);
        } else if (CB_RECORDED == cb_node->state ||
                   (CB_INVALID == cb_node->state && !cb_node->cmds.empty() &&
                    CMD_END == cb_node->cmds.back().type)) { // don't call back() on an empty command list
            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
            auto pPool = getCommandPoolNode(dev_data, cmdPool);
            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                            "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIxLEAST64
                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
            }
            resetCB(dev_data, commandBuffer);
        }
        // Set updated state here in case implicit reset occurs above
        cb_node->state = CB_RECORDING;
        cb_node->beginInfo = *pBeginInfo;
        if (cb_node->beginInfo.pInheritanceInfo) {
            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
            // If we are a secondary command-buffer and inheriting, update the items we should inherit.
            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                cb_node->activeRenderPass = getRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
                cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
            }
        }
    } else {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "In vkBeginCommandBuffer(): unable to find CommandBuffer Node for command buffer 0x%p!",
                             (void *)commandBuffer);
    }
    lock.unlock();
    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);

    return result;
}

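// Validate and record vkEndCommandBuffer(). Only secondary buffers begun with
// VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT may end while a render pass is active,
// and any queries still in progress at End time are reported as errors.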
VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
    bool skip_call = false;
    VkResult result = VK_SUCCESS;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
            // This needs spec clarification to update valid usage, see comments in PR:
            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
            skip_call |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer");
        }
        skip_call |= addCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
        for (auto query : pCB->activeQueries) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUERY, "DS",
                                 "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d",
                                 (uint64_t)(query.pool), query.index);
        }
    }
    if (!skip_call) {
        lock.unlock();
        result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
        lock.lock();
        if ((VK_SUCCESS == result) && pCB) { // pCB may be null if this command buffer was never tracked
            pCB->state = CB_RECORDED;
            // Reset CB status flags
            pCB->status = 0;
            printCB(dev_data, commandBuffer);
        }
    } else {
        result = VK_ERROR_VALIDATION_FAILED_EXT;
    }
    lock.unlock();
    return result;
}

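// Validate vkResetCommandBuffer(): an explicit reset requires the parent pool to have been
// created with VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, and the buffer must not be in flight.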
VKAPI_ATTR VkResult VKAPI_CALL
ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    VkCommandPool cmdPool = pCB->createInfo.commandPool;
    auto pPool = getCommandPoolNode(dev_data, cmdPool);
    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                             "Attempt to reset command buffer (0x%" PRIxLEAST64 ") created from command pool (0x%" PRIxLEAST64
                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
    }
    skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_00092);
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
    if (VK_SUCCESS == result) {
        lock.lock();
        dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
        resetCB(dev_data, commandBuffer);
        lock.unlock();
    }
    return result;
}

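// Validate and track vkCmdBindPipeline(). Compute pipelines may not be bound while a render
// pass is active; a successful bind updates the command buffer's lastBound state and connects
// the pipeline (and, for graphics, its render pass) to this command buffer for invalidation tracking.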
VKAPI_ATTR void VKAPI_CALL
CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
    bool skip = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= addCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (cb_state->activeRenderPass)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
                        (uint64_t)pipeline, (uint64_t)cb_state->activeRenderPass->renderPass);
        }

        PIPELINE_STATE *pipe_state = getPipelineState(dev_data, pipeline);
        if (pipe_state) {
            cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
            set_cb_pso_status(cb_state, pipe_state);
            set_pipeline_state(pipe_state);
            // Only track bindings for a pipeline that actually exists; this avoids dereferencing
            // a null pipe_state after the "doesn't exist" error below.
            addCommandBufferBinding(&pipe_state->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT}, cb_state);
            if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
                // Add binding for child renderpass
                auto rp_state = getRenderPassState(dev_data, pipe_state->graphicsPipelineCI.renderPass);
                if (rp_state) {
                    addCommandBufferBinding(
                        &rp_state->cb_bindings,
                        {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT},
                        cb_state);
                }
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
                            "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
        }
    }
    lock.unlock();
    if (!skip)
        dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}

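// The vkCmdSet* entry points below share a common pattern: record the command for state
// tracking and set the matching CBSTATUS_*_SET bit so draw-time validation knows the
// corresponding dynamic state has been provided.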
VKAPI_ATTR void VKAPI_CALL
CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
        pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
        pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}

VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
        pCB->status |= CBSTATUS_LINE_WIDTH_SET;

        PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
                                 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
                                 "flag. This is undefined behavior and could be ignored.");
        } else {
            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
}

VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
}

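// Validate and track vkCmdBindDescriptorSets(): each incoming set must be compatible with the
// overlapping set layout of the pipeline layout, dynamic offsets must match the dynamic
// descriptor count and honor the min*BufferOffsetAlignment limits, and previously bound sets
// that become incompatible with the new pipeline layout are invalidated.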
VKAPI_ATTR void VKAPI_CALL
CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                      const uint32_t *pDynamicOffsets) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            // Track total count of dynamic descriptor types to make sure we have an offset for each one
            uint32_t totalDynamicDescriptors = 0;
            string errorString = "";
            uint32_t lastSetIndex = firstSet + setCount - 1;
            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
            }
            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
            auto pipeline_layout = getPipelineLayout(dev_data, layout);
            for (uint32_t i = 0; i < setCount; i++) {
                cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]);
                if (pSet) {
                    pCB->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet;
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                         DRAWSTATE_NONE, "DS", "Descriptor Set 0x%" PRIxLEAST64 " bound on pipeline %s",
                                         (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
                    if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                             DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                             "Descriptor Set 0x%" PRIxLEAST64
                                             " bound but it was never updated. You may want to either update it or not bind it.",
                                             (uint64_t)pDescriptorSets[i]);
                    }
                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
                    if (!verify_set_layout_compatibility(dev_data, pSet, pipeline_layout, i + firstSet, errorString)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                             DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                                             "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
                                             "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
                                             i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
                    }

                    auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();

                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();

                    if (setDynamicDescriptorCount) {
                        // First make sure we won't overstep bounds of pDynamicOffsets array
                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                                        "descriptorSet #%u (0x%" PRIxLEAST64
                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
                                        i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
                                        (dynamicOffsetCount - totalDynamicDescriptors));
                        } else { // Validate and store dynamic offsets with the set
                            // Validate Dynamic Offset Minimums
                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
                            for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
                                if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
                                        skip_call |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
                                    }
                                    cur_dyn_offset++;
                                } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
                                        skip_call |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
                                    }
                                    cur_dyn_offset++;
                                }
                            }

                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
                            // Keep running total of dynamic descriptor count to verify at the end
                            totalDynamicDescriptors += setDynamicDescriptorCount;
                        }
                    }
                } else {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                         DRAWSTATE_INVALID_SET, "DS",
                                         "Attempt to bind descriptor set 0x%" PRIxLEAST64 " that doesn't exist!",
                                         (uint64_t)pDescriptorSets[i]);
                }
                skip_call |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
                if (firstSet > 0) { // Check set #s below the first bound set
                    for (uint32_t i = 0; i < firstSet; ++i) {
                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
                                                             pipeline_layout, i, errorString)) {
                            skip_call |= log_msg(
                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
                                "DescriptorSet 0x%" PRIxLEAST64
                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
                        }
                    }
                }
                // Check if newly last bound set invalidates any remaining bound sets
                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
                    if (oldFinalBoundSet &&
                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, pipeline_layout, lastSetIndex, errorString)) {
                        auto old_set = oldFinalBoundSet->GetSet();
                        skip_call |=
                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
                                    lastSetIndex + 1, (uint64_t)layout);
                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                    }
                }
            }
            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
            if (totalDynamicDescriptors != dynamicOffsetCount) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                            "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
                            "is %u. It should exactly match the number of dynamic descriptors.",
                            setCount, totalDynamicDescriptors, dynamicOffsetCount);
            }
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
                                                       pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}

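// Validate and track vkCmdBindIndexBuffer(): the buffer must be bound to memory and the offset
// must be aligned to the index size (2 bytes for VK_INDEX_TYPE_UINT16, 4 for VK_INDEX_TYPE_UINT32).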
VKAPI_ATTR void VKAPI_CALL
CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO : Somewhere need to verify that IBs have correct usage state flagged
    std::unique_lock<std::mutex> lock(global_lock);

    auto buff_node = getBufferNode(dev_data, buffer);
    auto cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node && buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdBindIndexBuffer()");
        std::function<bool()> function = [=]() {
            return ValidateBufferMemoryIsValid(dev_data, buff_node, "vkCmdBindIndexBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        skip_call |= addCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
        VkDeviceSize offset_align = 0;
        switch (indexType) {
        case VK_INDEX_TYPE_UINT16:
            offset_align = 2;
            break;
        case VK_INDEX_TYPE_UINT32:
            offset_align = 4;
            break;
        default:
            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
            break;
        }
        if (!offset_align || (offset % offset_align)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
                                 "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
                                 offset, string_VkIndexType(indexType));
        }
        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}

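// Record the vertex buffers bound at [firstBinding, firstBinding + bindingCount) in the command
// buffer's currentDrawData so each subsequent draw can snapshot the buffers it consumes.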
void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
    uint32_t end = firstBinding + bindingCount;
    if (pCB->currentDrawData.buffers.size() < end) {
        pCB->currentDrawData.buffers.resize(end);
    }
    for (uint32_t i = 0; i < bindingCount; ++i) {
        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
    }
}

static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }

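// Validate and track vkCmdBindVertexBuffers(): every buffer must be bound to device memory,
// and the bindings are recorded for draw-time resource tracking.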
VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
                                                uint32_t bindingCount, const VkBuffer *pBuffers,
                                                const VkDeviceSize *pOffsets) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO : Somewhere need to verify that VBs have correct usage state flagged
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node) {
        for (uint32_t i = 0; i < bindingCount; ++i) {
            auto buff_node = getBufferNode(dev_data, pBuffers[i]);
            assert(buff_node);
            skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdBindVertexBuffers()");
            std::function<bool()> function = [=]() {
                return ValidateBufferMemoryIsValid(dev_data, buff_node, "vkCmdBindVertexBuffers()");
            };
            cb_node->validate_functions.push_back(function);
        }
        skip_call |= addCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
    } else {
        skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}

/* expects global_lock to be held by caller */
static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;

    for (auto imageView : pCB->updateImages) {
        auto view_state = getImageViewState(dev_data, imageView);
        if (!view_state)
            continue;

        auto image_state = getImageState(dev_data, view_state->create_info.image);
        assert(image_state);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, image_state, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    for (auto buffer : pCB->updateBuffers) {
        auto buff_node = getBufferNode(dev_data, buffer);
        assert(buff_node);
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, buff_node, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    return skip_call;
}

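// The draw entry points below share a common pattern: record the command, bump the relevant
// draw counter, run draw-time validation of the bound pipeline and descriptor state, mark any
// storage images/buffers as written, and verify the call occurs inside a render pass.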
VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                   uint32_t firstVertex, uint32_t firstInstance) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
        pCB->drawCount[DRAW]++;
        skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call 0x%" PRIx64 ", reporting descriptor set state:",
                    g_drawCount[DRAW]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(pCB);
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
                                          uint32_t firstInstance) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
        pCB->drawCount[DRAW_INDEXED]++;
        skip_call |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
                             "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting descriptor set state:",
                             g_drawCount[DRAW_INDEXED]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(pCB);
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
}

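// Indirect draws additionally validate that the indirect parameter buffer is bound to memory
// and bind that buffer to the command buffer for invalidation tracking.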
VKAPI_ATTR void VKAPI_CALL
CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto buff_node = getBufferNode(dev_data, buffer);
    if (cb_node && buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDrawIndirect()");
        AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
        skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
        cb_node->drawCount[DRAW_INDIRECT]++;
        skip_call |= validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
        // TODO : Need to pass commandBuffer as srcObj here
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
                             "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting descriptor set state:",
                             g_drawCount[DRAW_INDIRECT]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(cb_node);
        }
        skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndirect()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
}

VKAPI_ATTR void VKAPI_CALL
CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto buff_node = getBufferNode(dev_data, buffer);
    if (cb_node && buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDrawIndexedIndirect()");
        AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
        skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
        cb_node->drawCount[DRAW_INDEXED_INDIRECT]++;
        skip_call |=
            validate_and_update_draw_state(dev_data, cb_node, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
        // TODO : Need to pass commandBuffer as srcObj here
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS",
                    "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting descriptor set state:",
                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(cb_node);
        }
        skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndexedIndirect()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
}

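// Dispatches use the compute bind point and, unlike draws, must be recorded outside of a
// render pass; insideRenderPass() flags an error if one is active.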
VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        skip_call |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
}

VKAPI_ATTR void VKAPI_CALL
CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto buff_node = getBufferNode(dev_data, buffer);
    if (cb_node && buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDispatchIndirect()");
        AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
        skip_call |=
            validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
        skip_call |= addCmd(dev_data, cb_node, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdDispatchIndirect()");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
}

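// Validate and track vkCmdCopyBuffer(): both buffers must be bound to memory and carry the
// TRANSFER_SRC/TRANSFER_DST usage flags, and the copy must be recorded outside a render pass.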
VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_buff_node = getBufferNode(dev_data, srcBuffer);
    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
    if (cb_node && src_buff_node && dst_buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_node, "vkCmdCopyBuffer()");
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyBuffer()");
        // Update bindings between buffers and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node);
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
        // Validate that SRC & DST buffers have correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                              VALIDATION_ERROR_01164, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              VALIDATION_ERROR_01165, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");

        std::function<bool()> function = [=]() {
            return ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer()");
    } else {
        // Param_checker will flag errors on invalid objects, just assert here as debugging aid
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}

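// Check the tracked per-subresource layouts of the source image against the srcImageLayout the
// caller passed. The first use of a subresource in this command buffer simply records the
// expected layout; mismatches on later uses are errors, and layouts other than
// TRANSFER_SRC_OPTIMAL or GENERAL are invalid for transfer sources.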
static bool VerifySourceImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage srcImage,
                                    VkImageSubresourceLayers subLayers, VkImageLayout srcImageLayout) {
    bool skip_call = false;

    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(cb_node, srcImage, sub, node)) {
            SetLayout(cb_node, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
            continue;
        }
        if (node.layout != srcImageLayout) {
            // TODO: Improve log message in the next pass
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot copy from an image whose source layout is %s when the current layout is %s.",
                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            // TODO : Can we deal with image node from the top of call tree and avoid map look-up here?
            auto image_state = getImageState(dev_data, srcImage);
            if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                     "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
            }
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                 "Layout for input image is %s but can only be TRANSFER_SRC_OPTIMAL or GENERAL.",
                                 string_VkImageLayout(srcImageLayout));
        }
    }
    return skip_call;
}

static bool VerifyDestImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage destImage,
                                  VkImageSubresourceLayers subLayers, VkImageLayout destImageLayout) {
    bool skip_call = false;

    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(cb_node, destImage, sub, node)) {
            SetLayout(cb_node, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
            continue;
        }
        if (node.layout != destImageLayout) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot copy to an image whose dest layout is %s when the current layout is %s.",
                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            auto image_state = getImageState(dev_data, destImage);
            if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                     "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
            }
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                 "Layout for output image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL.",
                                 string_VkImageLayout(destImageLayout));
        }
    }
    return skip_call;
}

8228// Test if two VkExtent3D structs are equivalent
8229static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
8230    bool result = true;
8231    if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
8232        (extent->depth != other_extent->depth)) {
8233        result = false;
8234    }
8235    return result;
8236}
8237
8238// Returns the image extent of a specific subresource.
8239static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_STATE *img, const VkImageSubresourceLayers *subresource) {
8240    const uint32_t mip = subresource->mipLevel;
8241    VkExtent3D extent = img->createInfo.extent;
8242    extent.width = std::max(1U, extent.width >> mip);
8243    extent.height = std::max(1U, extent.height >> mip);
8244    extent.depth = std::max(1U, extent.depth >> mip);
8245    return extent;
8246}
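
// Illustrative sketch (not part of the layer's validation path, values hypothetical): for an
// image created with extent {60, 20, 1} and 4 mip levels, the helper above reports
//   mip 0 -> {60, 20, 1}, mip 1 -> {30, 10, 1}, mip 2 -> {15, 5, 1}, mip 3 -> {7, 2, 1}
// i.e. each dimension is right-shifted once per level and clamped to at least 1.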
8247
8248// Test if the extent argument has all dimensions set to 0.
8249static inline bool IsExtentZero(const VkExtent3D *extent) {
8250    return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
8251}
8252
8253// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
8254static inline VkExtent3D GetScaledItg(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img) {
8255    // Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
8256    VkExtent3D granularity = { 0, 0, 0 };
8257    auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
8258    if (pPool) {
8259        granularity = dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
8260        if (vk_format_is_compressed(img->createInfo.format)) {
8261            auto block_size = vk_format_compressed_block_size(img->createInfo.format);
8262            granularity.width *= block_size.width;
8263            granularity.height *= block_size.height;
8264        }
8265    }
8266    return granularity;
8267}
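
// Example (hypothetical values, for illustration only): if the queue family reports a
// minImageTransferGranularity of {1, 1, 1} and the image uses a BC1 format with 4x4 texel
// blocks, the helper above returns a scaled granularity of {4, 4, 1}. Depth is left
// unscaled, matching the 2D texel blocks of the block-compressed formats handled here.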
8268
8269// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
8270static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
8271    bool valid = true;
8272    if ((vk_safe_modulo(extent->depth, granularity->depth) != 0) || (vk_safe_modulo(extent->width, granularity->width) != 0) ||
8273        (vk_safe_modulo(extent->height, granularity->height) != 0)) {
8274        valid = false;
8275    }
8276    return valid;
8277}
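
// Example (illustration only, assuming vk_safe_modulo treats a zero divisor as zero): with a
// granularity of {8, 8, 1}, an extent of {16, 8, 1} is aligned, since every dimension is a
// multiple of the corresponding granularity dimension, while {12, 8, 1} is not (12 % 8 != 0).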
8278
8279// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
8280static inline bool CheckItgOffset(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
8281                                  const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member) {
8282    bool skip = false;
8283    VkExtent3D offset_extent = {};
8284    offset_extent.width = static_cast<uint32_t>(abs(offset->x));
8285    offset_extent.height = static_cast<uint32_t>(abs(offset->y));
8286    offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
8287    if (IsExtentZero(granularity)) {
8288        // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
8289        if (IsExtentZero(&offset_extent) == false) {
8290            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8291                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8292                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) "
8293                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
8294                            function, i, member, offset->x, offset->y, offset->z);
8295        }
8296    } else {
8297        // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
8298        // integer multiples of the image transfer granularity.
8299        if (IsExtentAligned(&offset_extent, granularity) == false) {
8300            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8301                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8302                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer "
8303                            "multiples of this command buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
8304                            function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
8305                            granularity->depth);
8306        }
8307    }
8308    return skip;
8309}
8310
8311// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
8312static inline bool CheckItgExtent(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
8313                                  const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
8314                                  const uint32_t i, const char *function, const char *member) {
8315    bool skip = false;
8316    if (IsExtentZero(granularity)) {
8317        // If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
8318        // subresource extent.
8319        if (IsExtentEqual(extent, subresource_extent) == false) {
8320            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8321                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8322                            "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
8323                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
8324                            function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
8325                            subresource_extent->height, subresource_extent->depth);
8326        }
8327    } else {
8328        // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even
8329        // integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
8330        // subresource extent dimensions.
8331        VkExtent3D offset_extent_sum = {};
8332        offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
8333        offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
8334        offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
8335        if ((IsExtentAligned(extent, granularity) == false) && (IsExtentEqual(&offset_extent_sum, subresource_extent) == false)) {
8336            skip |=
8337                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8338                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8339                        "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command buffer's "
8340                        "queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
8341                        "extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
8342                        function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
8343                        granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
8344                        subresource_extent->width, subresource_extent->height, subresource_extent->depth);
8345        }
8346    }
8347    return skip;
8348}
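
// Worked example (hypothetical values): with granularity {8, 8, 1} and a subresource extent
// of {60, 20, 1}, a region with offset {0, 0, 0} and extent {60, 20, 1} passes -- the extent
// is not granularity-aligned (60 % 8 != 0) but offset + extent reaches the subresource edge.
// A region with offset {8, 8, 0} and extent {32, 8, 1} also passes, since every extent
// dimension is a multiple of the granularity. A region with offset {8, 8, 0} and extent
// {20, 8, 1} is flagged: 20 % 8 != 0 and 8 + 20 != 60.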
8349
8350// Check a uint32_t width or stride value against a queue family's Image Transfer Granularity width value
8351static inline bool CheckItgInt(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const uint32_t value,
8352                               const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
8353    bool skip = false;
8354    if (vk_safe_modulo(value, granularity) != 0) {
8355        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8356                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8357                        "%s: pRegion[%d].%s (%d) must be an even integer multiple of this command buffer's queue family image "
8358                        "transfer granularity width (%d).",
8359                        function, i, member, value, granularity);
8360    }
8361    return skip;
8362}
8363
8364// Check a VkDeviceSize value against a queue family's Image Transfer Granularity width value
8365static inline bool CheckItgSize(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkDeviceSize value,
8366                                const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
8367    bool skip = false;
8368    if (vk_safe_modulo(value, granularity) != 0) {
8369        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8370                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8371                        "%s: pRegion[%d].%s (%" PRIdLEAST64
8372                        ") must be an even integer multiple of this command buffer's queue family image transfer "
8373                        "granularity width (%d).",
8374                        function, i, member, value, granularity);
8375    }
8376    return skip;
8377}
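
// Example (illustration only): with a scaled granularity width of 8, a bufferOffset of 256
// passes the helper above (256 % 8 == 0) while a bufferOffset of 260 is flagged; CheckItgInt
// applies the same test to 32-bit values such as bufferRowLength.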
8378
8379// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
8380static inline bool ValidateCopyImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
8381                                                                    const IMAGE_STATE *img, const VkImageCopy *region,
8382                                                                    const uint32_t i, const char *function) {
8383    bool skip = false;
8384    VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
8385    skip |= CheckItgOffset(dev_data, cb_node, &region->srcOffset, &granularity, i, function, "srcOffset");
8386    skip |= CheckItgOffset(dev_data, cb_node, &region->dstOffset, &granularity, i, function, "dstOffset");
8387    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->dstSubresource);
8388    skip |= CheckItgExtent(dev_data, cb_node, &region->extent, &region->dstOffset, &granularity, &subresource_extent, i, function,
8389                           "extent");
8390    return skip;
8391}
8392
8393// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
8394static inline bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
8395                                                                          const IMAGE_STATE *img, const VkBufferImageCopy *region,
8396                                                                          const uint32_t i, const char *function) {
8397    bool skip = false;
8398    VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
8399    skip |= CheckItgSize(dev_data, cb_node, region->bufferOffset, granularity.width, i, function, "bufferOffset");
8400    skip |= CheckItgInt(dev_data, cb_node, region->bufferRowLength, granularity.width, i, function, "bufferRowLength");
8401    skip |= CheckItgInt(dev_data, cb_node, region->bufferImageHeight, granularity.width, i, function, "bufferImageHeight");
8402    skip |= CheckItgOffset(dev_data, cb_node, &region->imageOffset, &granularity, i, function, "imageOffset");
8403    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource);
8404    skip |= CheckItgExtent(dev_data, cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent, i,
8405                           function, "imageExtent");
8406    return skip;
8407}
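
// Minimal sketch of a region that satisfies the checks above, assuming a scaled granularity
// of {8, 8, 1} and a {64, 64, 1} mip-0 subresource (all values hypothetical):
//   VkBufferImageCopy region = {};
//   region.bufferOffset = 0;            // multiple of the granularity width
//   region.bufferRowLength = 64;        // multiple of the granularity width
//   region.bufferImageHeight = 64;      // multiple of the granularity width
//   region.imageOffset = {0, 0, 0};     // granularity-aligned
//   region.imageExtent = {64, 64, 1};   // granularity-aligned
//   region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};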
8408
8409VKAPI_ATTR void VKAPI_CALL
8410CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8411             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
8412    bool skip_call = false;
8413    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8414    std::unique_lock<std::mutex> lock(global_lock);
8415
8416    auto cb_node = getCBNode(dev_data, commandBuffer);
8417    auto src_image_state = getImageState(dev_data, srcImage);
8418    auto dst_image_state = getImageState(dev_data, dstImage);
8419    if (cb_node && src_image_state && dst_image_state) {
8420        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImage()");
8421        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyImage()");
8422        // Update bindings between images and cmd buffer
8423        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
8424        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
8425        // Validate that SRC & DST images have correct usage flags set
8426        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8427                                             VALIDATION_ERROR_01178, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8428        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8429                                             VALIDATION_ERROR_01181, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8430        std::function<bool()> function = [=]() {
8431            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImage()");
8432        };
8433        cb_node->validate_functions.push_back(function);
8434        function = [=]() {
8435            SetImageMemoryValid(dev_data, dst_image_state, true);
8436            return false;
8437        };
8438        cb_node->validate_functions.push_back(function);
8439
8440        skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
8441        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage()");
8442        for (uint32_t i = 0; i < regionCount; ++i) {
8443            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].srcSubresource, srcImageLayout);
8444            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].dstSubresource, dstImageLayout);
8445            skip_call |= ValidateCopyImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i], i,
8446                                                                          "vkCmdCopyImage()");
8447        }
8448    } else {
8449        assert(0);
8450    }
8451    lock.unlock();
8452    if (!skip_call)
8453        dev_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8454                                              pRegions);
8455}
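
// Application-side sketch of a copy that passes the validation above (illustrative only;
// assumes both images were created with the TRANSFER_SRC/DST usage bits, are bound to
// memory, and were transitioned to the layouts named here before this call):
//   VkImageCopy copy_region = {};
//   copy_region.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//   copy_region.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//   copy_region.extent = {64, 64, 1};
//   vkCmdCopyImage(cmd_buf, src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
//                  dst_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region);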
8456
8457// Validate that an image's sampleCount matches the requirement for a specific API call
8458static inline bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
8459                                            const char *location) {
8460    bool skip = false;
8461    if (image_state->createInfo.samples != sample_count) {
8462        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8463                       reinterpret_cast<uint64_t &>(image_state->image), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
8464                       "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s.", location,
8465                       reinterpret_cast<uint64_t &>(image_state->image),
8466                       string_VkSampleCountFlagBits(image_state->createInfo.samples), string_VkSampleCountFlagBits(sample_count));
8467    }
8468    return skip;
8469}
8470
8471VKAPI_ATTR void VKAPI_CALL
8472CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8473             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
8474    bool skip_call = false;
8475    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8476    std::unique_lock<std::mutex> lock(global_lock);
8477
8478    auto cb_node = getCBNode(dev_data, commandBuffer);
8479    auto src_image_state = getImageState(dev_data, srcImage);
8480    auto dst_image_state = getImageState(dev_data, dstImage);
8481    if (cb_node && src_image_state && dst_image_state) {
8482        skip_call |= ValidateImageSampleCount(dev_data, src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage");
8483        skip_call |= ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage");
8484        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdBlitImage()");
8485        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdBlitImage()");
8486        // Update bindings between images and cmd buffer
8487        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
8488        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
8489        // Validate that SRC & DST images have correct usage flags set
8490        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8491                                             VALIDATION_ERROR_02182, "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8492        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8493                                             VALIDATION_ERROR_02186, "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8494        std::function<bool()> function = [=]() {
8495            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdBlitImage()");
8496        };
8497        cb_node->validate_functions.push_back(function);
8498        function = [=]() {
8499            SetImageMemoryValid(dev_data, dst_image_state, true);
8500            return false;
8501        };
8502        cb_node->validate_functions.push_back(function);
8503
8504        skip_call |= addCmd(dev_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
8505        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBlitImage()");
8506    } else {
8507        assert(0);
8508    }
8509    lock.unlock();
8510    if (!skip_call)
8511        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8512                                              pRegions, filter);
8513}
8514
8515VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
8516                                                VkImage dstImage, VkImageLayout dstImageLayout,
8517                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8518    bool skip_call = false;
8519    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8520    std::unique_lock<std::mutex> lock(global_lock);
8521
8522    auto cb_node = getCBNode(dev_data, commandBuffer);
8523    auto src_buff_node = getBufferNode(dev_data, srcBuffer);
8524    auto dst_image_state = getImageState(dev_data, dstImage);
8525    if (cb_node && src_buff_node && dst_image_state) {
8526        skip_call |=
8527            ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyBufferToImage(): dstImage");
8528        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_node, "vkCmdCopyBufferToImage()");
8529        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyBufferToImage()");
8530        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node);
8531        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
8532        skip_call |=
8533            ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, VALIDATION_ERROR_01230,
8534                                     "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
8535        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8536                                             VALIDATION_ERROR_01231, "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8537        std::function<bool()> function = [=]() {
8538            SetImageMemoryValid(dev_data, dst_image_state, true);
8539            return false;
8540        };
8541        cb_node->validate_functions.push_back(function);
8542        function = [=]() { return ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBufferToImage()"); };
8543        cb_node->validate_functions.push_back(function);
8544
8545        skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
8546        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage()");
8547        for (uint32_t i = 0; i < regionCount; ++i) {
8548            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].imageSubresource, dstImageLayout);
8549            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i], i,
8550                                                                                "vkCmdCopyBufferToImage()");
8551        }
8552    } else {
8553        assert(0);
8554    }
8555    lock.unlock();
8556    if (!skip_call)
8557        dev_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
8558}
8559
8560VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
8561                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
8562                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8563    bool skip_call = false;
8564    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8565    std::unique_lock<std::mutex> lock(global_lock);
8566
8567    auto cb_node = getCBNode(dev_data, commandBuffer);
8568    auto src_image_state = getImageState(dev_data, srcImage);
8569    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8570    if (cb_node && src_image_state && dst_buff_node) {
8571        skip_call |=
8572            ValidateImageSampleCount(dev_data, src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyImageToBuffer(): srcImage");
8573        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImageToBuffer()");
8574        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyImageToBuffer()");
8575        // Update bindings between buffer/image and cmd buffer
8576        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
8577        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8578        // Validate that SRC image & DST buffer have correct usage flags set
8579        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8580                                             VALIDATION_ERROR_01248, "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8581        skip_call |=
8582            ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01252,
8583                                     "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8584        std::function<bool()> function = [=]() {
8585            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImageToBuffer()");
8586        };
8587        cb_node->validate_functions.push_back(function);
8588        function = [=]() {
8589            SetBufferMemoryValid(dev_data, dst_buff_node, true);
8590            return false;
8591        };
8592        cb_node->validate_functions.push_back(function);
8593
8594        skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
8595        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer()");
8596        for (uint32_t i = 0; i < regionCount; ++i) {
8597            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].imageSubresource, srcImageLayout);
8598            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, src_image_state, &pRegions[i], i,
8599                                                                                "vkCmdCopyImageToBuffer()");
8600        }
8601    } else {
8602        assert(0);
8603    }
8604    lock.unlock();
8605    if (!skip_call)
8606        dev_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
8607}
8608
8609VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
8610                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
8611    bool skip_call = false;
8612    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8613    std::unique_lock<std::mutex> lock(global_lock);
8614
8615    auto cb_node = getCBNode(dev_data, commandBuffer);
8616    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8617    if (cb_node && dst_buff_node) {
8618        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdUpdateBuffer()");
8619        // Update bindings between buffer and cmd buffer
8620        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8621        // Validate that DST buffer has correct usage flags set
8622        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8623                                              VALIDATION_ERROR_01146, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8624        std::function<bool()> function = [=]() {
8625            SetBufferMemoryValid(dev_data, dst_buff_node, true);
8626            return false;
8627        };
8628        cb_node->validate_functions.push_back(function);
8629
8630        skip_call |= addCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
8631        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()");
8632    } else {
8633        assert(0);
8634    }
8635    lock.unlock();
8636    if (!skip_call)
8637        dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
8638}
8639
8640VKAPI_ATTR void VKAPI_CALL
8641CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
8642    bool skip_call = false;
8643    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8644    std::unique_lock<std::mutex> lock(global_lock);
8645
8646    auto cb_node = getCBNode(dev_data, commandBuffer);
8647    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8648    if (cb_node && dst_buff_node) {
8649        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdFillBuffer()");
8650        // Update bindings between buffer and cmd buffer
8651        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8652        // Validate that DST buffer has correct usage flags set
8653        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8654                                              VALIDATION_ERROR_01137, "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8655        std::function<bool()> function = [=]() {
8656            SetBufferMemoryValid(dev_data, dst_buff_node, true);
8657            return false;
8658        };
8659        cb_node->validate_functions.push_back(function);
8660
8661        skip_call |= addCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
8662        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdFillBuffer()");
8663    } else {
8664        assert(0);
8665    }
8666    lock.unlock();
8667    if (!skip_call)
8668        dev_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
8669}
8670
8671VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
8672                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
8673                                               const VkClearRect *pRects) {
8674    bool skip_call = false;
8675    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8676    std::unique_lock<std::mutex> lock(global_lock);
8677    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8678    if (pCB) {
8679        skip_call |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
8680        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
8681        if (!hasDrawCmd(pCB) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
8682            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
8683            // There are times when an app needs to use ClearAttachments (generally when reusing a buffer inside of a render
8684            // pass). TODO: Can this warning be made more specific? Ideally it would not trigger for uses that must call
8685            // CmdClearAttachments; in the remaining cases it is best treated as a performance warning.
8687            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8688                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t &>(commandBuffer),
8689                                 __LINE__, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
8690                                 "vkCmdClearAttachments() issued on command buffer object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
8691                                 " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
8692                                 (uint64_t)(commandBuffer));
8693        }
8694        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments()");
8695    }
8696
8697    // Validate that attachment is in reference list of active subpass
8698    if (pCB && pCB->activeRenderPass) {
8699        const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->createInfo.ptr();
8700        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
8701        auto framebuffer = getFramebufferState(dev_data, pCB->activeFramebuffer);
8702
8703        for (uint32_t i = 0; i < attachmentCount; i++) {
8704            auto clear_desc = &pAttachments[i];
8705            VkImageView image_view = VK_NULL_HANDLE;
8706
8707            if (clear_desc->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
8708                if (clear_desc->colorAttachment >= pSD->colorAttachmentCount) {
8709                    skip_call |= log_msg(
8710                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8711                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_01114, "DS",
8712                        "vkCmdClearAttachments() color attachment index %d out of range for active subpass %d. %s",
8713                        clear_desc->colorAttachment, pCB->activeSubpass, validation_error_map[VALIDATION_ERROR_01114]);
8714                }
8715                else if (pSD->pColorAttachments[clear_desc->colorAttachment].attachment == VK_ATTACHMENT_UNUSED) {
8716                    skip_call |= log_msg(
8717                        dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8718                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8719                        "vkCmdClearAttachments() color attachment index %d is VK_ATTACHMENT_UNUSED; ignored.",
8720                        clear_desc->colorAttachment);
8721                }
8722                else {
8723                    image_view = framebuffer->createInfo.pAttachments[pSD->pColorAttachments[clear_desc->colorAttachment].attachment];
8724                }
8725            } else if (clear_desc->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
8726                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
8727                    (pSD->pDepthStencilAttachment->attachment ==
8728                     VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass
8729
8730                    skip_call |= log_msg(
8731                        dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8732                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8733                        "vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored");
8734                }
8735                else {
8736                    image_view = framebuffer->createInfo.pAttachments[pSD->pDepthStencilAttachment->attachment];
8737                }
8738            }
8739
8740            if (image_view) {
8741                auto image_view_state = getImageViewState(dev_data, image_view);
8742                auto aspects_present = image_view_state->create_info.subresourceRange.aspectMask;
8743                auto extra_aspects = clear_desc->aspectMask & ~aspects_present;
8744
8745                if (extra_aspects) {
8746                    skip_call |= log_msg(
8747                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
8748                            reinterpret_cast<uint64_t &>(image_view), __LINE__, VALIDATION_ERROR_01125, "DS",
8749                            "vkCmdClearAttachments() with aspects not present in image view: %s. %s",
8750                            string_VkImageAspectFlagBits((VkImageAspectFlagBits)extra_aspects),
8751                            validation_error_map[VALIDATION_ERROR_01125]);
8752                }
8753            }
8754        }
8755    }
8756    lock.unlock();
8757    if (!skip_call)
8758        dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
8759}
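
// Application-side sketch of a clear that avoids the warnings above (illustrative only;
// assumes color attachment 0 of the active subpass is not VK_ATTACHMENT_UNUSED, the clear is
// issued after at least one draw, and width/height are the caller's render-area dimensions):
//   VkClearAttachment clear_att = {};
//   clear_att.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
//   clear_att.colorAttachment = 0;  // must be < the subpass colorAttachmentCount
//   VkClearRect clear_rect = {{{0, 0}, {width, height}}, 0, 1};
//   vkCmdClearAttachments(cmd_buf, 1, &clear_att, 1, &clear_rect);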
8760
8761VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
8762                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
8763                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
8764    bool skip_call = false;
8765    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8766    std::unique_lock<std::mutex> lock(global_lock);
8767    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8768
8769    auto cb_node = getCBNode(dev_data, commandBuffer);
8770    auto image_state = getImageState(dev_data, image);
8771    if (cb_node && image_state) {
8772        skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCmdClearColorImage()");
8773        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
8774        std::function<bool()> function = [=]() {
8775            SetImageMemoryValid(dev_data, image_state, true);
8776            return false;
8777        };
8778        cb_node->validate_functions.push_back(function);
8779
8780        skip_call |= addCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
8781        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearColorImage()");
8782    } else {
8783        assert(0);
8784    }
8785    lock.unlock();
8786    if (!skip_call)
8787        dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
8788}
8789
8790VKAPI_ATTR void VKAPI_CALL
8791CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
8792                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
8793                          const VkImageSubresourceRange *pRanges) {
8794    bool skip_call = false;
8795    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8796    std::unique_lock<std::mutex> lock(global_lock);
8797    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8798
8799    auto cb_node = getCBNode(dev_data, commandBuffer);
8800    auto image_state = getImageState(dev_data, image);
8801    if (cb_node && image_state) {
8802        skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCmdClearDepthStencilImage()");
8803        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
8804        std::function<bool()> function = [=]() {
8805            SetImageMemoryValid(dev_data, image_state, true);
8806            return false;
8807        };
8808        cb_node->validate_functions.push_back(function);
8809
8810        skip_call |= addCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
8811        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearDepthStencilImage()");
8812    } else {
8813        assert(0);
8814    }
8815    lock.unlock();
8816    if (!skip_call)
8817        dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
8818}
8819
8820VKAPI_ATTR void VKAPI_CALL
8821CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8822                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
8823    bool skip_call = false;
8824    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8825    std::unique_lock<std::mutex> lock(global_lock);
8826
8827    auto cb_node = getCBNode(dev_data, commandBuffer);
8828    auto src_image_state = getImageState(dev_data, srcImage);
8829    auto dst_image_state = getImageState(dev_data, dstImage);
8830    if (cb_node && src_image_state && dst_image_state) {
8831        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdResolveImage()");
8832        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdResolveImage()");
8833        // Update bindings between images and cmd buffer
8834        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
8835        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
8836        std::function<bool()> function = [=]() {
8837            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdResolveImage()");
8838        };
8839        cb_node->validate_functions.push_back(function);
8840        function = [=]() {
8841            SetImageMemoryValid(dev_data, dst_image_state, true);
8842            return false;
8843        };
8844        cb_node->validate_functions.push_back(function);
8845
8846        skip_call |= addCmd(dev_data, cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
8847        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdResolveImage()");
8848    } else {
8849        assert(0);
8850    }
8851    lock.unlock();
8852    if (!skip_call)
8853        dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8854                                                 pRegions);
8855}
8856
8857bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8858    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8859    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8860    if (pCB) {
8861        pCB->eventToStageMap[event] = stageMask;
8862    }
8863    auto queue_data = dev_data->queueMap.find(queue);
8864    if (queue_data != dev_data->queueMap.end()) {
8865        queue_data->second.eventToStageMap[event] = stageMask;
8866    }
8867    return false;
8868}
8869
8870VKAPI_ATTR void VKAPI_CALL
8871CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8872    bool skip_call = false;
8873    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8874    std::unique_lock<std::mutex> lock(global_lock);
8875    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8876    if (pCB) {
8877        skip_call |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
8878        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()");
8879        auto event_state = getEventNode(dev_data, event);
8880        if (event_state) {
8881            addCommandBufferBinding(&event_state->cb_bindings,
8882                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
8883            event_state->cb_bindings.insert(pCB);
8884        }
8885        pCB->events.push_back(event);
8886        if (!pCB->waitedEvents.count(event)) {
8887            pCB->writeEventsBeforeWait.push_back(event);
8888        }
8889        std::function<bool(VkQueue)> eventUpdate =
8890            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
8891        pCB->eventUpdates.push_back(eventUpdate);
8892    }
8893    lock.unlock();
8894    if (!skip_call)
8895        dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
8896}
8897
8898VKAPI_ATTR void VKAPI_CALL
8899CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8900    bool skip_call = false;
8901    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8902    std::unique_lock<std::mutex> lock(global_lock);
8903    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8904    if (pCB) {
8905        skip_call |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
8906        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()");
8907        auto event_state = getEventNode(dev_data, event);
8908        if (event_state) {
8909            addCommandBufferBinding(&event_state->cb_bindings,
8910                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
8911            event_state->cb_bindings.insert(pCB);
8912        }
8913        pCB->events.push_back(event);
8914        if (!pCB->waitedEvents.count(event)) {
8915            pCB->writeEventsBeforeWait.push_back(event);
8916        }
8917        std::function<bool(VkQueue)> eventUpdate =
8918            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
8919        pCB->eventUpdates.push_back(eventUpdate);
8920    }
8921    lock.unlock();
8922    if (!skip_call)
8923        dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
8924}
8925
8926static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8927                                   const VkImageMemoryBarrier *pImgMemBarriers) {
8928    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8929    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8930    bool skip = false;
8931    uint32_t levelCount = 0;
8932    uint32_t layerCount = 0;
8933
8934    for (uint32_t i = 0; i < memBarrierCount; ++i) {
8935        auto mem_barrier = &pImgMemBarriers[i];
8936        if (!mem_barrier)
8937            continue;
8938        // TODO: Do not iterate over every possibility - consolidate where
8939        // possible
8940        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
8941
8942        for (uint32_t j = 0; j < levelCount; j++) {
8943            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
8944            for (uint32_t k = 0; k < layerCount; k++) {
8945                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
8946                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
8947                IMAGE_CMD_BUF_LAYOUT_NODE node;
8948                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
8949                    SetLayout(pCB, mem_barrier->image, sub,
8950                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
8951                    continue;
8952                }
8953                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
8954                    // TODO: Set memory invalid which is in mem_tracker currently
8955                } else if (node.layout != mem_barrier->oldLayout) {
8956                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8957                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
8958                                                                                    "when current layout is %s.",
8959                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
8960                }
8961                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
8962            }
8963        }
8964    }
8965    return skip;
8966}
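
// Example of a barrier the tracking above records without complaint (illustrative only; an
// UNDEFINED oldLayout is not checked against the tracked layout, since the prior contents
// are discarded):
//   VkImageMemoryBarrier barrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//   barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//   barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//   barrier.image = image;
//   barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
// A later barrier on the same subresource must then use TRANSFER_DST_OPTIMAL as its
// oldLayout, or the layout-mismatch error above fires.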
8967
8968// Print readable FlagBits in FlagMask
8969static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
8970    std::string result;
8971    std::string separator;
8972
8973    if (accessMask == 0) {
8974        result = "[None]";
8975    } else {
8976        result = "[";
8977        for (auto i = 0; i < 32; i++) {
8978            if (accessMask & (1U << i)) {
8979                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1U << i));
8980                separator = " | ";
8981            }
8982        }
8983        result = result + "]";
8984    }
8985    return result;
8986}
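
// Example output (illustrative): string_VkAccessFlags(0) yields "[None]", while
// string_VkAccessFlags(VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT) yields
// "[VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT]".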
8987
8988// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
8989// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
8990// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
8991static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8992                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
8993                             const char *type) {
8994    bool skip_call = false;
8995
8996    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
8997        if (accessMask & ~(required_bit | optional_bits)) {
8998            // TODO: Verify against Valid Use
8999            skip_call |=
9000                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9001                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
9002                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
9003        }
9004    } else {
9005        if (!required_bit) {
9006            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9007                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
9008                                                                  "%s when layout is %s, unless the app has previously added a "
9009                                                                  "barrier for this transition.",
9010                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
9011                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
9012        } else {
9013            std::string opt_bits;
9014            if (optional_bits != 0) {
9015                std::stringstream ss;
9016                ss << optional_bits;
9017                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
9018            }
9019            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9020                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
9021                                                                  "layout is %s, unless the app has previously added a barrier for "
9022                                                                  "this transition.",
9023                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
9024                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
9025        }
9026    }
9027    return skip_call;
9028}
9029
9030static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
9031                                        const VkImageLayout &layout, const char *type) {
9032    bool skip_call = false;
9033    switch (layout) {
9034    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
9035        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
9036                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
9037        break;
9038    }
9039    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
9040        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
9041                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
9042        break;
9043    }
9044    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
9045        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
9046        break;
9047    }
9048    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
9049        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
9050                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
9051                                      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
9052        break;
9053    }
9054    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
9055        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
9056                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
9057        break;
9058    }
9059    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
9060        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
9061        break;
9062    }
9063    case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: {
9064        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_MEMORY_READ_BIT, 0, type);
9065        break;
9066    }
9067    case VK_IMAGE_LAYOUT_UNDEFINED: {
9068        if (accessMask != 0) {
9069            // TODO: Verify against Valid Use section spec
9070            skip_call |=
9071                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9072                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
9073                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
9074        }
9075        break;
9076    }
9077    case VK_IMAGE_LAYOUT_GENERAL:
9078    default: { break; }
9079    }
9080    return skip_call;
9081}
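
// Example (illustrative): per the table above, a transition to
// VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL should carry VK_ACCESS_TRANSFER_WRITE_BIT in the
// corresponding access mask:
//   barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;  // satisfies the required_bit check
// Omitting it produces the "must have required access bit" warning from ValidateMaskBits,
// unless the app has previously added a barrier for this transition.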
9082
9083static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
9084                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
9085                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
9086                             const VkImageMemoryBarrier *pImageMemBarriers) {
9087    bool skip = false;
9088    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9089    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9090    if (pCB->activeRenderPass && memBarrierCount) {
9091        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
9092            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9093                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
9094                                                             "with no self dependency specified.",
9095                            funcName, pCB->activeSubpass);
9096        }
9097    }
9098    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
9099        auto mem_barrier = &pImageMemBarriers[i];
9100        auto image_data = getImageState(dev_data, mem_barrier->image);
9101        if (image_data) {
9102            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
9103            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
9104            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
9105                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
9106                // be VK_QUEUE_FAMILY_IGNORED
9107                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
9108                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9109                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
9110                                    "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
9111                                    "VK_SHARING_MODE_CONCURRENT. Src and dst "
9112                                    "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
9113                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
9114                }
9115            } else {
9116                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
9117                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
9118                // or both be a valid queue family
9119                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
9120                    (src_q_f_index != dst_q_f_index)) {
9121                    skip |=
9122                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9123                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
9124                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
9125                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
9126                                                                     "must be.",
9127                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
9128                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
9129                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
9130                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
9131                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9132                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
9133                                    "%s: Image 0x%" PRIx64 " was created with sharingMode "
9134                                    "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
                                    " or dstQueueFamilyIndex %d is greater than or equal to the " PRINTF_SIZE_T_SPECIFIER
                                    " queueFamilies created for this device.",
9137                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index, dst_q_f_index,
9138                                    dev_data->phys_dev_properties.queue_family_properties.size());
9139                }
9140            }
9141        }
9142
9143        if (mem_barrier) {
9144            if (mem_barrier->oldLayout != mem_barrier->newLayout) {
9145                skip |=
9146                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
9147                skip |=
9148                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
9149            }
9150            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
9151                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9152                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
9153                                                                 "PREINITIALIZED.",
9154                                funcName);
9155            }
9156            auto image_data = getImageState(dev_data, mem_barrier->image);
9157            VkFormat format = VK_FORMAT_UNDEFINED;
9158            uint32_t arrayLayers = 0, mipLevels = 0;
9159            bool imageFound = false;
9160            if (image_data) {
9161                format = image_data->createInfo.format;
9162                arrayLayers = image_data->createInfo.arrayLayers;
9163                mipLevels = image_data->createInfo.mipLevels;
9164                imageFound = true;
9165            } else if (dev_data->device_extensions.wsi_enabled) {
9166                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
9167                if (imageswap_data) {
9168                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
9169                    if (swapchain_data) {
9170                        format = swapchain_data->createInfo.imageFormat;
9171                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
9172                        mipLevels = 1;
9173                        imageFound = true;
9174                    }
9175                }
9176            }
9177            if (imageFound) {
9178                auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
                // Pass the barrier's image handle directly: image_data is null when the image
                // was resolved through the swapchain path above, so image_data->image would
                // dereference a null pointer in that case.
                skip |= ValidateImageAspectMask(dev_data, mem_barrier->image, format, aspect_mask, funcName);
                // VK_REMAINING_ARRAY_LAYERS is conservatively treated as a single layer here,
                // since the actual remaining count is not resolved at this point.
                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
                                     ? 1
                                     : mem_barrier->subresourceRange.layerCount;
                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                    "%s: Subresource baseArrayLayer (%d) plus layerCount (%d) must be less than or "
                                    "equal to the total number of layers (%d).",
                                    funcName, mem_barrier->subresourceRange.baseArrayLayer, layerCount, arrayLayers);
                }
9190                }
                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
                                     ? 1
                                     : mem_barrier->subresourceRange.levelCount;
                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                    "%s: Subresource baseMipLevel (%d) plus levelCount (%d) must be less than or "
                                    "equal to the total number of levels (%d).",
                                    funcName, mem_barrier->subresourceRange.baseMipLevel, levelCount, mipLevels);
                }
9202            }
9203        }
9204    }
9205    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
9206        auto mem_barrier = &pBufferMemBarriers[i];
9207        if (pCB->activeRenderPass) {
9208            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9209                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
9210        }
9211        if (!mem_barrier)
9212            continue;
9213
9214        // Validate buffer barrier queue family indices
9215        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
9216             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
9217            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
9218             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
9219            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9220                            DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                            "%s: Buffer Barrier 0x%" PRIx64 " has a queueFamilyIndex greater than or equal to "
                            "the number of queueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
9223                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
9224                            dev_data->phys_dev_properties.queue_family_properties.size());
9225        }
9226
9227        auto buffer_node = getBufferNode(dev_data, mem_barrier->buffer);
9228        if (buffer_node) {
9229            auto buffer_size = buffer_node->binding.size;
9230            if (mem_barrier->offset >= buffer_size) {
9231                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9232                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64
9233                                                                 " which is not less than total size 0x%" PRIx64 ".",
9234                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
9235                                reinterpret_cast<const uint64_t &>(mem_barrier->offset),
9236                                reinterpret_cast<const uint64_t &>(buffer_size));
9237            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
9238                skip |= log_msg(
9239                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9240                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
9241                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
9242                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
9243                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
9244                    reinterpret_cast<const uint64_t &>(buffer_size));
9245            }
9246        }
9247    }
9248    return skip;
9249}
9250
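// Run at queue-submit time: accumulate the stage masks with which the awaited
// events were last set (per-queue state first, then device-global state) and
// require that vkCmdWaitEvents' srcStageMask equals that accumulation, optionally
// with VK_PIPELINE_STAGE_HOST_BIT added for events set from the host.
// Example: if the submission sets e0 with VERTEX_SHADER_BIT and e1 with
// TRANSFER_BIT, a wait on {e0, e1} must pass srcStageMask
// (VERTEX_SHADER_BIT | TRANSFER_BIT), with HOST_BIT additionally permitted.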
bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                            VkPipelineStageFlags sourceStageMask) {
9252    bool skip_call = false;
9253    VkPipelineStageFlags stageMask = 0;
9254    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
9255    for (uint32_t i = 0; i < eventCount; ++i) {
9256        auto event = pCB->events[firstEventIndex + i];
9257        auto queue_data = dev_data->queueMap.find(queue);
9258        if (queue_data == dev_data->queueMap.end())
9259            return false;
9260        auto event_data = queue_data->second.eventToStageMap.find(event);
9261        if (event_data != queue_data->second.eventToStageMap.end()) {
9262            stageMask |= event_data->second;
9263        } else {
9264            auto global_event_data = getEventNode(dev_data, event);
9265            if (!global_event_data) {
9266                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
9267                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
9268                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
9269                                     reinterpret_cast<const uint64_t &>(event));
9270            } else {
9271                stageMask |= global_event_data->stageMask;
9272            }
9273        }
9274    }
    // TODO: Ideally VK_PIPELINE_STAGE_HOST_BIT would only be accepted when vkSetEvent was
    // actually used, but vkSetEvent may be called at any time, so that cannot be checked here.
9277    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
9278        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_EVENT, "DS", "Submitting cmdbuffer with call to vkCmdWaitEvents "
                                                            "using srcStageMask 0x%X, which must equal the bitwise "
                                                            "OR of the stageMask parameters used in calls to "
                                                            "vkCmdSetEvent (plus VK_PIPELINE_STAGE_HOST_BIT if "
                                                            "vkSetEvent was used), but the accumulated mask is 0x%X.",
9284                             sourceStageMask, stageMask);
9285    }
9286    return skip_call;
9287}
9288
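// CmdWaitEvents can only record which events are awaited; whether srcStageMask
// matches how those events will have been set is unknowable until submission, so
// the check is packaged as an eventUpdates callback evaluated per-queue at
// vkQueueSubmit time.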
9289VKAPI_ATTR void VKAPI_CALL
9290CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
9291              VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
9292              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
9293              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
9294    bool skip_call = false;
9295    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9296    std::unique_lock<std::mutex> lock(global_lock);
9297    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9298    if (pCB) {
9299        auto firstEventIndex = pCB->events.size();
9300        for (uint32_t i = 0; i < eventCount; ++i) {
9301            auto event_state = getEventNode(dev_data, pEvents[i]);
9302            if (event_state) {
                addCommandBufferBinding(&event_state->cb_bindings,
                                        {reinterpret_cast<const uint64_t &>(pEvents[i]), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT},
                                        pCB);
9307            }
9308            pCB->waitedEvents.insert(pEvents[i]);
9309            pCB->events.push_back(pEvents[i]);
9310        }
9311        std::function<bool(VkQueue)> eventUpdate =
9312            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
9313        pCB->eventUpdates.push_back(eventUpdate);
9314        if (pCB->state == CB_RECORDING) {
9315            skip_call |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
9316        } else {
9317            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
9318        }
9319        skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
9320        skip_call |=
9321            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
9322                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
9323    }
9324    lock.unlock();
9325    if (!skip_call)
9326        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
9327                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
9328                                               imageMemoryBarrierCount, pImageMemoryBarriers);
9329}
9330
9331VKAPI_ATTR void VKAPI_CALL
9332CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
9333                   VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
9334                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
9335                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
9336    bool skip_call = false;
9337    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9338    std::unique_lock<std::mutex> lock(global_lock);
9339    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9340    if (pCB) {
9341        skip_call |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
9342        skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
9343        skip_call |=
9344            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
9345                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
9346    }
9347    lock.unlock();
9348    if (!skip_call)
9349        dev_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
9350                                                    pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
9351                                                    imageMemoryBarrierCount, pImageMemoryBarriers);
9352}
9353
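// Submit-time callback (bound into queryUpdates) that marks a query available
// (value == true, e.g. after vkCmdEndQuery or vkCmdWriteTimestamp) or unavailable
// (value == false, after vkCmdResetQueryPool) in both the command buffer's and the
// executing queue's query state maps. It never fails, hence always returns false.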
9354bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
9355    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9356    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9357    if (pCB) {
9358        pCB->queryToStateMap[object] = value;
9359    }
9360    auto queue_data = dev_data->queueMap.find(queue);
9361    if (queue_data != dev_data->queueMap.end()) {
9362        queue_data->second.queryToStateMap[object] = value;
9363    }
9364    return false;
9365}
9366
9367VKAPI_ATTR void VKAPI_CALL
9368CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
9369    bool skip_call = false;
9370    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9371    std::unique_lock<std::mutex> lock(global_lock);
9372    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9373    if (pCB) {
9374        QueryObject query = {queryPool, slot};
9375        pCB->activeQueries.insert(query);
9376        if (!pCB->startedQueries.count(query)) {
9377            pCB->startedQueries.insert(query);
9378        }
9379        skip_call |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
9380        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9381                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9382    }
9383    lock.unlock();
9384    if (!skip_call)
9385        dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
9386}
9387
9388VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
9389    bool skip_call = false;
9390    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9391    std::unique_lock<std::mutex> lock(global_lock);
9392    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9393    if (pCB) {
9394        QueryObject query = {queryPool, slot};
9395        if (!pCB->activeQueries.count(query)) {
9396            skip_call |=
9397                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9398                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d",
9399                        (uint64_t)(queryPool), slot);
9400        } else {
9401            pCB->activeQueries.erase(query);
9402        }
9403        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9404        pCB->queryUpdates.push_back(queryUpdate);
9405        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
9407        } else {
9408            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
9409        }
9410        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9411                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9412    }
9413    lock.unlock();
9414    if (!skip_call)
9415        dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
9416}
9417
9418VKAPI_ATTR void VKAPI_CALL
9419CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
9420    bool skip_call = false;
9421    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9422    std::unique_lock<std::mutex> lock(global_lock);
9423    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9424    if (pCB) {
9425        for (uint32_t i = 0; i < queryCount; i++) {
9426            QueryObject query = {queryPool, firstQuery + i};
9427            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
9428            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
9429            pCB->queryUpdates.push_back(queryUpdate);
9430        }
9431        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
9433        } else {
9434            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
9435        }
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool()");
9437        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9438                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9439    }
9440    lock.unlock();
9441    if (!skip_call)
9442        dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
9443}
9444
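// Submit-time callback for vkCmdCopyQueryPoolResults: each copied query must be
// available on the executing queue or, failing that, in the device-global map
// (covering queries that completed on earlier submissions).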
9445bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
9446    bool skip_call = false;
9447    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
9448    auto queue_data = dev_data->queueMap.find(queue);
9449    if (queue_data == dev_data->queueMap.end())
9450        return false;
9451    for (uint32_t i = 0; i < queryCount; i++) {
9452        QueryObject query = {queryPool, firstQuery + i};
9453        auto query_data = queue_data->second.queryToStateMap.find(query);
9454        bool fail = false;
9455        if (query_data != queue_data->second.queryToStateMap.end()) {
9456            if (!query_data->second) {
9457                fail = true;
9458            }
9459        } else {
9460            auto global_query_data = dev_data->queryToStateMap.find(query);
9461            if (global_query_data != dev_data->queryToStateMap.end()) {
9462                if (!global_query_data->second) {
9463                    fail = true;
9464                }
9465            } else {
9466                fail = true;
9467            }
9468        }
9469        if (fail) {
9470            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9471                                 DRAWSTATE_INVALID_QUERY, "DS",
9472                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
9473                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
9474        }
9475    }
9476    return skip_call;
9477}
9478
9479VKAPI_ATTR void VKAPI_CALL
9480CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
9481                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
9482    bool skip_call = false;
9483    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9484    std::unique_lock<std::mutex> lock(global_lock);
9485
9486    auto cb_node = getCBNode(dev_data, commandBuffer);
9487    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
9488    if (cb_node && dst_buff_node) {
9489        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyQueryPoolResults()");
9490        // Update bindings between buffer and cmd buffer
9491        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
9492        // Validate that DST buffer has correct usage flags set
9493        skip_call |=
9494            ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01066,
9495                                     "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
9496        std::function<bool()> function = [=]() {
9497            SetBufferMemoryValid(dev_data, dst_buff_node, true);
9498            return false;
9499        };
9500        cb_node->validate_functions.push_back(function);
9501        std::function<bool(VkQueue)> queryUpdate =
9502            std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
9503        cb_node->queryUpdates.push_back(queryUpdate);
9504        if (cb_node->state == CB_RECORDING) {
9505            skip_call |= addCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
9506        } else {
9507            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
9508        }
9509        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()");
9510        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9511                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, cb_node);
9512    } else {
9513        assert(0);
9514    }
9515    lock.unlock();
9516    if (!skip_call)
9517        dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
9518                                                         stride, flags);
9519}
9520
9521VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
9522                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
9523                                            const void *pValues) {
9524    bool skip_call = false;
9525    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9526    std::unique_lock<std::mutex> lock(global_lock);
9527    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9528    if (pCB) {
9529        if (pCB->state == CB_RECORDING) {
9530            skip_call |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
9531        } else {
9532            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
9533        }
9534    }
9535    skip_call |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
9536    if (0 == stageFlags) {
9537        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9538                             DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set.");
9539    }
9540
9541    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
9542    auto pipeline_layout = getPipelineLayout(dev_data, layout);
9543    // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
9544    // contained in the pipeline ranges.
9545    // Build a {start, end} span list for ranges with matching stage flags.
9546    const auto &ranges = pipeline_layout->push_constant_ranges;
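    // Illustration: matching ranges {offset 0, size 16} and {offset 8, size 24}
    // become spans [0,16) and [8,32), which coalesce below into [0,32), so an
    // update at offset 4 with size 20 ([4,24)) is accepted even though it is not
    // contained in either original range alone.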
9547    struct span {
9548        uint32_t start;
9549        uint32_t end;
9550    };
9551    std::vector<span> spans;
9552    spans.reserve(ranges.size());
9553    for (const auto &iter : ranges) {
9554        if (iter.stageFlags == stageFlags) {
9555            spans.push_back({iter.offset, iter.offset + iter.size});
9556        }
9557    }
    if (spans.empty()) {
9559        // There were no ranges that matched the stageFlags.
9560        skip_call |=
9561            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9562                    DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
9563                                                          "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".",
9564                    (uint32_t)stageFlags, (uint64_t)layout);
9565    } else {
9566        // Sort span list by start value.
9567        struct comparer {
9568            bool operator()(struct span i, struct span j) { return i.start < j.start; }
9569        } my_comparer;
9570        std::sort(spans.begin(), spans.end(), my_comparer);
9571
9572        // Examine two spans at a time.
9573        std::vector<span>::iterator current = spans.begin();
9574        std::vector<span>::iterator next = current + 1;
9575        while (next != spans.end()) {
9576            if (current->end < next->start) {
9577                // There is a gap; cannot coalesce. Move to the next two spans.
9578                ++current;
9579                ++next;
9580            } else {
9581                // Coalesce the two spans.  The start of the next span
9582                // is within the current span, so pick the larger of
9583                // the end values to extend the current span.
9584                // Then delete the next span and set next to the span after it.
9585                current->end = max(current->end, next->end);
9586                next = spans.erase(next);
9587            }
9588        }
9589
9590        // Now we can check if the incoming range is within any of the spans.
9591        bool contained_in_a_range = false;
9592        for (uint32_t i = 0; i < spans.size(); ++i) {
9593            if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
9594                contained_in_a_range = true;
9595                break;
9596            }
9597        }
9598        if (!contained_in_a_range) {
9599            skip_call |=
9600                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9601                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Push constant range [%d, %d) "
9602                                                              "with stageFlags = 0x%" PRIx32 " "
                                                              "is not contained within any flag-matching "
                                                              "range in pipeline layout 0x%" PRIx64 ".",
9604                        offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout);
9605        }
9606    }
9607    lock.unlock();
9608    if (!skip_call)
9609        dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
9610}
9611
9612VKAPI_ATTR void VKAPI_CALL
9613CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
9614    bool skip_call = false;
9615    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9616    std::unique_lock<std::mutex> lock(global_lock);
9617    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9618    if (pCB) {
9619        QueryObject query = {queryPool, slot};
9620        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9621        pCB->queryUpdates.push_back(queryUpdate);
9622        if (pCB->state == CB_RECORDING) {
9623            skip_call |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
9624        } else {
9625            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
9626        }
9627    }
9628    lock.unlock();
9629    if (!skip_call)
9630        dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
9631}
9632
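// Check that every attachment referenced in 'attachments' resolves to a
// framebuffer image view whose underlying image was created with 'usage_flag'
// set (e.g., a color attachment's image needs VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT).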
9633static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
9634                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag) {
9635    bool skip_call = false;
9636
9637    for (uint32_t attach = 0; attach < count; attach++) {
9638        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
9639            // Attachment counts are verified elsewhere, but prevent an invalid access
9640            if (attachments[attach].attachment < fbci->attachmentCount) {
9641                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
9642                auto view_state = getImageViewState(dev_data, *image_view);
                if (view_state) {
                    // Guard against a missing image state; the old 'ici != nullptr' check could
                    // never fail, while getImageState() returning null would have been a crash.
                    auto image_state = getImageState(dev_data, view_state->create_info.image);
                    if (image_state) {
                        const VkImageCreateInfo *ici = &image_state->createInfo;
9646                        if ((ici->usage & usage_flag) == 0) {
9647                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9648                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_USAGE, "DS",
9649                                                 "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
9650                                                 "IMAGE_USAGE flags (%s).",
9651                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
9652                        }
9653                    }
9654                }
9655            }
9656        }
9657    }
9658    return skip_call;
9659}
9660
9661// Validate VkFramebufferCreateInfo which includes:
9662// 1. attachmentCount equals renderPass attachmentCount
9663// 2. corresponding framebuffer and renderpass attachments have matching formats
9664// 3. corresponding framebuffer and renderpass attachments have matching sample counts
9665// 4. fb attachments only have a single mip level
9666// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
9668// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
9669// 8. fb dimensions are within physical device limits
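//
// For reference, a minimal sketch (hypothetical handles and sizes) of a create
// info that satisfies rules 1-8, assuming 'color_view' is a single-mip,
// identity-swizzled view whose format and sample count match the render pass
// attachment and whose dimensions are at least 1920x1080x1:
//     VkFramebufferCreateInfo fbci = {};
//     fbci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
//     fbci.renderPass = render_pass;
//     fbci.attachmentCount = 1;          // must equal the render pass's count
//     fbci.pAttachments = &color_view;
//     fbci.width = 1920;
//     fbci.height = 1080;
//     fbci.layers = 1;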
9670static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
9671    bool skip_call = false;
9672
9673    auto rp_state = getRenderPassState(dev_data, pCreateInfo->renderPass);
9674    if (rp_state) {
9675        const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
9676        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
9677            skip_call |= log_msg(
9678                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9679                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9680                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
9681                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer.",
9682                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9683        } else {
9684            // attachmentCounts match, so make sure corresponding attachment details line up
9685            const VkImageView *image_views = pCreateInfo->pAttachments;
9686            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = getImageViewState(dev_data, image_views[i]);
                if (!view_state)
                    continue; // view state missing; nothing to compare against the renderpass attachment
                auto &ivci = view_state->create_info;
9689                if (ivci.format != rpci->pAttachments[i].format) {
9690                    skip_call |= log_msg(
9691                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9692                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
9693                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
9694                              "the format of "
9695                              "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
9696                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
9697                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9698                }
                auto image_state = getImageState(dev_data, ivci.image);
                if (!image_state)
                    continue; // image state missing; cannot check sample counts or mip dimensions
                const VkImageCreateInfo *ici = &image_state->createInfo;
9700                if (ici->samples != rpci->pAttachments[i].samples) {
9701                    skip_call |= log_msg(
9702                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9703                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
9704                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
9705                              "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
9706                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
9707                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9708                }
9709                // Verify that view only has a single mip level
9710                if (ivci.subresourceRange.levelCount != 1) {
9711                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9712                                         __LINE__, DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9713                                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
                                         "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
9715                                         i, ivci.subresourceRange.levelCount);
9716                }
9717                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
9718                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
9719                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
9720                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
9721                    (mip_height < pCreateInfo->height)) {
9722                    skip_call |=
9723                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9724                                DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9725                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
9726                                "than the corresponding "
9727                                "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
9728                                "dimensions for "
9729                                "attachment #%u, framebuffer:\n"
9730                                "width: %u, %u\n"
9731                                "height: %u, %u\n"
9732                                "layerCount: %u, %u\n",
9733                                i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
9734                                pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
9735                }
9736                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
9737                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
9738                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
9739                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
9740                    skip_call |= log_msg(
9741                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9742                        DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a non-identity swizzle. All framebuffer "
9744                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
9745                        "r swizzle = %s\n"
9746                        "g swizzle = %s\n"
9747                        "b swizzle = %s\n"
9748                        "a swizzle = %s\n",
9749                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
9750                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
9751                }
9752            }
9753        }
9754        // Verify correct attachment usage flags
9755        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
9756            // Verify input attachments:
9757            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount,
9758                                    rpci->pSubpasses[subpass].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
9759            // Verify color attachments:
9760            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount,
9761                                    rpci->pSubpasses[subpass].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
9762            // Verify depth/stencil attachments:
9763            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
9764                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
9765                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
9766            }
9767        }
9768    } else {
9769        skip_call |=
9770            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9771                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9772                    "vkCreateFramebuffer(): Attempt to create framebuffer with invalid renderPass (0x%" PRIxLEAST64 ").",
9773                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9774    }
9775    // Verify FB dimensions are within physical device limits
9776    if ((pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) ||
9777        (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) ||
9778        (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers)) {
9779        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9780                             DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9781                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo dimensions exceed physical device limits. "
9782                             "Here are the respective dimensions: requested, device max:\n"
9783                             "width: %u, %u\n"
9784                             "height: %u, %u\n"
9785                             "layerCount: %u, %u\n",
9786                             pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
9787                             pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
9788                             pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
9789    }
9790    return skip_call;
9791}
9792
9793// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
9794//  Return true if an error is encountered and callback returns true to skip call down chain
9795//   false indicates that call down chain should proceed
9796static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    // TODO : Verify that the renderPass the FB is being created with is compatible with the FB
9798    bool skip_call = false;
9799    skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
9800    return skip_call;
9801}
9802
9803// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
9804static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
9805    // Shadow create info and store in map
9806    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
9807        new FRAMEBUFFER_STATE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));
9808
9809    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9810        VkImageView view = pCreateInfo->pAttachments[i];
9811        auto view_state = getImageViewState(dev_data, view);
9812        if (!view_state) {
9813            continue;
9814        }
9815        MT_FB_ATTACHMENT_INFO fb_info;
9816        fb_info.mem = getImageState(dev_data, view_state->create_info.image)->binding.mem;
9817        fb_info.view_state = view_state;
9818        fb_info.image = view_state->create_info.image;
9819        fb_state->attachments.push_back(fb_info);
9820    }
9821    dev_data->frameBufferMap[fb] = std::move(fb_state);
9822}
9823
9824VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
9825                                                 const VkAllocationCallbacks *pAllocator,
9826                                                 VkFramebuffer *pFramebuffer) {
9827    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9828    std::unique_lock<std::mutex> lock(global_lock);
9829    bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
9830    lock.unlock();
9831
9832    if (skip_call)
9833        return VK_ERROR_VALIDATION_FAILED_EXT;
9834
9835    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
9836
9837    if (VK_SUCCESS == result) {
9838        lock.lock();
9839        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
9840        lock.unlock();
9841    }
9842    return result;
9843}
9844
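// Depth-first search through the subpass DAG's 'prev' edges: returns true if
// 'dependent' is reachable from 'index', meaning a (possibly transitive)
// dependency already orders the two subpasses. processed_nodes guards against
// re-walking shared predecessors.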
9845static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
9846                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node, there is no dependency path through it, so return false.
9848    if (processed_nodes.count(index))
9849        return false;
9850    processed_nodes.insert(index);
9851    const DAGNode &node = subpass_to_node[index];
9852    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
9853    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
9854        for (auto elem : node.prev) {
9855            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
9856                return true;
9857        }
9858    } else {
9859        return true;
9860    }
9861    return false;
9862}
9863
9864static bool CheckDependencyExists(const layer_data *dev_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
9865                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
9866    bool result = true;
9867    // Loop through all subpasses that share the same attachment and make sure a dependency exists
9868    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
9869        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
9870            continue;
9871        const DAGNode &node = subpass_to_node[subpass];
9872        // Check for a specified dependency between the two nodes. If one exists we are done.
9873        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
9874        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
9875        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no dependency exists, an implicit dependency still might. If not, throw an error.
9877            std::unordered_set<uint32_t> processed_nodes;
9878            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
9879                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
9880                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9881                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9882                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
9883                                     dependent_subpasses[k]);
9884                result = false;
9885            }
9886        }
9887    }
9888    return result;
9889}
9890
9891static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
9892                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
9893    const DAGNode &node = subpass_to_node[index];
9894    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
9895    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9896    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9897        if (attachment == subpass.pColorAttachments[j].attachment)
9898            return true;
9899    }
9900    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9901        if (attachment == subpass.pDepthStencilAttachment->attachment)
9902            return true;
9903    }
9904    bool result = false;
9905    // Loop through previous nodes and see if any of them write to the attachment.
9906    for (auto elem : node.prev) {
9907        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
9908    }
    // If the attachment was written to by a previous node then this node needs to preserve it.
9910    if (result && depth > 0) {
9911        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9912        bool has_preserved = false;
9913        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9914            if (subpass.pPreserveAttachments[j] == attachment) {
9915                has_preserved = true;
9916                break;
9917            }
9918        }
9919        if (!has_preserved) {
9920            skip_call |=
9921                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9922                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9923                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
9924        }
9925    }
9926    return result;
9927}
9928
9929template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    // Standard half-open interval overlap: [offset1, offset1 + size1) and
    // [offset2, offset2 + size2) overlap iff each range starts before the other
    // ends. (The previous expression missed the case of one range exactly
    // containing or equaling the other.)
    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
9932}
9933
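// Two subresource regions overlap only if both their mip ranges and their array
// layer ranges overlap, e.g. mips [0,2) vs [1,3) together with layers [0,4) vs [2,6).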
9934bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
9935    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
9936            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
9937}
9938
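// Cross-check the render pass's declared dependencies against actual attachment
// usage within this framebuffer:
//  1. Build per-attachment aliasing lists (same view, overlapping subresource
//     ranges of one image, or overlapping memory bindings).
//  2. Record, per attachment (and its aliases), which subpasses read it as an
//     input attachment and which write it as a color or depth attachment.
//  3. Require a subpass dependency between any two subpasses where one writes
//     an attachment that another reads or writes.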
9939static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
9940                                 RENDER_PASS_STATE const *renderPass) {
9941    bool skip_call = false;
9942    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
9943    auto const pCreateInfo = renderPass->createInfo.ptr();
9944    auto const & subpass_to_node = renderPass->subpassToNode;
9945    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
9946    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
9947    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
9948    // Find overlapping attachments
9949    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9950        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
9951            VkImageView viewi = pFramebufferInfo->pAttachments[i];
9952            VkImageView viewj = pFramebufferInfo->pAttachments[j];
9953            if (viewi == viewj) {
9954                overlapping_attachments[i].push_back(j);
9955                overlapping_attachments[j].push_back(i);
9956                continue;
9957            }
9958            auto view_state_i = getImageViewState(dev_data, viewi);
9959            auto view_state_j = getImageViewState(dev_data, viewj);
9960            if (!view_state_i || !view_state_j) {
9961                continue;
9962            }
9963            auto view_ci_i = view_state_i->create_info;
9964            auto view_ci_j = view_state_j->create_info;
9965            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
9966                overlapping_attachments[i].push_back(j);
9967                overlapping_attachments[j].push_back(i);
9968                continue;
9969            }
9970            auto image_data_i = getImageState(dev_data, view_ci_i.image);
9971            auto image_data_j = getImageState(dev_data, view_ci_j.image);
9972            if (!image_data_i || !image_data_j) {
9973                continue;
9974            }
9975            if (image_data_i->binding.mem == image_data_j->binding.mem &&
9976                isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
9977                                   image_data_j->binding.size)) {
9978                overlapping_attachments[i].push_back(j);
9979                overlapping_attachments[j].push_back(i);
9980            }
9981        }
9982    }
9983    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
9984        uint32_t attachment = i;
9985        for (auto other_attachment : overlapping_attachments[i]) {
9986            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9987                skip_call |=
9988                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9989                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9990                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9991                            attachment, other_attachment);
9992            }
9993            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9994                skip_call |=
9995                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9996                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9997                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9998                            other_attachment, attachment);
9999            }
10000        }
10001    }
    // For each attachment, find the subpasses that use it.
10003    unordered_set<uint32_t> attachmentIndices;
10004    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10005        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
10006        attachmentIndices.clear();
10007        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10008            uint32_t attachment = subpass.pInputAttachments[j].attachment;
10009            if (attachment == VK_ATTACHMENT_UNUSED)
10010                continue;
10011            input_attachment_to_subpass[attachment].push_back(i);
10012            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
10013                input_attachment_to_subpass[overlapping_attachment].push_back(i);
10014            }
10015        }
10016        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10017            uint32_t attachment = subpass.pColorAttachments[j].attachment;
10018            if (attachment == VK_ATTACHMENT_UNUSED)
10019                continue;
10020            output_attachment_to_subpass[attachment].push_back(i);
10021            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
10022                output_attachment_to_subpass[overlapping_attachment].push_back(i);
10023            }
10024            attachmentIndices.insert(attachment);
10025        }
10026        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
10027            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
10028            output_attachment_to_subpass[attachment].push_back(i);
10029            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
10030                output_attachment_to_subpass[overlapping_attachment].push_back(i);
10031            }
10032
10033            if (attachmentIndices.count(attachment)) {
10034                skip_call |=
10035                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10036                            DRAWSTATE_INVALID_RENDERPASS, "DS",
10037                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
10038            }
10039        }
10040    }
10041    // If there is a dependency needed make sure one exists
10042    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10043        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
10044        // If the attachment is an input then all subpasses that output must have a dependency relationship
10045        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10046            uint32_t attachment = subpass.pInputAttachments[j].attachment;
10047            if (attachment == VK_ATTACHMENT_UNUSED)
10048                continue;
10049            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
10050        }
10051        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
10052        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10053            uint32_t attachment = subpass.pColorAttachments[j].attachment;
10054            if (attachment == VK_ATTACHMENT_UNUSED)
10055                continue;
10056            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
10057            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
10058        }
10059        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
10060            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
10061            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
10062            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
10063        }
10064    }
10065    // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was
10066    // written.
10067    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10068        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
10069        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10070            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
10071        }
10072    }
10073    return skip_call;
10074}
10075// ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the
10076// VkAttachmentDescription structs that are used by the sub-passes of a renderpass. Initial check is to make sure that
10077// READ_ONLY layout attachments don't have CLEAR as their loadOp.
10078static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
10079                                                  const uint32_t attachment,
10080                                                  const VkAttachmentDescription &attachment_description) {
10081    bool skip_call = false;
10082    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
10083    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
10084        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
10085            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
10086            skip_call |=
10087                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
10088                        VkDebugReportObjectTypeEXT(0), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10089                        "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
10090        }
10091    }
10092    return skip_call;
10093}
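
// Illustrative sketch (not executed): a description that trips the check above --
// clearing an attachment whose first-use layout is read-only makes no sense:
//
//     VkAttachmentDescription ad = {};
//     ad.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
//     // first use in layout VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL -> error logged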

static bool ValidateLayouts(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
    bool skip = false;

    // Track when we're observing the first use of an attachment
    std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            auto attach_index = subpass.pColorAttachments[j].attachment;
            if (attach_index == VK_ATTACHMENT_UNUSED)
                continue;

            switch (subpass.pColorAttachments[j].layout) {
            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
                /* This is ideal. */
                break;

            case VK_IMAGE_LAYOUT_GENERAL:
                /* May not be optimal; TODO: reconsider this warning based on
                 * other constraints?
                 */
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
                break;

            default:
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pColorAttachments[j].layout));
            }

            if (attach_first_use[attach_index]) {
                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pColorAttachments[j].layout,
                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
            }
            attach_first_use[attach_index] = false;
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            switch (subpass.pDepthStencilAttachment->layout) {
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
                /* These are ideal. */
                break;

            case VK_IMAGE_LAYOUT_GENERAL:
                /* May not be optimal; TODO: reconsider this warning based on
                 * other constraints? GENERAL can be better than doing a bunch
                 * of transitions.
                 */
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "GENERAL layout for depth attachment may not give optimal performance.");
                break;

            default:
                /* No other layouts are acceptable */
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
                                "DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
            }

            auto attach_index = subpass.pDepthStencilAttachment->attachment;
            if (attach_first_use[attach_index]) {
                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pDepthStencilAttachment->layout,
                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
            }
            attach_first_use[attach_index] = false;
        }
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            auto attach_index = subpass.pInputAttachments[j].attachment;
            if (attach_index == VK_ATTACHMENT_UNUSED)
                continue;

            switch (subpass.pInputAttachments[j].layout) {
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
            case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
                /* These are ideal. */
                break;

            case VK_IMAGE_LAYOUT_GENERAL:
                /* May not be optimal. TODO: reconsider this warning based on
                 * other constraints.
                 */
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
                break;

            default:
                /* No other layouts are acceptable */
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pInputAttachments[j].layout));
            }

            if (attach_first_use[attach_index]) {
                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pInputAttachments[j].layout,
                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
            }
            attach_first_use[attach_index] = false;
        }
    }
    return skip;
}

static bool CreatePassDAG(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
    bool skip_call = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        DAGNode &subpass_node = subpass_to_node[i];
        subpass_node.pass = i;
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            if (dependency.srcSubpass == dependency.dstSubpass) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
            }
        } else if (dependency.srcSubpass > dependency.dstSubpass) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            has_self_dependency[dependency.srcSubpass] = true;
        } else {
            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
        }
    }
    return skip_call;
}
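
// Illustrative sketch (not executed): the dependency below adds the DAG edge 0 -> 1
// in CreatePassDAG (dstSubpass records srcSubpass in .prev, and vice versa in .next):
//
//     VkSubpassDependency dep = {};
//     dep.srcSubpass = 0;
//     dep.dstSubpass = 1;  // srcSubpass <= dstSubpass is required for non-external passes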


VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator,
                                                  VkShaderModule *pShaderModule) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    /* Use SPIRV-Tools validator to try and catch any issues with the module itself */
    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
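    // Note: VkShaderModuleCreateInfo::codeSize is in bytes, while spv_const_binary_t
    // expects a word count, hence the division by sizeof(uint32_t) below.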
    spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
    spv_diagnostic diag = nullptr;

    auto result = spvValidate(ctx, &binary, &diag);
    if (result != SPV_SUCCESS) {
        skip_call |=
            log_msg(dev_data->report_data, result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
                    VkDebugReportObjectTypeEXT(0), 0, __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
                    "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)");
    }

    spvDiagnosticDestroy(diag);
    spvContextDestroy(ctx);

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);

    if (res == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
    }
    return res;
}

static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
    bool skip_call = false;
    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
                             "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d.",
                             type, attachment, attachment_count);
    }
    return skip_call;
}

static bool IsPowerOfTwo(unsigned x) {
    return x && !(x & (x-1));
}
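
// Illustrative note: VkSampleCountFlagBits values are single bits (1, 2, 4, ...), so
// OR-ing the sample counts of every attachment a subpass uses yields a power of two
// exactly when all of them agree. E.g. a 4-sample color attachment plus a 1-sample
// depth attachment gives 0x4 | 0x1 = 0x5, which IsPowerOfTwo() rejects below.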

static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
    bool skip_call = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS",
                        "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
        }
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            uint32_t attachment = subpass.pPreserveAttachments[j];
            if (attachment == VK_ATTACHMENT_UNUSED) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
                                     "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
            } else {
                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
            }
        }

        auto subpass_performs_resolve = subpass.pResolveAttachments && std::any_of(
            subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
            [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });

        unsigned sample_count = 0;

        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment;
            if (subpass.pResolveAttachments) {
                attachment = subpass.pResolveAttachments[j].attachment;
                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");

                if (!skip_call && attachment != VK_ATTACHMENT_UNUSED &&
                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                         "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
                                         "which must have VK_SAMPLE_COUNT_1_BIT but has %s",
                                         i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples));
                }
            }
            attachment = subpass.pColorAttachments[j].attachment;
            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");

            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;

                if (subpass_performs_resolve &&
                    pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                         "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
                                         "which has VK_SAMPLE_COUNT_1_BIT",
                                         i, attachment);
                }
            }
        }

        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");

            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
            }
        }

        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
        }

        if (sample_count && !IsPowerOfTwo(sample_count)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                 "CreateRenderPass:  Subpass %u attempts to render to "
                                 "attachments with inconsistent sample counts",
                                 i);
        }
    }
    return skip_call;
}
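
// Illustrative sketch (not executed): a resolve operation needs a multisampled source
// and a single-sample destination, matching the two resolve checks above:
//
//     pAttachments[0].samples = VK_SAMPLE_COUNT_4_BIT;  // color source
//     pAttachments[1].samples = VK_SAMPLE_COUNT_1_BIT;  // resolve destination
//     subpass.pColorAttachments[0].attachment   = 0;
//     subpass.pResolveAttachments[0].attachment = 1;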

VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);

    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
    //       ValidateLayouts.
    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
    lock.unlock();

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);

    if (VK_SUCCESS == result) {
        lock.lock();

        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);

        auto render_pass = unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo));
        render_pass->renderPass = *pRenderPass;
        render_pass->hasSelfDependency = has_self_dependency;
        render_pass->subpassToNode = subpass_to_node;

        // TODO: Maybe fill list and then copy instead of locking
        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
                uint32_t attachment = subpass.pColorAttachments[j].attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, false));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
                }
            }
            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, false));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
                }
            }
            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
                uint32_t attachment = subpass.pInputAttachments[j].attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, true));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
                }
            }
        }

        dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
    }
    return result;
}

static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    auto const pRenderPassInfo = getRenderPassState(dev_data, pRenderPassBegin->renderPass)->createInfo.ptr();
    auto const & framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo;
    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
                                                                 "with a different number of attachments.");
    }
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        auto view_state = getImageViewState(dev_data, image_view);
        assert(view_state);
        const VkImage &image = view_state->create_info.image;
        const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
                                             pRenderPassInfo->pAttachments[i].initialLayout};
        // TODO: Do not iterate over every possibility - consolidate where possible
        for (uint32_t j = 0; j < subRange.levelCount; j++) {
            uint32_t level = subRange.baseMipLevel + j;
            for (uint32_t k = 0; k < subRange.layerCount; k++) {
                uint32_t layer = subRange.baseArrayLayer + k;
                VkImageSubresource sub = {subRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, image, sub, node)) {
                    SetLayout(pCB, image, sub, newNode);
                    continue;
                }
                if (newNode.layout != VK_IMAGE_LAYOUT_UNDEFINED &&
                    newNode.layout != node.layout) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_RENDERPASS, "DS",
                                "You cannot start a render pass using attachment %u "
                                "where the render pass initial layout is %s and the previous "
                                "known layout of the attachment is %s. The layouts must match, or "
                                "the render pass initial layout for the attachment must be "
                                "VK_IMAGE_LAYOUT_UNDEFINED",
                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
                }
            }
        }
    }
    return skip_call;
}

static void TransitionAttachmentRefLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
                                          VkAttachmentReference ref) {
    if (ref.attachment != VK_ATTACHMENT_UNUSED) {
        auto image_view = pFramebuffer->createInfo.pAttachments[ref.attachment];
        SetLayout(dev_data, pCB, image_view, ref.layout);
    }
}

static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
                                     const int subpass_index) {
    auto renderPass = getRenderPassState(dev_data, pRenderPassBegin->renderPass);
    if (!renderPass)
        return;

    auto framebuffer = getFramebufferState(dev_data, pRenderPassBegin->framebuffer);
    if (!framebuffer)
        return;

    auto const &subpass = renderPass->createInfo.pSubpasses[subpass_index];
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pInputAttachments[j]);
    }
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pColorAttachments[j]);
    }
    if (subpass.pDepthStencilAttachment) {
        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, *subpass.pDepthStencilAttachment);
    }
}

static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
    bool skip_call = false;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
                             cmd_name.c_str());
    }
    return skip_call;
}

static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
    auto renderPass = getRenderPassState(dev_data, pRenderPassBegin->renderPass);
    if (!renderPass)
        return;

    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->createInfo.ptr();
    auto framebuffer = getFramebufferState(dev_data, pRenderPassBegin->framebuffer);
    if (!framebuffer)
        return;

    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        auto image_view = framebuffer->createInfo.pAttachments[i];
        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
    }
}

static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    const safe_VkFramebufferCreateInfo *pFramebufferInfo =
        &getFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
    if (pRenderPassBegin->renderArea.offset.x < 0 ||
        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
        pRenderPassBegin->renderArea.offset.y < 0 ||
        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
        skip_call |= static_cast<bool>(log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with renderArea not within the bounds of the "
            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
            "height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
    }
    return skip_call;
}
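
// Worked example for the check above (illustrative): with a 1024x768 framebuffer,
// renderArea offset (512, 512) + extent (600, 300) gives an x upper bound of
// 512 + 600 = 1112 > 1024, so the render area spills past the framebuffer and the
// error fires.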

// For a stencil-only format, the stencil[Load|Store]Op is the one that matters; for a depth/color attachment it is the
// [load|store]Op, and a combined depth/stencil format must consider both.
// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
    if (color_depth_op != op && stencil_op != op) {
        return false;
    }
    bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
    bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;

    return ((check_color_depth_load_op && (color_depth_op == op)) ||
            (check_stencil_load_op && (stencil_op == op)));
}
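
// Illustrative behavior of the helper above (not executed):
//   VK_FORMAT_R8G8B8A8_UNORM    -> only color_depth_op is compared against op
//   VK_FORMAT_S8_UINT           -> only stencil_op is compared against op
//   VK_FORMAT_D24_UNORM_S8_UINT -> either op matching returns true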

VKAPI_ATTR void VKAPI_CALL
CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
    auto renderPass = pRenderPassBegin ? getRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
    auto framebuffer = pRenderPassBegin ? getFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
    if (cb_node) {
        if (renderPass) {
            uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
            cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
            for (uint32_t i = 0; i < renderPass->createInfo.attachmentCount; ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
                auto pAttachment = &renderPass->createInfo.pAttachments[i];
                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
                                                         pAttachment->stencilLoadOp,
                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
                    clear_op_size = static_cast<uint32_t>(i) + 1;
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true);
                        return false;
                    };
                    cb_node->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
                                                                pAttachment->stencilLoadOp,
                                                                VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false);
                        return false;
                    };
                    cb_node->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
                                                                pAttachment->stencilLoadOp,
                                                                VK_ATTACHMENT_LOAD_OP_LOAD)) {
                    std::function<bool()> function = [=]() {
                        return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image),
                                                          "vkCmdBeginRenderPass()");
                    };
                    cb_node->validate_functions.push_back(function);
                }
                if (renderPass->attachment_first_read[i]) {
                    std::function<bool()> function = [=]() {
                        return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image),
                                                          "vkCmdBeginRenderPass()");
                    };
                    cb_node->validate_functions.push_back(function);
                }
            }
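            // Example (illustrative): with loadOps {LOAD, CLEAR, LOAD, CLEAR} the last
            // CLEAR is attachment 3, so clear_op_size is 4 and pClearValues needs at
            // least 4 entries (entries for the non-cleared attachments are ignored).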
            if (clear_op_size > pRenderPassBegin->clearValueCount) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            reinterpret_cast<uint64_t &>(renderPass), __LINE__, VALIDATION_ERROR_00442, "DS",
                            "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there must "
                            "be at least %u entries in the pClearValues array to account for attachment %u, the highest-indexed "
                            "attachment in renderPass 0x%" PRIx64 " that uses VK_ATTACHMENT_LOAD_OP_CLEAR. Note that the "
                            "pClearValues array is indexed by attachment number, so even if some pClearValues entries between 0 "
                            "and %u correspond to attachments that aren't cleared they will be ignored. %s",
                            pRenderPassBegin->clearValueCount, clear_op_size, clear_op_size - 1,
                            reinterpret_cast<uint64_t &>(renderPass), clear_op_size - 1,
                            validation_error_map[VALIDATION_ERROR_00442]);
            }
            skip_call |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
            skip_call |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin);
            skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass");
            skip_call |= ValidateDependencies(dev_data, framebuffer, renderPass);
            skip_call |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass");
            skip_call |= addCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
            cb_node->activeRenderPass = renderPass;
            // This is a shallow copy as that is all that is needed for now
            cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
            cb_node->activeSubpass = 0;
            cb_node->activeSubpassContents = contents;
            cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
            // Connect this framebuffer and its children to this cmdBuffer
            AddFramebufferBinding(dev_data, cb_node, framebuffer);
            // Transition attachments to the correct layouts for the first subpass
            TransitionSubpassLayouts(dev_data, cb_node, &cb_node->activeRenderPassBeginInfo, cb_node->activeSubpass);
        } else {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
        }
    }
    lock.unlock();
    if (!skip_call) {
        dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
        skip_call |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");

        // outsideRenderPass() reports the error but does not early-out, so guard against a null activeRenderPass here
        if (pCB->activeRenderPass && pCB->activeSubpass == pCB->activeRenderPass->createInfo.subpassCount - 1) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SUBPASS_INDEX, "DS",
                        "vkCmdNextSubpass(): Attempted to advance beyond final subpass");
        }
    }
    lock.unlock();

    if (skip_call)
        return;

    dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);

    if (pCB) {
        lock.lock();
        pCB->activeSubpass++;
        pCB->activeSubpassContents = contents;
        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
        auto framebuffer = getFramebufferState(dev_data, pCB->activeFramebuffer);
        if (rp_state) {
            if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SUBPASS_INDEX, "DS",
                            "vkCmdEndRenderPass(): Called before reaching final subpass");
            }

            for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
                auto pAttachment = &rp_state->createInfo.pAttachments[i];
                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
                                                         pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_STORE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
                                                                pAttachment->stencilStoreOp,
                                                                VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                }
            }
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
        skip_call |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
    }
    lock.unlock();

    if (skip_call)
        return;

    dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);

    if (pCB) {
        lock.lock();
        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpass = 0;
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}

static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
                                        uint32_t secondaryAttach, const char *msg) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                   "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64 " which has a render pass "
                   "that is not compatible with the Primary Cmd Buffer current render pass. "
                   "Attachment %u is not compatible with %u: %s",
                   reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg);
}

static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
                                            uint32_t secondaryAttach, bool is_multi) {
    bool skip_call = false;
    if (primaryPassCI->attachmentCount <= primaryAttach) {
        primaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
        secondaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
        return skip_call;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
                                                 "The first is unused while the second is not.");
        return skip_call;
    }
    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
                                                 "The second is unused while the first is not.");
        return skip_call;
    }
    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
        skip_call |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
    }
    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
        skip_call |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
    }
    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
        skip_call |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
    }
    return skip_call;
}
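
// Illustrative note on the compatibility rules above: attachments are compatible when
// their formats and sample counts match (and flags too, for multi-subpass passes), e.g.
//
//     primary:   pAttachments[0] = { VK_FORMAT_B8G8R8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, ... }
//     secondary: pAttachments[0] = { VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, ... }
//
// would be reported as "They have different formats."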

static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
    bool skip_call = false;
    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
                                                     secondaryPassCI, secondary_input_attach, is_multi);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
                                                     secondaryPassCI, secondary_color_attach, is_multi);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach,
                                                     secondaryBuffer, secondaryPassCI, secondary_resolve_attach, is_multi);
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach,
                                                 secondaryBuffer, secondaryPassCI, secondary_depthstencil_attach, is_multi);
    return skip_call;
}

// Verify that the given renderPass CreateInfos for the primary and secondary command buffers are compatible.
//  This function deals directly with the CreateInfo; there are overloaded versions below that can take the renderPass handle and
//  will then feed into this function
static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
                                            VkRenderPassCreateInfo const *secondaryPassCI) {
    bool skip_call = false;

    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
                             " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
                             " that has a subpassCount of %u.",
                             reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount,
                             reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount);
    } else {
        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
            skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
                                                      primaryPassCI->subpassCount > 1);
        }
    }
    return skip_call;
}
10872
10873static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
10874                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
10875    bool skip_call = false;
10876    if (!pSubCB->beginInfo.pInheritanceInfo) {
10877        return skip_call;
10878    }
10879    VkFramebuffer primary_fb = pCB->activeFramebuffer;
10880    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
10881    if (secondary_fb != VK_NULL_HANDLE) {
10882        if (primary_fb != secondary_fb) {
10883            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10884                                 DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
10885                                 "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64
10886                                 " which has a framebuffer 0x%" PRIx64
10887                                 " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ".",
10888                                 reinterpret_cast<uint64_t &>(secondaryBuffer), reinterpret_cast<uint64_t &>(secondary_fb),
10889                                 reinterpret_cast<uint64_t &>(primary_fb));
10890        }
10891        auto fb = getFramebufferState(dev_data, secondary_fb);
10892        if (!fb) {
10893            skip_call |=
10894                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10895                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10896                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
10897                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
10898            return skip_call;
10899        }
10900        auto cb_renderpass = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
10901        if (cb_renderpass && (cb_renderpass->renderPass != fb->createInfo.renderPass)) {
10902            skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
10903                                                         cb_renderpass->createInfo.ptr());
10904        }
10905    }
10906    return skip_call;
10907}
10908
10909static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
10910    bool skip_call = false;
10911    unordered_set<int> activeTypes;
10912    for (auto queryObject : pCB->activeQueries) {
10913        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10914        if (queryPoolData != dev_data->queryPoolMap.end()) {
10915            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
10916                pSubCB->beginInfo.pInheritanceInfo) {
10917                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
10918                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
10919                    skip_call |= log_msg(
10920                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10921                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10922                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10923                        "which has an active pipeline statistics query in query pool 0x%" PRIx64 ". The secondary command "
10924                        "buffer's inherited pipelineStatistics must all be supported by that queryPool.",
10925                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
10926                }
10927            }
10928            activeTypes.insert(queryPoolData->second.createInfo.queryType);
10929        }
10930    }
10931    for (auto queryObject : pSubCB->startedQueries) {
10932        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10933        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
10934            skip_call |=
10935                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10936                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10937                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10938                        "which has an active query pool 0x%" PRIx64 " of type %d, but a query of that type has been started on "
10939                        "secondary Cmd Buffer 0x%p.",
10940                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
10941                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
10942        }
10943    }
10944
10945    auto primary_pool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
10946    auto secondary_pool = getCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
10947    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
10948        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10949                             reinterpret_cast<uint64_t>(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
10950                             "vkCmdExecuteCommands(): Primary command buffer 0x%" PRIxLEAST64
10951                             " created in queue family %d has secondary command buffer 0x%" PRIxLEAST64 " created in queue family %d.",
10952                             reinterpret_cast<uint64_t>(pCB->commandBuffer), primary_pool->queueFamilyIndex,
10953                             reinterpret_cast<uint64_t>(pSubCB->commandBuffer), secondary_pool->queueFamilyIndex);
10954    }
10955
10956    return skip_call;
10957}
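
// Illustrative sketch (not part of the layer): to satisfy the pipeline-statistics
// check above, every statistic the secondary command buffer inherits must be
// supported by the query pool. The device/query_pool handles are placeholders.
//
//     VkQueryPoolCreateInfo qp_info = {};
//     qp_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
//     qp_info.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS;
//     qp_info.queryCount = 1;
//     qp_info.pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT |
//                                  VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
//     vkCreateQueryPool(device, &qp_info, nullptr, &query_pool);
//
//     // In the secondary's VkCommandBufferInheritanceInfo, inherit a subset of those bits:
//     inherit_info.pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT;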
10958
10959VKAPI_ATTR void VKAPI_CALL
10960CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
10961    bool skip_call = false;
10962    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10963    std::unique_lock<std::mutex> lock(global_lock);
10964    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10965    if (pCB) {
10966        GLOBAL_CB_NODE *pSubCB = NULL;
10967        for (uint32_t i = 0; i < commandBuffersCount; i++) {
10968            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
10969            if (!pSubCB) {
10970                skip_call |=
10971                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10972                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10973                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array.",
10974                            (void *)pCommandBuffers[i], i);
                continue; // pSubCB is null; the remaining checks in this loop would dereference it
10975            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
10976                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10977                                     __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10978                                     "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
10979                                     "array. All cmd buffers in pCommandBuffers array must be secondary.",
10980                                     (void *)pCommandBuffers[i], i);
10981            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
10982                auto secondary_rp_state = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
10983                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
10984                    skip_call |= log_msg(
10985                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10986                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
10987                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
10988                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
10989                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass);
10990                } else {
10991                    // Make sure render pass is compatible with parent command buffer pass if it has the continue bit set
10992                    if (secondary_rp_state && (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass)) {
10993                        skip_call |=
10994                            validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
10995                                                            pCommandBuffers[i], secondary_rp_state->createInfo.ptr());
10996                    }
10997                    //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
10998                    skip_call |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
10999                }
11000                string errorString = "";
11001                // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
11002                if (secondary_rp_state && (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) &&
11003                    !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
11004                                                     secondary_rp_state->createInfo.ptr(), errorString)) {
11005                    skip_call |= log_msg(
11006                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11007                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
11008                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
11009                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
11010                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
11011                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
11012                }
11013            }
11014            // TODO(mlentine): Move more logic into this method
11015            skip_call |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
11016            skip_call |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()");
11017            // Secondary cmdBuffers are considered pending execution starting w/
11018            // being recorded
11019            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
11020                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
11021                    skip_call |= log_msg(
11022                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11023                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
11024                        "Attempt to simultaneously execute command buffer 0x%" PRIxLEAST64
11025                        " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!",
11026                        (uint64_t)(pCB->commandBuffer));
11027                }
11028                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
11029                    // Warn that a non-simultaneous secondary cmd buffer makes the primary behave as non-simultaneous as well
11030                    skip_call |= log_msg(
11031                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11032                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
11033                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIxLEAST64
11034                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
11035                        "(0x%" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
11036                        "set, even though it does.",
11037                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
11038                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
11039                }
11040            }
11041            if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
11042                skip_call |=
11043                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11044                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
11045                            "vkCmdExecuteCommands(): Secondary Command Buffer "
11046                            "(0x%" PRIxLEAST64 ") cannot be executed while a query is in "
11047                            "flight, because inherited queries are not "
11048                            "supported on this device.",
11049                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
11050            }
11051            // Propagate layout transitions to the primary cmd buffer
11052            for (auto ilm_entry : pSubCB->imageLayoutMap) {
11053                SetLayout(pCB, ilm_entry.first, ilm_entry.second);
11054            }
11055            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
11056            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
11057            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
11058            for (auto &function : pSubCB->queryUpdates) {
11059                pCB->queryUpdates.push_back(function);
11060            }
11061        }
11062        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
11063        skip_call |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
11064    }
11065    lock.unlock();
11066    if (!skip_call)
11067        dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
11068}
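
// Illustrative sketch (not part of the layer): recording a primary command buffer that
// executes secondaries inside a render pass, the pattern the validation above checks.
// Beginning the render pass with VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS means
// the subpass contents are supplied only via vkCmdExecuteCommands. Handles are
// placeholders.
//
//     vkCmdBeginRenderPass(primary_cb, &rp_begin_info,
//                          VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
//     vkCmdExecuteCommands(primary_cb, 1, &secondary_cb);  // secondary begun with CONTINUE_BIT
//     vkCmdEndRenderPass(primary_cb);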
11069
11070// For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL
11071static bool ValidateMapImageLayouts(VkDevice device, DEVICE_MEM_INFO const *mem_info, VkDeviceSize offset,
11072                                    VkDeviceSize end_offset) {
11073    bool skip_call = false;
11074    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11075    // Iterate over all bound image ranges and verify that for any that overlap the
11076    //  map ranges, the layouts are VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL
11077    // TODO : This can be optimized if we store ranges based on starting address and early exit when we pass our range
11078    for (auto image_handle : mem_info->bound_images) {
11079        auto img_it = mem_info->bound_ranges.find(image_handle);
11080        if (img_it != mem_info->bound_ranges.end()) {
11081            if (rangesIntersect(dev_data, &img_it->second, offset, end_offset)) {
11082                std::vector<VkImageLayout> layouts;
11083                if (FindLayouts(dev_data, VkImage(image_handle), layouts)) {
11084                    for (auto layout : layouts) {
11085                        if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
11086                            skip_call |=
11087                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
11088                                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
11089                                                                                        "GENERAL or PREINITIALIZED are supported.",
11090                                        string_VkImageLayout(layout));
11091                        }
11092                    }
11093                }
11094            }
11095        }
11096    }
11097    return skip_call;
11098}
11099
11100VKAPI_ATTR VkResult VKAPI_CALL
11101MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
11102    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11103
11104    bool skip_call = false;
11105    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11106    std::unique_lock<std::mutex> lock(global_lock);
11107    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
11108    if (mem_info) {
11109        // TODO : This could be more fine-grained to track just the region that is valid
11110        mem_info->global_valid = true;
11111        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
11112        skip_call |= ValidateMapImageLayouts(device, mem_info, offset, end_offset);
11113        // TODO : Do we need to create new "bound_range" for the mapped range?
11114        SetMemRangesValid(dev_data, mem_info, offset, end_offset);
11115        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
11116             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
11117            skip_call |=
11118                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11119                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
11120                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
11121        }
11122    }
11123    skip_call |= ValidateMapMemRange(dev_data, mem, offset, size);
11124    lock.unlock();
11125
11126    if (!skip_call) {
11127        result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
11128        if (VK_SUCCESS == result) {
11129            lock.lock();
11130            // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
11131            storeMemRanges(dev_data, mem, offset, size);
11132            initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
11133            lock.unlock();
11134        }
11135    }
11136    return result;
11137}
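
// Illustrative sketch (not part of the layer): selecting a HOST_VISIBLE memory type
// before mapping, which avoids the MEMTRACK_INVALID_STATE error reported above.
// physical_device, device, and memory are placeholders.
//
//     VkPhysicalDeviceMemoryProperties mem_props;
//     vkGetPhysicalDeviceMemoryProperties(physical_device, &mem_props);
//     uint32_t type_index = UINT32_MAX;
//     for (uint32_t i = 0; i < mem_props.memoryTypeCount; ++i) {
//         if (mem_props.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
//             type_index = i;  // first host-visible type
//             break;
//         }
//     }
//     // ... allocate VkDeviceMemory with type_index, then:
//     void *data = nullptr;
//     vkMapMemory(device, memory, 0, VK_WHOLE_SIZE, 0, &data);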
11138
11139VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
11140    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11141    bool skip_call = false;
11142
11143    std::unique_lock<std::mutex> lock(global_lock);
11144    skip_call |= deleteMemRanges(dev_data, mem);
11145    lock.unlock();
11146    if (!skip_call) {
11147        dev_data->dispatch_table.UnmapMemory(device, mem);
11148    }
11149}
11150
11151static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
11152                                   const VkMappedMemoryRange *pMemRanges) {
11153    bool skip_call = false;
11154    for (uint32_t i = 0; i < memRangeCount; ++i) {
11155        auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory);
11156        if (mem_info) {
11157            if (mem_info->mem_range.offset > pMemRanges[i].offset) {
11158                skip_call |=
11159                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11160                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
11161                            "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
11162                            "(" PRINTF_SIZE_T_SPECIFIER ").",
11163                            funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mem_range.offset));
11164            }
11165
11166            const uint64_t dev_dataTerminus = (mem_info->mem_range.size == VK_WHOLE_SIZE)
11167                                                  ? mem_info->alloc_info.allocationSize
11168                                                  : (mem_info->mem_range.offset + mem_info->mem_range.size);
11169            if (pMemRanges[i].size != VK_WHOLE_SIZE && (dev_dataTerminus < (pMemRanges[i].offset + pMemRanges[i].size))) {
11170                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11171                                     VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
11172                                     MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
11173                                                                  ") exceeds the Memory Object's upper-bound "
11174                                                                  "(" PRINTF_SIZE_T_SPECIFIER ").",
11175                                     funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
11176                                     static_cast<size_t>(dev_dataTerminus));
11177            }
11178        }
11179    }
11180    return skip_call;
11181}
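
// Illustrative sketch (not part of the layer): a VkMappedMemoryRange passed to
// flush/invalidate must fall entirely within the currently mapped range, which is
// exactly what validateMemoryIsMapped() enforces above. Handles are placeholders.
//
//     vkMapMemory(device, memory, 256, 1024, 0, &data);  // maps bytes [256, 1280)
//
//     VkMappedMemoryRange range = {};
//     range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
//     range.memory = memory;
//     range.offset = 256;   // must not be below the mapped offset
//     range.size = 1024;    // offset + size must not exceed the mapped upper bound
//     vkFlushMappedMemoryRanges(device, 1, &range);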
11182
11183static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t memRangeCount,
11184                                                     const VkMappedMemoryRange *pMemRanges) {
11185    bool skip_call = false;
11186    for (uint32_t i = 0; i < memRangeCount; ++i) {
11187        auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory);
11188        if (mem_info) {
11189            if (mem_info->shadow_copy) {
11190                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
11191                                        ? mem_info->mem_range.size
11192                                        : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
11193                char *data = static_cast<char *>(mem_info->shadow_copy);
11194                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
11195                    if (data[j] != NoncoherentMemoryFillValue) {
11196                        skip_call |= log_msg(
11197                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11198                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
11199                            "Memory underflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
11200                    }
11201                }
11202                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
11203                    if (data[j] != NoncoherentMemoryFillValue) {
11204                        skip_call |= log_msg(
11205                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11206                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
11207                            "Memory overflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
11208                    }
11209                }
11210                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
11211            }
11212        }
11213    }
11214    return skip_call;
11215}
11216
11217static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t memory_range_count,
11218                                            const VkMappedMemoryRange *mem_ranges) {
11219    for (uint32_t i = 0; i < memory_range_count; ++i) {
11220        auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
11221        if (mem_info && mem_info->shadow_copy) {
11222            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
11223                                    ? mem_info->mem_range.size
11224                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
11225            char *data = static_cast<char *>(mem_info->shadow_copy);
11226            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
11227        }
11228    }
11229}
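
// Descriptive note on the two routines above: for non-coherent memory the layer hands
// the application a pointer into the middle of a padded shadow allocation, so writes
// that stray outside the mapped range land in guard bands filled with
// NoncoherentMemoryFillValue and are detected at flush time:
//
//     shadow_copy: [ guard (shadow_pad_size) | app data (size) | guard (shadow_pad_size) ]
//
// ValidateAndCopyNoncoherentMemoryToDriver() checks both guard bands and copies the
// app-data region to the real driver mapping; CopyNoncoherentMemoryFromDriver() copies
// it back after an invalidate.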
11230
11231VKAPI_ATTR VkResult VKAPI_CALL
11232FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
11233    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11234    bool skip_call = false;
11235    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11236
11237    std::unique_lock<std::mutex> lock(global_lock);
11238    skip_call |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, memRangeCount, pMemRanges);
11239    skip_call |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
11240    lock.unlock();
11241    if (!skip_call) {
11242        result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
11243    }
11244    return result;
11245}
11246
11247VKAPI_ATTR VkResult VKAPI_CALL
11248InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
11249    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11250    bool skip_call = false;
11251    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11252
11253    std::unique_lock<std::mutex> lock(global_lock);
11254    skip_call |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
11255    lock.unlock();
11256    if (!skip_call) {
11257        result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
11258        // Update our shadow copy with modified driver data
11259        CopyNoncoherentMemoryFromDriver(dev_data, memRangeCount, pMemRanges);
11260    }
11261    return result;
11262}
11263
11264VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
11265    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11266    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11267    bool skip_call = false;
11268    std::unique_lock<std::mutex> lock(global_lock);
11269    auto image_state = getImageState(dev_data, image);
11270    if (image_state) {
11271        // Track objects tied to memory
11272        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
11273        skip_call = SetMemBinding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
11274        VkMemoryRequirements memRequirements;
11275        lock.unlock();
11276        dev_data->dispatch_table.GetImageMemoryRequirements(device, image, &memRequirements);
11277        lock.lock();
11278
11279        // Track and validate bound memory range information
11280        auto mem_info = getMemObjInfo(dev_data, mem);
11281        if (mem_info) {
11282            skip_call |= InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, memRequirements,
11283                                                image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
11284            skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "vkBindImageMemory");
11285        }
11286
11287        print_mem_list(dev_data);
11288        lock.unlock();
11289        if (!skip_call) {
11290            result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
11291            lock.lock();
11292            image_state->binding.mem = mem;
11293            image_state->binding.offset = memoryOffset;
11294            image_state->binding.size = memRequirements.size;
11295            lock.unlock();
11296        }
11297    } else {
11298        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11299                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
11300                "vkBindImageMemory: Cannot find image 0x%" PRIx64 "; has it already been destroyed?",
11301                reinterpret_cast<const uint64_t &>(image));
11302    }
11303    return result;
11304}
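
// Illustrative sketch (not part of the layer): the bind pattern the checks above
// expect -- query requirements, allocate from a supported type, bind at an aligned
// offset. ChooseMemoryType is a hypothetical helper; other handles are placeholders.
//
//     VkMemoryRequirements reqs;
//     vkGetImageMemoryRequirements(device, image, &reqs);
//
//     VkMemoryAllocateInfo alloc_info = {};
//     alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
//     alloc_info.allocationSize = reqs.size;
//     alloc_info.memoryTypeIndex = ChooseMemoryType(reqs.memoryTypeBits);  // hypothetical
//     vkAllocateMemory(device, &alloc_info, nullptr, &memory);
//     vkBindImageMemory(device, image, memory, 0);  // offset must be a multiple of reqs.alignment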
11305
11306VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
11307    bool skip_call = false;
11308    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11309    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11310    std::unique_lock<std::mutex> lock(global_lock);
11311    auto event_state = getEventNode(dev_data, event);
11312    if (event_state) {
11313        event_state->needsSignaled = false;
11314        event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
11315        if (event_state->write_in_use) {
11316            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
11317                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11318                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
11319                                 reinterpret_cast<const uint64_t &>(event));
11320        }
11321    }
11322    lock.unlock();
11323    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
11324    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
11325    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
11326    for (auto queue_data : dev_data->queueMap) {
11327        auto event_entry = queue_data.second.eventToStageMap.find(event);
11328        if (event_entry != queue_data.second.eventToStageMap.end()) {
11329            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
11330        }
11331    }
11332    if (!skip_call)
11333        result = dev_data->dispatch_table.SetEvent(device, event);
11334    return result;
11335}
11336
11337VKAPI_ATTR VkResult VKAPI_CALL
11338QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
11339    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
11340    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11341    bool skip_call = false;
11342    std::unique_lock<std::mutex> lock(global_lock);
11343    auto pFence = getFenceNode(dev_data, fence);
11344    auto pQueue = getQueueNode(dev_data, queue);
11345
11346    // First verify that fence is not in use
11347    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
11348
11349    if (pFence) {
11350        SubmitFence(pQueue, pFence, bindInfoCount);
11351    }
11352
11353    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
11354        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
11355        // Track objects tied to memory
11356        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
11357            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
11358                auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
11359                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
11360                                        (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
11361                                        "vkQueueBindSparse"))
11362                    skip_call = true;
11363            }
11364        }
11365        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
11366            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
11367                auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
11368                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
11369                                        (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11370                                        "vkQueueBindSparse"))
11371                    skip_call = true;
11372            }
11373        }
11374        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
11375            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
11376                auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
11377                // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
11378                VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
11379                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
11380                                        (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11381                                        "vkQueueBindSparse"))
11382                    skip_call = true;
11383            }
11384        }
11385
11386        std::vector<SEMAPHORE_WAIT> semaphore_waits;
11387        std::vector<VkSemaphore> semaphore_signals;
11388        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
11389            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
11390            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11391            if (pSemaphore) {
11392                if (pSemaphore->signaled) {
11393                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
11394                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
11395                        pSemaphore->in_use.fetch_add(1);
11396                    }
11397                    pSemaphore->signaler.first = VK_NULL_HANDLE;
11398                    pSemaphore->signaled = false;
11399                } else {
11400                    skip_call |=
11401                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11402                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11403                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64
11404                                " that has no way to be signaled.",
11405                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
11406                }
11407            }
11408        }
11409        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
11410            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
11411            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11412            if (pSemaphore) {
11413                if (pSemaphore->signaled) {
11414                    skip_call |=
11415                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11416                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11417                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
11418                                ", but that semaphore is already signaled.",
11419                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
11420                } else {
11422                    pSemaphore->signaler.first = queue;
11423                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
11424                    pSemaphore->signaled = true;
11425                    pSemaphore->in_use.fetch_add(1);
11426                    semaphore_signals.push_back(semaphore);
11427                }
11428            }
11429        }
11430
11431        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11432                                         semaphore_waits,
11433                                         semaphore_signals,
11434                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
11435    }
11436
11437    if (pFence && !bindInfoCount) {
11438        // No work to do, just dropping a fence in the queue by itself.
11439        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11440                                         std::vector<SEMAPHORE_WAIT>(),
11441                                         std::vector<VkSemaphore>(),
11442                                         fence);
11443    }
11444
11445    print_mem_list(dev_data);
11446    lock.unlock();
11447
11448    if (!skip_call)
11449        return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
11450
11451    return result;
11452}
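
// Illustrative sketch (not part of the layer): a minimal opaque image bind, the shape
// of data the loops above walk. reqs comes from vkGetImageMemoryRequirements; other
// handles are placeholders.
//
//     VkSparseMemoryBind bind = {};
//     bind.resourceOffset = 0;
//     bind.size = reqs.size;
//     bind.memory = memory;
//     bind.memoryOffset = 0;
//
//     VkSparseImageOpaqueMemoryBindInfo opaque_bind = {};
//     opaque_bind.image = image;
//     opaque_bind.bindCount = 1;
//     opaque_bind.pBinds = &bind;
//
//     VkBindSparseInfo bind_info = {};
//     bind_info.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
//     bind_info.imageOpaqueBindCount = 1;
//     bind_info.pImageOpaqueBinds = &opaque_bind;
//     vkQueueBindSparse(queue, 1, &bind_info, fence);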
11453
11454VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
11455                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
11456    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11457    VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
11458    if (result == VK_SUCCESS) {
11459        std::lock_guard<std::mutex> lock(global_lock);
11460        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
11461        sNode->signaler.first = VK_NULL_HANDLE;
11462        sNode->signaler.second = 0;
11463        sNode->signaled = false;
11464    }
11465    return result;
11466}
11467
11468VKAPI_ATTR VkResult VKAPI_CALL
11469CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
11470    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11471    VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
11472    if (result == VK_SUCCESS) {
11473        std::lock_guard<std::mutex> lock(global_lock);
11474        dev_data->eventMap[*pEvent].needsSignaled = false;
11475        dev_data->eventMap[*pEvent].write_in_use = 0;
11476        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
11477    }
11478    return result;
11479}
11480
11481static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, VkSwapchainCreateInfoKHR const *pCreateInfo,
11482                                              SURFACE_STATE *surface_state, SWAPCHAIN_NODE *old_swapchain_state) {
11483    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
11484
11485    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
11486        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11487                    reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
11488                    "vkCreateSwapchainKHR(): surface has an existing swapchain other than oldSwapchain"))
11489            return true;
11490    }
11491    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
11492        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11493                    reinterpret_cast<uint64_t const &>(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE,
11494                    "DS", "vkCreateSwapchainKHR(): pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface"))
11495            return true;
11496    }
11497
11498    return false;
11499}
11500
11501VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
11502                                                  const VkAllocationCallbacks *pAllocator,
11503                                                  VkSwapchainKHR *pSwapchain) {
11504    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11505    auto surface_state = getSurfaceState(dev_data->instance_data, pCreateInfo->surface);
11506    auto old_swapchain_state = getSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
11507
11508    if (PreCallValidateCreateSwapchainKHR(dev_data, pCreateInfo, surface_state, old_swapchain_state))
11509        return VK_ERROR_VALIDATION_FAILED_EXT;
11510
11511    VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
11512
11513    if (VK_SUCCESS == result) {
11514        std::lock_guard<std::mutex> lock(global_lock);
11515        auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
11516        surface_state->swapchain = swapchain_state.get();
11517        dev_data->device_extensions.swapchainMap[*pSwapchain] = std::move(swapchain_state);
11518    } else {
11519        surface_state->swapchain = nullptr;
11520    }
11521
11522    // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
11523    surface_state->old_swapchain = old_swapchain_state;
11524
11525    return result;
11526}
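
// Illustrative sketch (not part of the layer): recreating a swapchain after a window
// resize, the oldSwapchain path PreCallValidateCreateSwapchainKHR() reasons about.
// Handles are placeholders.
//
//     swapchain_ci.surface = surface;              // same surface as old_swapchain
//     swapchain_ci.oldSwapchain = old_swapchain;
//     VkSwapchainKHR new_swapchain;
//     vkCreateSwapchainKHR(device, &swapchain_ci, nullptr, &new_swapchain);
//     // old_swapchain is retired even if creation failed; destroy it once its images
//     // are no longer in use:
//     vkDestroySwapchainKHR(device, old_swapchain, nullptr);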
11527
11528VKAPI_ATTR void VKAPI_CALL
11529DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
11530    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11531    bool skip_call = false;
11532
11533    std::unique_lock<std::mutex> lock(global_lock);
11534    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
11535    if (swapchain_data) {
11536        if (swapchain_data->images.size() > 0) {
11537            for (auto swapchain_image : swapchain_data->images) {
11538                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
11539                if (image_sub != dev_data->imageSubresourceMap.end()) {
11540                    for (auto imgsubpair : image_sub->second) {
11541                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
11542                        if (image_item != dev_data->imageLayoutMap.end()) {
11543                            dev_data->imageLayoutMap.erase(image_item);
11544                        }
11545                    }
11546                    dev_data->imageSubresourceMap.erase(image_sub);
11547                }
11548                skip_call |=
11549                    ClearMemoryObjectBindings(dev_data, (uint64_t)swapchain_image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
11550                dev_data->imageMap.erase(swapchain_image);
11551            }
11552        }
11553
11554        auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
11555        if (surface_state) {
11556            if (surface_state->swapchain == swapchain_data)
11557                surface_state->swapchain = nullptr;
11558            if (surface_state->old_swapchain == swapchain_data)
11559                surface_state->old_swapchain = nullptr;
11560        }
11561
11562        dev_data->device_extensions.swapchainMap.erase(swapchain);
11563    }
11564    lock.unlock();
11565    if (!skip_call)
11566        dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
11567}
11568
11569VKAPI_ATTR VkResult VKAPI_CALL
11570GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
11571    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11572    VkResult result = dev_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
11573
11574    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
11575        // This should never happen and is checked by param checker.
11576        if (!pCount)
11577            return result;
11578        std::lock_guard<std::mutex> lock(global_lock);
11579        const size_t count = *pCount;
11580        auto swapchain_node = getSwapchainNode(dev_data, swapchain);
11581        if (swapchain_node && !swapchain_node->images.empty()) {
11582            // TODO : Not sure I like the memcmp here, but it works
11583            const bool mismatch = (swapchain_node->images.size() != count ||
11584                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
11585            if (mismatch) {
11586                // TODO: Verify against Valid Usage section of extension
11587                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11588                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
11589                        "vkGetSwapchainImagesKHR(0x%" PRIx64
11590                        ") returned mismatching image data across calls",
11591                        (uint64_t)(swapchain));
11592            }
11593        }
        if (!swapchain_node)
            return result; // No state is tracked for this swapchain, so there is nothing to record
11594        for (uint32_t i = 0; i < *pCount; ++i) {
11595            IMAGE_LAYOUT_NODE image_layout_node;
11596            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
11597            image_layout_node.format = swapchain_node->createInfo.imageFormat;
11598            // Add imageMap entries for each swapchain image
11599            VkImageCreateInfo image_ci = {};
11600            image_ci.mipLevels = 1;
11601            image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
11602            image_ci.usage = swapchain_node->createInfo.imageUsage;
11603            image_ci.format = swapchain_node->createInfo.imageFormat;
11604            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
11605            image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
11606            image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
11607            image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
11608            dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
11609            auto &image_state = dev_data->imageMap[pSwapchainImages[i]];
11610            image_state->valid = false;
11611            image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
11612            swapchain_node->images.push_back(pSwapchainImages[i]);
11613            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
11614            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
11615            dev_data->imageLayoutMap[subpair] = image_layout_node;
11616            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
11617        }
11618    }
11619    return result;
11620}
11621
11622VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
11623    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
11624    bool skip_call = false;
11625
11626    std::lock_guard<std::mutex> lock(global_lock);
11627    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11628        auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11629        if (pSemaphore && !pSemaphore->signaled) {
11630            skip_call |=
11631                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11632                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11633                            "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
11634                            reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
11635        }
11636    }
11637
11638    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
11639        auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11640        if (swapchain_data) {
11641            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
11642                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11643                                     reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE,
11644                                     "DS", "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
11645                                     pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
11646            } else {
11648                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11649                auto image_state = getImageState(dev_data, image);
11650                skip_call |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");
11651
11652                if (!image_state->acquired) {
11653                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11654                                         reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED,
11655                                         "DS", "vkQueuePresentKHR: Swapchain image index %u has not been acquired.",
11656                                         pPresentInfo->pImageIndices[i]);
11657                }
11658
11659                vector<VkImageLayout> layouts;
11660                if (FindLayouts(dev_data, image, layouts)) {
11661                    for (auto layout : layouts) {
11662                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
11663                            skip_call |=
11664                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
11665                                            reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
11666                                            "Images passed to present must be in layout "
11667                                            "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but is in %s",
11668                                            string_VkImageLayout(layout));
11669                        }
11670                    }
11671                }
11672            }
11673        }
11674    }
11675
11676    if (skip_call) {
11677        return VK_ERROR_VALIDATION_FAILED_EXT;
11678    }
11679
11680    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
11681
11682    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
11683        // Semaphore waits occur before error generation, if the call reached
11684        // the ICD. (Confirm?)
11685        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11686            auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11687            if (pSemaphore) {
11688                pSemaphore->signaler.first = VK_NULL_HANDLE;
11689                pSemaphore->signaled = false;
11690            }
11691        }
11692
11693        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
11694            // Note: this is imperfect, in that we can get confused about what
11695            // did or didn't succeed-- but if the app does that, it's confused
11696            // itself just as much.
11697            auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
11698
11699            if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR)
11700                continue; // this present didn't actually happen.
11701
11702            // Mark the image as having been released to the WSI
11703            auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11704            auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11705            auto image_state = getImageState(dev_data, image);
11706            image_state->acquired = false;
11707        }
11708
11709        // Note: even though presentation is directed to a queue, there is no
11710        // direct ordering between QP and subsequent work, so QP (and its
11711        // semaphore waits) /never/ participate in any completion proof.
11712    }
11713
11714    return result;
11715}
11716
11717VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
11718                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
11719                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
11720    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11721    std::unique_lock<std::mutex> lock(global_lock);
11722    VkResult result =
11723        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
11724    return result;
11725}
11726
11727VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11728                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11729    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11730    bool skip_call = false;
11731
11732    std::unique_lock<std::mutex> lock(global_lock);
11733
11734    if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
11735        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11736                             reinterpret_cast<uint64_t &>(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
11737                             "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
11738                             "to determine the completion of this operation.");
11739    }
11740
11741    auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11742    if (pSemaphore && pSemaphore->signaled) {
11743        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11744                             reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11745                             "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
11746    }
11747
11748    auto pFence = getFenceNode(dev_data, fence);
11749    if (pFence) {
11750        skip_call |= ValidateFenceForSubmit(dev_data, pFence);
11751    }
11752    lock.unlock();
11753
11754    if (skip_call)
11755        return VK_ERROR_VALIDATION_FAILED_EXT;
11756
11757    VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
11758
11759    lock.lock();
11760    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
11761        if (pFence) {
11762            pFence->state = FENCE_INFLIGHT;
11763            pFence->signaler.first = VK_NULL_HANDLE;   // ANI isn't on a queue, so this can't participate in a completion proof.
11764        }
11765
11766        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
11767        if (pSemaphore) {
11768            pSemaphore->signaled = true;
11769            pSemaphore->signaler.first = VK_NULL_HANDLE;
11770        }
11771
11772        // Mark the image as acquired.
11773        auto swapchain_data = getSwapchainNode(dev_data, swapchain);
11774        auto image = swapchain_data->images[*pImageIndex];
11775        auto image_state = getImageState(dev_data, image);
11776        image_state->acquired = true;
11777    }
11778    lock.unlock();
11779
11780    return result;
11781}
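
// Illustrative sketch (not part of the layer): a typical acquire/submit/present chain
// that provides the synchronization the checks above require. Handles are placeholders.
//
//     uint32_t image_index;
//     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX,
//                           image_available_semaphore, VK_NULL_HANDLE, &image_index);
//     // ... submit rendering work that waits on image_available_semaphore and
//     //     signals render_finished_semaphore ...
//     VkPresentInfoKHR present_info = {};
//     present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
//     present_info.waitSemaphoreCount = 1;
//     present_info.pWaitSemaphores = &render_finished_semaphore;
//     present_info.swapchainCount = 1;
//     present_info.pSwapchains = &swapchain;
//     present_info.pImageIndices = &image_index;
//     vkQueuePresentKHR(queue, &present_info);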
11782
VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
                                                        VkPhysicalDevice *pPhysicalDevices) {
    bool skip_call = false;
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);

    if (instance_data) {
        // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
        if (NULL == pPhysicalDevices) {
            instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
        } else {
            if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
                // Flag warning here. You can call this without having queried the count, but it may not be
                // robust on platforms with multiple physical devices.
                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
                                     0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
                                     "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
                                     "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
            }
            // TODO: Could also flag a warning if re-calling this function in QUERY_DETAILS state
            else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
                // The app's count is not required to match the actual count, so this is only a warning
                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                                     "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
                                     "supported by this instance is %u.",
                                     *pPhysicalDeviceCount, instance_data->physical_devices_count);
            }
            instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
        }
        if (skip_call) {
            return VK_ERROR_VALIDATION_FAILED_EXT;
        }
        VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
        if (NULL == pPhysicalDevices) {
            instance_data->physical_devices_count = *pPhysicalDeviceCount;
        } else if (result == VK_SUCCESS) { // Save physical devices
            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
                auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
                phys_device_state.phys_device = pPhysicalDevices[i];
                // Init actual features for each physical device
                instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
            }
        }
        return result;
    } else {
        // instance_data is null in this branch, so its report_data cannot be used to log the
        // invalid-instance message; fall through and fail validation.
    }
    return VK_ERROR_VALIDATION_FAILED_EXT;
}

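// Illustrative only (not part of the layer): the two-call idiom the state tracking above
// expects, assuming 'instance' is a valid VkInstance.
//
//     uint32_t count = 0;
//     vkEnumeratePhysicalDevices(instance, &count, NULL);         // transitions to QUERY_COUNT
//     std::vector<VkPhysicalDevice> gpus(count);
//     vkEnumeratePhysicalDevices(instance, &count, gpus.data());  // transitions to QUERY_DETAILS
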
VKAPI_ATTR void VKAPI_CALL
GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                       VkQueueFamilyProperties *pQueueFamilyProperties) {
    bool skip_call = false;
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
    if (physical_device_state) {
        if (!pQueueFamilyProperties) {
            physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
        } else {
            // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to
            // get count
            if (UNCALLED == physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
                                     "Call sequence has vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL "
                                     "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ "
                                     "NULL pQueueFamilyProperties to query pCount.");
            }
            // Then verify that the pCount passed in on the second call matches what was returned on the first
            if (physical_device_state->queueFamilyPropertiesCount != *pCount) {
                // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
                // provide as warning
                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                                     "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count "
                                     "supported by this physicalDevice is %u.",
                                     *pCount, physical_device_state->queueFamilyPropertiesCount);
            }
            physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
        }
        if (skip_call) {
            return;
        }
        instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, pQueueFamilyProperties);
        if (!pQueueFamilyProperties) {
            physical_device_state->queueFamilyPropertiesCount = *pCount;
        } else { // Save queue family properties
            if (physical_device_state->queue_family_properties.size() < *pCount)
                physical_device_state->queue_family_properties.resize(*pCount);
            for (uint32_t i = 0; i < *pCount; i++) {
                physical_device_state->queue_family_properties[i] = pQueueFamilyProperties[i];
            }
        }
    } else {
        log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                __LINE__, DEVLIMITS_INVALID_PHYSICAL_DEVICE, "DL",
                "Invalid physicalDevice (0x%" PRIxLEAST64 ") passed into vkGetPhysicalDeviceQueueFamilyProperties().",
                (uint64_t)physicalDevice);
    }
}

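// The same count-then-query idiom applies here; illustrative only, assuming a valid
// 'physical_device':
//
//     uint32_t family_count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &family_count, NULL);
//     std::vector<VkQueueFamilyProperties> families(family_count);
//     vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &family_count, families.data());
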
template <typename TCreateInfo, typename FPtr>
static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
                              VkSurfaceKHR *pSurface, FPtr fptr) {
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);

    // Call down the call chain:
    VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);

    if (result == VK_SUCCESS) {
        std::unique_lock<std::mutex> lock(global_lock);
        instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
        lock.unlock();
    }

    return result;
}

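// CreateSurface above is shared by every platform entrypoint below; the per-platform function
// is selected with a pointer-to-member-function into the dispatch table. A minimal sketch of
// that mechanism (names hypothetical, not part of this layer):
//
//     struct Table { int f(int x) { return x + 1; } };
//     int (Table::*fptr)(int) = &Table::f;
//     Table t;
//     int y = (t.*fptr)(41);  // equivalent to t.f(41)
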
VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
    bool skip_call = false;
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto surface_state = getSurfaceState(instance_data, surface);

    if (surface_state) {
        // TODO: track swapchains created from this surface.
        instance_data->surface_map.erase(surface);
    }
    lock.unlock();

    if (!skip_call) {
        // Call down the call chain:
        instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
    }
}

#ifdef VK_USE_PLATFORM_ANDROID_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
}
#endif // VK_USE_PLATFORM_ANDROID_KHR

#ifdef VK_USE_PLATFORM_MIR_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
}
#endif // VK_USE_PLATFORM_MIR_KHR

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
}
#endif // VK_USE_PLATFORM_WAYLAND_KHR

#ifdef VK_USE_PLATFORM_WIN32_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
}
#endif // VK_USE_PLATFORM_WIN32_KHR

#ifdef VK_USE_PLATFORM_XCB_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
}
#endif // VK_USE_PLATFORM_XCB_KHR

#ifdef VK_USE_PLATFORM_XLIB_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
}
#endif // VK_USE_PLATFORM_XLIB_KHR

VKAPI_ATTR VkResult VKAPI_CALL
CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        std::lock_guard<std::mutex> lock(global_lock);
        res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
    }
    return res;
}

VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
                                                         VkDebugReportCallbackEXT msgCallback,
                                                         const VkAllocationCallbacks *pAllocator) {
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                  const char *pLayerName, uint32_t *pCount,
                                                                  VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);

    assert(physicalDevice);

    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

static PFN_vkVoidFunction
intercept_core_instance_command(const char *name);

static PFN_vkVoidFunction
intercept_core_device_command(const char *name);

static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev);

static PFN_vkVoidFunction
intercept_khr_surface_command(const char *name, VkInstance instance);

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
    if (proc)
        return proc;

    assert(dev);

    proc = intercept_khr_swapchain_command(funcName, dev);
    if (proc)
        return proc;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    auto &table = dev_data->dispatch_table;
    if (!table.GetDeviceProcAddr)
        return nullptr;
    return table.GetDeviceProcAddr(dev, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
    if (!proc)
        proc = intercept_core_device_command(funcName);
    if (!proc)
        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
    if (!proc)
        proc = intercept_khr_surface_command(funcName, instance);
    if (proc)
        return proc;

    assert(instance);

    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    proc = debug_report_get_instance_proc_addr(instance_data->report_data, funcName);
    if (proc)
        return proc;

    auto &table = instance_data->dispatch_table;
    if (!table.GetInstanceProcAddr)
        return nullptr;
    return table.GetInstanceProcAddr(instance, funcName);
}

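// Resolution order used by GetInstanceProcAddr/GetDeviceProcAddr above: this layer's intercept
// tables are consulted first (core commands, then WSI commands gated on the corresponding
// extension being enabled), then the debug-report helpers, and only then does the query fall
// through to the next layer or ICD via the dispatch table.
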
static PFN_vkVoidFunction
intercept_core_instance_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_instance_commands[] = {
        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
        { "vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices) },
        { "vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties) },
        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
        { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
        { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
        { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
        if (!strcmp(core_instance_commands[i].name, name))
            return core_instance_commands[i].proc;
    }

    return nullptr;
}

static PFN_vkVoidFunction
intercept_core_device_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_device_commands[] = {
        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
        if (!strcmp(core_device_commands[i].name, name))
            return core_device_commands[i].proc;
    }

    return nullptr;
}

static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } khr_swapchain_commands[] = {
        { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
        { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
        { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
        { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
        { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
    };
    layer_data *dev_data = nullptr;

    if (dev) {
        dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
        if (!dev_data->device_extensions.wsi_enabled)
            return nullptr;
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
        if (!strcmp(khr_swapchain_commands[i].name, name))
            return khr_swapchain_commands[i].proc;
    }

    if (dev_data) {
        if (!dev_data->device_extensions.wsi_display_swapchain_enabled)
            return nullptr;
    }

    if (!strcmp("vkCreateSharedSwapchainsKHR", name))
        return reinterpret_cast<PFN_vkVoidFunction>(CreateSharedSwapchainsKHR);

    return nullptr;
}

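// Note: when a device is supplied, the swapchain entrypoints above are only returned if
// VK_KHR_swapchain was enabled at device creation (and vkCreateSharedSwapchainsKHR additionally
// requires VK_KHR_display_swapchain); otherwise nullptr is returned and GetDeviceProcAddr
// passes the query down the chain.
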
static PFN_vkVoidFunction
intercept_khr_surface_command(const char *name, VkInstance instance) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
        bool instance_layer_data::*enable;
    } khr_surface_commands[] = {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        {"vkCreateAndroidSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateAndroidSurfaceKHR),
            &instance_layer_data::androidSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
        {"vkCreateMirSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateMirSurfaceKHR),
            &instance_layer_data::mirSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
        {"vkCreateWaylandSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWaylandSurfaceKHR),
            &instance_layer_data::waylandSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
        {"vkCreateWin32SurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWin32SurfaceKHR),
            &instance_layer_data::win32SurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
        {"vkCreateXcbSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXcbSurfaceKHR),
            &instance_layer_data::xcbSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
        {"vkCreateXlibSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXlibSurfaceKHR),
            &instance_layer_data::xlibSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_XLIB_KHR
        {"vkDestroySurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySurfaceKHR),
            &instance_layer_data::surfaceExtensionEnabled},
    };

    instance_layer_data *instance_data = nullptr;
    if (instance) {
        instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_surface_commands); i++) {
        if (!strcmp(khr_surface_commands[i].name, name)) {
            if (instance_data && !(instance_data->*(khr_surface_commands[i].enable)))
                return nullptr;
            return khr_surface_commands[i].proc;
        }
    }

    return nullptr;
}
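
// The 'enable' field in the table above is a pointer-to-data-member, which lets the lookup
// loop test a per-instance flag chosen when the table was built. A minimal sketch of that
// mechanism (names hypothetical, not part of this layer):
//
//     struct Flags { bool xcb; bool win32; };
//     bool Flags::*enable = &Flags::xcb;
//     Flags flags = {true, false};
//     bool on = flags.*enable;  // reads flags.xcb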

} // namespace core_validation

// vk_layer_logging.h expects these to be defined

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                VkDebugReportCallbackEXT msgCallback,
                                const VkAllocationCallbacks *pAllocator) {
    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

// loader-layer interface v0, just wrappers since there is only a layer

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    // The loader-layer interface dispatches this call with physicalDevice == VK_NULL_HANDLE;
    // the layer entrypoint handles a null handle internally.
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // The loader-layer interface dispatches this call with physicalDevice == VK_NULL_HANDLE;
    // the layer entrypoint handles a null handle internally.
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}
