core_validation.cpp revision e2f07119052936753dc3dde7f86d4c46cfd0f1c5
1/* Copyright (c) 2015-2017 The Khronos Group Inc.
2 * Copyright (c) 2015-2017 Valve Corporation
3 * Copyright (c) 2015-2017 LunarG, Inc.
4 * Copyright (C) 2015-2017 Google Inc.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 *     http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * Author: Cody Northrop <cnorthrop@google.com>
19 * Author: Michael Lentine <mlentine@google.com>
20 * Author: Tobin Ehlis <tobine@google.com>
21 * Author: Chia-I Wu <olv@google.com>
22 * Author: Chris Forbes <chrisf@ijw.co.nz>
23 * Author: Mark Lobodzinski <mark@lunarg.com>
24 * Author: Ian Elliott <ianelliott@google.com>
25 * Author: Dave Houlton <daveh@lunarg.com>
26 * Author: Dustin Graves <dustin@lunarg.com>
27 * Author: Jeremy Hayes <jeremy@lunarg.com>
28 * Author: Jon Ashburn <jon@lunarg.com>
29 * Author: Karl Schultz <karl@lunarg.com>
30 * Author: Mark Young <marky@lunarg.com>
31 * Author: Mike Schuchardt <mikes@lunarg.com>
32 * Author: Mike Weiblen <mikew@lunarg.com>
33 * Author: Tony Barbour <tony@LunarG.com>
34 */
35
36// Allow use of STL min and max functions in Windows
37#define NOMINMAX
38
39#include <algorithm>
40#include <assert.h>
41#include <iostream>
42#include <list>
43#include <map>
44#include <memory>
45#include <mutex>
46#include <set>
47#include <sstream>
48#include <stdio.h>
49#include <stdlib.h>
50#include <string.h>
51#include <string>
52#include <inttypes.h>
53
54#include "vk_loader_platform.h"
55#include "vk_dispatch_table_helper.h"
56#include "vk_enum_string_helper.h"
57#if defined(__GNUC__)
58#pragma GCC diagnostic ignored "-Wwrite-strings"
59#endif
60#if defined(__GNUC__)
61#pragma GCC diagnostic warning "-Wwrite-strings"
62#endif
63#include "core_validation.h"
64#include "buffer_validation.h"
65#include "shader_validation.h"
66#include "vk_layer_table.h"
67#include "vk_layer_data.h"
68#include "vk_layer_extension_utils.h"
69#include "vk_layer_utils.h"
70
71#if defined __ANDROID__
72#include <android/log.h>
73#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
74#else
75#define LOGCONSOLE(...)      \
76    {                        \
77        printf(__VA_ARGS__); \
78        printf("\n");        \
79    }
80#endif
81
82// TODO: remove on NDK update (r15 will probably have proper STL impl)
83#ifdef __ANDROID__
84namespace std {
85
86template <typename T>
87std::string to_string(T var) {
88    std::ostringstream ss;
89    ss << var;
90    return ss.str();
91}
92}
93#endif
94
95// This intentionally includes a cpp file
96#include "vk_safe_struct.cpp"
97
98using mutex_t = std::mutex;
99using lock_guard_t = std::lock_guard<mutex_t>;
100using unique_lock_t = std::unique_lock<mutex_t>;
101
102namespace core_validation {
103
104using std::unordered_map;
105using std::unordered_set;
106using std::unique_ptr;
107using std::vector;
108using std::string;
109using std::stringstream;
110using std::max;
111
112// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
113// Object value will be used to identify them internally.
114static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
115// 2nd special memory handle used to flag object as unbound from memory
116static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
117
118// A special value of (0xFFFFFFFF, 0xFFFFFFFF) indicates that the surface size will be determined
119// by the extent of a swapchain targeting the surface.
120static const uint32_t kSurfaceSizeFromSwapchain = 0xFFFFFFFFu;
121
122struct instance_layer_data {
123    VkInstance instance = VK_NULL_HANDLE;
124    debug_report_data *report_data = nullptr;
125    std::vector<VkDebugReportCallbackEXT> logging_callback;
126    VkLayerInstanceDispatchTable dispatch_table;
127
128    CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
129    uint32_t physical_devices_count = 0;
130    CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
131    uint32_t physical_device_groups_count = 0;
132    CHECK_DISABLED disabled = {};
133
134    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
135    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;
136
137    InstanceExtensions extensions;
138};
139
140struct layer_data {
141    debug_report_data *report_data = nullptr;
142    VkLayerDispatchTable dispatch_table;
143
144    DeviceExtensions extensions = {};
145    unordered_set<VkQueue> queues;  // All queues under given device
146    // Layer specific data
147    unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
148    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
149    unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
150    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
151    unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
152    unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
153    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
154    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
155    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
156    unordered_map<VkDescriptorSetLayout, std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> descriptorSetLayoutMap;
157    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
158    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
159    unordered_map<VkFence, FENCE_NODE> fenceMap;
160    unordered_map<VkQueue, QUEUE_STATE> queueMap;
161    unordered_map<VkEvent, EVENT_STATE> eventMap;
162    unordered_map<QueryObject, bool> queryToStateMap;
163    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
164    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
165    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
166    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
167    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
168    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
169    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_STATE>> renderPassMap;
170    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
171    unordered_map<VkDescriptorUpdateTemplateKHR, unique_ptr<TEMPLATE_STATE>> desc_template_map;
172    unordered_map<VkSwapchainKHR, std::unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
173
174    VkDevice device = VK_NULL_HANDLE;
175    VkPhysicalDevice physical_device = VK_NULL_HANDLE;
176
177    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance
178
179    VkPhysicalDeviceFeatures enabled_features = {};
180    // Device specific data
181    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
182    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
183    VkPhysicalDeviceProperties phys_dev_props = {};
184};
185
186// TODO : Do we need to guard access to layer_data_map w/ lock?
187static unordered_map<void *, layer_data *> layer_data_map;
188static unordered_map<void *, instance_layer_data *> instance_layer_data_map;
189
190static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
191
192static const VkLayerProperties global_layer = {
193    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
194};
195
196template <class TCreateInfo>
197void ValidateLayerOrdering(const TCreateInfo &createInfo) {
198    bool foundLayer = false;
199    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
200        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
201            foundLayer = true;
202        }
203        // This has to be logged to console as we don't have a callback at this point.
204        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
205            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
206        }
207    }
208}
209
210// TODO : This can be much smarter, using separate locks for separate global data
211static mutex_t global_lock;
212
213// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
214IMAGE_VIEW_STATE *GetImageViewState(const layer_data *dev_data, VkImageView image_view) {
215    auto iv_it = dev_data->imageViewMap.find(image_view);
216    if (iv_it == dev_data->imageViewMap.end()) {
217        return nullptr;
218    }
219    return iv_it->second.get();
220}
221// Return sampler node ptr for specified sampler or else NULL
222SAMPLER_STATE *GetSamplerState(const layer_data *dev_data, VkSampler sampler) {
223    auto sampler_it = dev_data->samplerMap.find(sampler);
224    if (sampler_it == dev_data->samplerMap.end()) {
225        return nullptr;
226    }
227    return sampler_it->second.get();
228}
229// Return image state ptr for specified image or else NULL
230IMAGE_STATE *GetImageState(const layer_data *dev_data, VkImage image) {
231    auto img_it = dev_data->imageMap.find(image);
232    if (img_it == dev_data->imageMap.end()) {
233        return nullptr;
234    }
235    return img_it->second.get();
236}
237// Return buffer state ptr for specified buffer or else NULL
238BUFFER_STATE *GetBufferState(const layer_data *dev_data, VkBuffer buffer) {
239    auto buff_it = dev_data->bufferMap.find(buffer);
240    if (buff_it == dev_data->bufferMap.end()) {
241        return nullptr;
242    }
243    return buff_it->second.get();
244}
245// Return swapchain node for specified swapchain or else NULL
246SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
247    auto swp_it = dev_data->swapchainMap.find(swapchain);
248    if (swp_it == dev_data->swapchainMap.end()) {
249        return nullptr;
250    }
251    return swp_it->second.get();
252}
253// Return buffer node ptr for specified buffer or else NULL
254BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
255    auto bv_it = dev_data->bufferViewMap.find(buffer_view);
256    if (bv_it == dev_data->bufferViewMap.end()) {
257        return nullptr;
258    }
259    return bv_it->second.get();
260}
261
262FENCE_NODE *GetFenceNode(layer_data *dev_data, VkFence fence) {
263    auto it = dev_data->fenceMap.find(fence);
264    if (it == dev_data->fenceMap.end()) {
265        return nullptr;
266    }
267    return &it->second;
268}
269
270EVENT_STATE *GetEventNode(layer_data *dev_data, VkEvent event) {
271    auto it = dev_data->eventMap.find(event);
272    if (it == dev_data->eventMap.end()) {
273        return nullptr;
274    }
275    return &it->second;
276}
277
278QUERY_POOL_NODE *GetQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
279    auto it = dev_data->queryPoolMap.find(query_pool);
280    if (it == dev_data->queryPoolMap.end()) {
281        return nullptr;
282    }
283    return &it->second;
284}
285
286QUEUE_STATE *GetQueueState(layer_data *dev_data, VkQueue queue) {
287    auto it = dev_data->queueMap.find(queue);
288    if (it == dev_data->queueMap.end()) {
289        return nullptr;
290    }
291    return &it->second;
292}
293
294SEMAPHORE_NODE *GetSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
295    auto it = dev_data->semaphoreMap.find(semaphore);
296    if (it == dev_data->semaphoreMap.end()) {
297        return nullptr;
298    }
299    return &it->second;
300}
301
302COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
303    auto it = dev_data->commandPoolMap.find(pool);
304    if (it == dev_data->commandPoolMap.end()) {
305        return nullptr;
306    }
307    return &it->second;
308}
309
310PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
311    auto it = instance_data->physical_device_map.find(phys);
312    if (it == instance_data->physical_device_map.end()) {
313        return nullptr;
314    }
315    return &it->second;
316}
317
318SURFACE_STATE *GetSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
319    auto it = instance_data->surface_map.find(surface);
320    if (it == instance_data->surface_map.end()) {
321        return nullptr;
322    }
323    return &it->second;
324}
325
326DeviceExtensions const *GetEnabledExtensions(layer_data const *dev_data) {
327    return &dev_data->extensions;
328}
329
330// Return ptr to memory binding for given handle of specified type
331static BINDABLE *GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
332    switch (type) {
333        case kVulkanObjectTypeImage:
334            return GetImageState(dev_data, VkImage(handle));
335        case kVulkanObjectTypeBuffer:
336            return GetBufferState(dev_data, VkBuffer(handle));
337        default:
338            break;
339    }
340    return nullptr;
341}
342// prototype
343GLOBAL_CB_NODE *GetCBNode(layer_data const *, const VkCommandBuffer);
344
345// Return ptr to info in map container containing mem, or NULL if not found
346//  Calls to this function should be wrapped in mutex
347DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
348    auto mem_it = dev_data->memObjMap.find(mem);
349    if (mem_it == dev_data->memObjMap.end()) {
350        return NULL;
351    }
352    return mem_it->second.get();
353}
354
355static void add_mem_obj_info(layer_data *dev_data, void *object, const VkDeviceMemory mem,
356                             const VkMemoryAllocateInfo *pAllocateInfo) {
357    assert(object != NULL);
358
359    dev_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
360}
361
362// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
363static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle, VulkanObjectType type,
364                                  const char *functionName) {
365    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
366    if (mem_info) {
367        if (!mem_info->bound_ranges[bound_object_handle].valid) {
368            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
369                           HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
370                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
371                           ", please fill the memory before using.",
372                           functionName, HandleToUint64(mem), object_string[type], bound_object_handle);
373        }
374    }
375    return false;
376}
377// For given image_state
378//  If mem is special swapchain key, then verify that image_state valid member is true
379//  Else verify that the image's bound memory range is valid
380bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
381    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
382        if (!image_state->valid) {
383            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
384                           HandleToUint64(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
385                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
386                           functionName, HandleToUint64(image_state->image));
387        }
388    } else {
389        return ValidateMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), kVulkanObjectTypeImage,
390                                     functionName);
391    }
392    return false;
393}
394// For given buffer_state, verify that the range it's bound to is valid
395bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
396    return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer,
397                                 functionName);
398}
399// For the given memory allocation, set the range bound by the given handle object to the valid param value
400static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
401    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
402    if (mem_info) {
403        mem_info->bound_ranges[handle].valid = valid;
404    }
405}
406// For given image node
407//  If mem is special swapchain key, then set entire image_state to valid param value
408//  Else set the image's bound memory range to valid param value
409void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
410    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
411        image_state->valid = valid;
412    } else {
413        SetMemoryValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), valid);
414    }
415}
416// For given buffer node set the buffer's bound memory range to valid param value
417void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
418    SetMemoryValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), valid);
419}
420
421// Create binding link between given sampler and command buffer node
422void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
423    sampler_state->cb_bindings.insert(cb_node);
424    cb_node->object_bindings.insert({HandleToUint64(sampler_state->sampler), kVulkanObjectTypeSampler});
425}
426
427// Create binding link between given image node and command buffer node
428void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
429    // Skip validation if this image was created through WSI
430    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
431        // First update CB binding in MemObj mini CB list
432        for (auto mem_binding : image_state->GetBoundMemory()) {
433            DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
434            if (pMemInfo) {
435                pMemInfo->cb_bindings.insert(cb_node);
436                // Now update CBInfo's Mem reference list
437                cb_node->memObjs.insert(mem_binding);
438            }
439        }
440        // Now update cb binding for image
441        cb_node->object_bindings.insert({HandleToUint64(image_state->image), kVulkanObjectTypeImage});
442        image_state->cb_bindings.insert(cb_node);
443    }
444}
445
446// Create binding link between given image view node and its image with command buffer node
447void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
448    // First add bindings for imageView
449    view_state->cb_bindings.insert(cb_node);
450    cb_node->object_bindings.insert({HandleToUint64(view_state->image_view), kVulkanObjectTypeImageView});
451    auto image_state = GetImageState(dev_data, view_state->create_info.image);
452    // Add bindings for image within imageView
453    if (image_state) {
454        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
455    }
456}
457
458// Create binding link between given buffer node and command buffer node
459void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
460    // First update CB binding in MemObj mini CB list
461    for (auto mem_binding : buffer_state->GetBoundMemory()) {
462        DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
463        if (pMemInfo) {
464            pMemInfo->cb_bindings.insert(cb_node);
465            // Now update CBInfo's Mem reference list
466            cb_node->memObjs.insert(mem_binding);
467        }
468    }
469    // Now update cb binding for buffer
470    cb_node->object_bindings.insert({HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer});
471    buffer_state->cb_bindings.insert(cb_node);
472}
473
474// Create binding link between given buffer view node and its buffer with command buffer node
475void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
476    // First add bindings for bufferView
477    view_state->cb_bindings.insert(cb_node);
478    cb_node->object_bindings.insert({HandleToUint64(view_state->buffer_view), kVulkanObjectTypeBufferView});
479    auto buffer_state = GetBufferState(dev_data, view_state->create_info.buffer);
480    // Add bindings for buffer within bufferView
481    if (buffer_state) {
482        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
483    }
484}
485
486// For every mem obj bound to particular CB, free bindings related to that CB
487static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
488    if (cb_node) {
489        if (cb_node->memObjs.size() > 0) {
490            for (auto mem : cb_node->memObjs) {
491                DEVICE_MEM_INFO *pInfo = GetMemObjInfo(dev_data, mem);
492                if (pInfo) {
493                    pInfo->cb_bindings.erase(cb_node);
494                }
495            }
496            cb_node->memObjs.clear();
497        }
498    }
499}
500
501// Clear a single object binding from given memory object, or report error if binding is missing
502static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) {
503    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
504    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
505    if (mem_info) {
506        mem_info->obj_bindings.erase({handle, type});
507    }
508    return false;
509}
510
511// ClearMemoryObjectBindings clears the binding of objects to memory
512//  For the given object it pulls the memory bindings and makes sure that the bindings
513//  no longer refer to the object being cleared. This occurs when objects are destroyed.
514bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
515    bool skip = false;
516    BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
517    if (mem_binding) {
518        if (!mem_binding->sparse) {
519            skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
520        } else {  // Sparse, clear all bindings
521            for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
522                skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
523            }
524        }
525    }
526    return skip;
527}
528
529// For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
530bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
531                              const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
532    bool result = false;
533    if (VK_NULL_HANDLE == mem) {
534        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
535                         __LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
536                                                      " used with no memory bound. Memory should be bound by calling "
537                                                      "vkBind%sMemory(). %s",
538                         api_name, type_name, handle, type_name, validation_error_map[error_code]);
539    } else if (MEMORY_UNBOUND == mem) {
540        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
541                         __LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
542                                                      " used with no memory bound and previously bound memory was freed. "
543                                                      "Memory must not be freed prior to this operation. %s",
544                         api_name, type_name, handle, validation_error_map[error_code]);
545    }
546    return result;
547}
548
549// Check to see if memory was ever bound to this image
550bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
551                                  UNIQUE_VALIDATION_ERROR_CODE error_code) {
552    bool result = false;
553    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
554        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), api_name, "Image",
555                                          error_code);
556    }
557    return result;
558}
559
560// Check to see if memory was bound to this buffer
561bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
562                                   UNIQUE_VALIDATION_ERROR_CODE error_code) {
563    bool result = false;
564    if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
565        result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), api_name,
566                                          "Buffer", error_code);
567    }
568    return result;
569}
570
571// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object.
572// Corresponding valid usage checks are in ValidateSetMemBinding().
573static void SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type, const char *apiName) {
574    if (mem != VK_NULL_HANDLE) {
575        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
576        assert(mem_binding);
577        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
578        if (mem_info) {
579            mem_info->obj_bindings.insert({handle, type});
580            // For image objects, make sure default memory state is correctly set
581            // TODO : What's the best/correct way to handle this?
582            if (kVulkanObjectTypeImage == type) {
583                auto const image_state = GetImageState(dev_data, VkImage(handle));
584                if (image_state) {
585                    VkImageCreateInfo ici = image_state->createInfo;
586                    if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
587                        // TODO::  More memory state transition stuff.
588                    }
589                }
590            }
591            mem_binding->binding.mem = mem;
592        }
593    }
594}
595
596// Valid usage checks for a call to SetMemBinding().
597// For NULL mem case, output warning
598// Make sure given object is in global object map
599//  IF a previous binding existed, output validation error
600//  Otherwise, add reference from objectInfo to memoryInfo
601//  Add reference off of objInfo
602// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
603static bool ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
604                                  const char *apiName) {
605    bool skip = false;
606    // It's an error to bind an object to NULL memory
607    if (mem != VK_NULL_HANDLE) {
608        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
609        assert(mem_binding);
610        if (mem_binding->sparse) {
611            UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_1740082a;
612            const char *handle_type = "IMAGE";
613            if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
614                error_code = VALIDATION_ERROR_1700080c;
615                handle_type = "BUFFER";
616            } else {
617                assert(strcmp(apiName, "vkBindImageMemory()") == 0);
618            }
619            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
620                            HandleToUint64(mem), __LINE__, error_code, "MEM",
621                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
622                            ") which was created with sparse memory flags (VK_%s_CREATE_SPARSE_*_BIT). %s",
623                            apiName, HandleToUint64(mem), handle, handle_type, validation_error_map[error_code]);
624        }
625        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
626        if (mem_info) {
627            DEVICE_MEM_INFO *prev_binding = GetMemObjInfo(dev_data, mem_binding->binding.mem);
628            if (prev_binding) {
629                UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_17400828;
630                if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
631                    error_code = VALIDATION_ERROR_1700080a;
632                } else {
633                    assert(strcmp(apiName, "vkBindImageMemory()") == 0);
634                }
635                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
636                                HandleToUint64(mem), __LINE__, error_code, "MEM",
637                                "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
638                                ") which has already been bound to mem object 0x%" PRIxLEAST64 ". %s",
639                                apiName, HandleToUint64(mem), handle, HandleToUint64(prev_binding->mem),
640                                validation_error_map[error_code]);
641            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
642                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
643                                HandleToUint64(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
644                                "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
645                                ") which was previous bound to memory that has since been freed. Memory bindings are immutable in "
646                                "Vulkan so this attempt to bind to new memory is not allowed.",
647                                apiName, HandleToUint64(mem), handle);
648            }
649        }
650    }
651    return skip;
652}
653
654// For NULL mem case, clear any previous binding Else...
655// Make sure given object is in its object map
656//  IF a previous binding existed, update binding
657//  Add reference from objectInfo to memoryInfo
658//  Add reference off of object's binding info
659// Return VK_TRUE if addition is successful, VK_FALSE otherwise
660static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
661    bool skip = VK_FALSE;
662    // Handle NULL case separately, just clear previous binding & decrement reference
663    if (binding.mem == VK_NULL_HANDLE) {
664        // TODO : This should cause the range of the resource to be unbound according to spec
665    } else {
666        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
667        assert(mem_binding);
668        assert(mem_binding->sparse);
669        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, binding.mem);
670        if (mem_info) {
671            mem_info->obj_bindings.insert({handle, type});
672            // Need to set mem binding for this object
673            mem_binding->sparse_bindings.insert(binding);
674        }
675    }
676    return skip;
677}
678
679// Check object status for selected flag state
680static bool validate_status(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
681                            const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
682    if (!(pNode->status & status_mask)) {
683        char const *const message = validation_error_map[msg_code];
684        return log_msg(dev_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
685                       HandleToUint64(pNode->commandBuffer), __LINE__, msg_code, "DS", "command buffer object 0x%p: %s. %s.",
686                       pNode->commandBuffer, fail_msg, message);
687    }
688    return false;
689}
690
691// Retrieve pipeline node ptr for given pipeline object
692static PIPELINE_STATE *getPipelineState(layer_data const *dev_data, VkPipeline pipeline) {
693    auto it = dev_data->pipelineMap.find(pipeline);
694    if (it == dev_data->pipelineMap.end()) {
695        return nullptr;
696    }
697    return it->second;
698}
699
700RENDER_PASS_STATE *GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass) {
701    auto it = dev_data->renderPassMap.find(renderpass);
702    if (it == dev_data->renderPassMap.end()) {
703        return nullptr;
704    }
705    return it->second.get();
706}
707
708FRAMEBUFFER_STATE *GetFramebufferState(const layer_data *dev_data, VkFramebuffer framebuffer) {
709    auto it = dev_data->frameBufferMap.find(framebuffer);
710    if (it == dev_data->frameBufferMap.end()) {
711        return nullptr;
712    }
713    return it->second.get();
714}
715
716std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(layer_data const *dev_data,
717                                                                                         VkDescriptorSetLayout dsLayout) {
718    auto it = dev_data->descriptorSetLayoutMap.find(dsLayout);
719    if (it == dev_data->descriptorSetLayoutMap.end()) {
720        return nullptr;
721    }
722    return it->second;
723}
724
725static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
726    auto it = dev_data->pipelineLayoutMap.find(pipeLayout);
727    if (it == dev_data->pipelineLayoutMap.end()) {
728        return nullptr;
729    }
730    return &it->second;
731}
732
733shader_module const *GetShaderModuleState(layer_data const *dev_data, VkShaderModule module) {
734    auto it = dev_data->shaderModuleMap.find(module);
735    if (it == dev_data->shaderModuleMap.end()) {
736        return nullptr;
737    }
738    return it->second.get();
739}
740
741// Return true if for a given PSO, the given state enum is dynamic, else return false
742static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
743    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
744        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
745            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
746        }
747    }
748    return false;
749}
750
751// Validate state stored as flags at time of draw call
752static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
753                                      UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
754    bool result = false;
755    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
756        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
757         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
758        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
759                                  "Dynamic line width state not set for this command buffer", msg_code);
760    }
761    if (pPipe->graphicsPipelineCI.pRasterizationState &&
762        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
763        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
764                                  "Dynamic depth bias state not set for this command buffer", msg_code);
765    }
766    if (pPipe->blendConstantsEnabled) {
767        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
768                                  "Dynamic blend constants state not set for this command buffer", msg_code);
769    }
770    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
771        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
772        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
773                                  "Dynamic depth bounds state not set for this command buffer", msg_code);
774    }
775    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
776        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
777        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
778                                  "Dynamic stencil read mask state not set for this command buffer", msg_code);
779        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
780                                  "Dynamic stencil write mask state not set for this command buffer", msg_code);
781        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
782                                  "Dynamic stencil reference state not set for this command buffer", msg_code);
783    }
784    if (indexed) {
785        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
786                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
787    }
788
789    return result;
790}
791
792// Verify attachment reference compatibility according to spec
793//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & other array much match this
794//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
795//   to make sure that format and samples counts match.
796//  If not, they are not compatible.
797static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
798                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
799                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
800                                             const VkAttachmentDescription *pSecondaryAttachments) {
801    // Check potential NULL cases first to avoid nullptr issues later
802    if (pPrimary == nullptr) {
803        if (pSecondary == nullptr) {
804            return true;
805        }
806        return false;
807    } else if (pSecondary == nullptr) {
808        return false;
809    }
810    if (index >= primaryCount) {  // Check secondary as if primary is VK_ATTACHMENT_UNUSED
811        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment) return true;
812    } else if (index >= secondaryCount) {  // Check primary as if secondary is VK_ATTACHMENT_UNUSED
813        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment) return true;
814    } else {  // Format and sample count must match
815        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
816            return true;
817        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
818            return false;
819        }
820        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
821             pSecondaryAttachments[pSecondary[index].attachment].format) &&
822            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
823             pSecondaryAttachments[pSecondary[index].attachment].samples))
824            return true;
825    }
826    // Format and sample counts didn't match
827    return false;
828}
829// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
830// For given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
831static bool verify_renderpass_compatibility(const layer_data *dev_data, const VkRenderPassCreateInfo *primaryRPCI,
832                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
833    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
834        stringstream errorStr;
835        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
836                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
837        errorMsg = errorStr.str();
838        return false;
839    }
840    uint32_t spIndex = 0;
841    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
842        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
843        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
844        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
845        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
846        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
847            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
848                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
849                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
850                stringstream errorStr;
851                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
852                errorMsg = errorStr.str();
853                return false;
854            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
855                                                         primaryColorCount, primaryRPCI->pAttachments,
856                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
857                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
858                stringstream errorStr;
859                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
860                errorMsg = errorStr.str();
861                return false;
862            }
863        }
864
865        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, 1,
866                                              primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
867                                              1, secondaryRPCI->pAttachments)) {
868            stringstream errorStr;
869            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
870            errorMsg = errorStr.str();
871            return false;
872        }
873
874        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
875        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
876        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
877        for (uint32_t i = 0; i < inputMax; ++i) {
878            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
879                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
880                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
881                stringstream errorStr;
882                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
883                errorMsg = errorStr.str();
884                return false;
885            }
886        }
887    }
888    return true;
889}
890
891// Return Set node ptr for specified set or else NULL
892cvdescriptorset::DescriptorSet *GetSetNode(const layer_data *dev_data, VkDescriptorSet set) {
893    auto set_it = dev_data->setMap.find(set);
894    if (set_it == dev_data->setMap.end()) {
895        return NULL;
896    }
897    return set_it->second;
898}
899
900// For given pipeline, return number of MSAA samples, or one if MSAA disabled
901static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
902    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
903        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
904        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
905    }
906    return VK_SAMPLE_COUNT_1_BIT;
907}
908
909static void list_bits(std::ostream &s, uint32_t bits) {
910    for (int i = 0; i < 32 && bits; i++) {
911        if (bits & (1 << i)) {
912            s << i;
913            bits &= ~(1 << i);
914            if (bits) {
915                s << ",";
916            }
917        }
918    }
919}
920
921// Validate draw-time state related to the PSO
922static bool ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
923                                          PIPELINE_STATE const *pPipeline) {
924    bool skip = false;
925
926    // Verify vertex binding
927    if (pPipeline->vertexBindingDescriptions.size() > 0) {
928        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
929            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
930            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
931                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
932                skip |=
933                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
934                            HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
935                            "The Pipeline State Object (0x%" PRIxLEAST64
936                            ") expects that this Command Buffer's vertex binding Index %u "
937                            "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
938                            "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
939                            HandleToUint64(state.pipeline_state->pipeline), vertex_binding, i, vertex_binding);
940            }
941        }
942    } else {
943        if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
944            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
945                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), __LINE__,
946                            DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
947                            "Vertex buffers are bound to command buffer (0x%p"
948                            ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
949                            pCB->commandBuffer, HandleToUint64(state.pipeline_state->pipeline));
950        }
951    }
952    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
953    // Skip check if rasterization is disabled or there is no viewport.
954    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
955         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
956        pPipeline->graphicsPipelineCI.pViewportState) {
957        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
958        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
959
960        if (dynViewport) {
961            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
962            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
963            if (missingViewportMask) {
964                std::stringstream ss;
965                ss << "Dynamic viewport(s) ";
966                list_bits(ss, missingViewportMask);
967                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
968                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
969                                __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
970            }
971        }
972
973        if (dynScissor) {
974            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
975            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
976            if (missingScissorMask) {
977                std::stringstream ss;
978                ss << "Dynamic scissor(s) ";
979                list_bits(ss, missingScissorMask);
980                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
981                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
982                                __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
983            }
984        }
985    }
986
987    // Verify that any MSAA request in PSO matches sample# in bound FB
988    // Skip the check if rasterization is disabled.
989    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
990        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
991        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
992        if (pCB->activeRenderPass) {
993            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
994            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
995            uint32_t i;
996            unsigned subpass_num_samples = 0;
997
998            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
999                auto attachment = subpass_desc->pColorAttachments[i].attachment;
1000                if (attachment != VK_ATTACHMENT_UNUSED)
1001                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
1002            }
1003
1004            if (subpass_desc->pDepthStencilAttachment &&
1005                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1006                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
1007                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
1008            }
1009
1010            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
1011                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1012                                HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
1013                                "Num samples mismatch! At draw-time in Pipeline (0x%" PRIxLEAST64
1014                                ") with %u samples while current RenderPass (0x%" PRIxLEAST64 ") w/ %u samples!",
1015                                HandleToUint64(pPipeline->pipeline), pso_num_samples,
1016                                HandleToUint64(pCB->activeRenderPass->renderPass), subpass_num_samples);
1017            }
1018        } else {
1019            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1020                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
1021                            "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
1022                            HandleToUint64(pPipeline->pipeline));
1023        }
1024    }
1025    // Verify that PSO creation renderPass is compatible with active renderPass
1026    if (pCB->activeRenderPass) {
1027        std::string err_string;
1028        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
1029            !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
1030                                             err_string)) {
1031            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
1032            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1033                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
1034                            "At Draw time the active render pass (0x%" PRIxLEAST64
1035                            ") is incompatible w/ gfx pipeline "
1036                            "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
1037                            HandleToUint64(pCB->activeRenderPass->renderPass), HandleToUint64(pPipeline->pipeline),
1038                            HandleToUint64(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
1039        }
1040
1041        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
1042            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1043                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
1044                            "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
1045                            pCB->activeSubpass);
1046        }
1047    }
1048    // TODO : Add more checks here
1049
1050    return skip;
1051}
1052
1053// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
1054// pipelineLayout[layoutIndex]
1055static bool verify_set_layout_compatibility(const cvdescriptorset::DescriptorSet *descriptor_set,
1056                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
1057                                            string &errorMsg) {
1058    auto num_sets = pipeline_layout->set_layouts.size();
1059    if (layoutIndex >= num_sets) {
1060        stringstream errorStr;
1061        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
1062                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
1063                 << layoutIndex;
1064        errorMsg = errorStr.str();
1065        return false;
1066    }
1067    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
1068    return descriptor_set->IsCompatible(layout_node.get(), &errorMsg);
1069}
1070
1071// Validate overall state at the time of a draw call
1072static bool ValidateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const bool indexed,
1073                              const VkPipelineBindPoint bind_point, const char *function,
1074                              UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
1075    bool result = false;
1076    auto const &state = cb_node->lastBound[bind_point];
1077    PIPELINE_STATE *pPipe = state.pipeline_state;
1078    if (nullptr == pPipe) {
1079        result |= log_msg(
1080            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1081            HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
1082            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
1083        // Early return as any further checks below will be busted w/o a pipeline
1084        if (result) return true;
1085    }
1086    // First check flag states
1087    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
1088        result = validate_draw_state_flags(dev_data, cb_node, pPipe, indexed, msg_code);
1089
1090    // Now complete other state checks
1091    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
1092        string errorString;
1093        auto pipeline_layout = pPipe->pipeline_layout;
1094
1095        for (const auto &set_binding_pair : pPipe->active_slots) {
1096            uint32_t setIndex = set_binding_pair.first;
1097            // If valid set is not bound throw an error
1098            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
1099                result |=
1100                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1101                            HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
1102                            "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.",
1103                            HandleToUint64(pPipe->pipeline), setIndex);
1104            } else if (!verify_set_layout_compatibility(state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
1105                                                        errorString)) {
1106                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
1107                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
1108                result |=
1109                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1110                            HandleToUint64(setHandle), __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
1111                            "VkDescriptorSet (0x%" PRIxLEAST64
1112                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
1113                            HandleToUint64(setHandle), setIndex, HandleToUint64(pipeline_layout.layout), errorString.c_str());
1114            } else {  // Valid set is bound and layout compatible, validate that it's updated
1115                // Pull the set node
1116                cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1117                // Validate the draw-time state for this descriptor set
1118                std::string err_str;
1119                if (!descriptor_set->ValidateDrawState(set_binding_pair.second, state.dynamicOffsets[setIndex], cb_node, function,
1120                                                       &err_str)) {
1121                    auto set = descriptor_set->GetSet();
1122                    result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1123                                      VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(set), __LINE__,
1124                                      DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
1125                                      "Descriptor set 0x%" PRIxLEAST64 " encountered the following validation error at %s time: %s",
1126                                      HandleToUint64(set), function, err_str.c_str());
1127                }
1128            }
1129        }
1130    }
1131
1132    // Check general pipeline state that needs to be validated at drawtime
1133    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) result |= ValidatePipelineDrawtimeState(dev_data, state, cb_node, pPipe);
1134
1135    return result;
1136}
1137
1138static void UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
1139    auto const &state = cb_state->lastBound[bind_point];
1140    PIPELINE_STATE *pPipe = state.pipeline_state;
1141    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
1142        for (const auto &set_binding_pair : pPipe->active_slots) {
1143            uint32_t setIndex = set_binding_pair.first;
1144            // Pull the set node
1145            cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1146            // Bind this set and its active descriptor resources to the command buffer
1147            descriptor_set->BindCommandBuffer(cb_state, set_binding_pair.second);
1148            // For given active slots record updated images & buffers
1149            descriptor_set->GetStorageUpdates(set_binding_pair.second, &cb_state->updateBuffers, &cb_state->updateImages);
1150        }
1151    }
1152    if (pPipe->vertexBindingDescriptions.size() > 0) {
1153        cb_state->vertex_buffer_used = true;
1154    }
1155}
1156
1157// Validate HW line width capabilities prior to setting requested line width.
1158static bool verifyLineWidth(layer_data *dev_data, DRAW_STATE_ERROR dsError, VulkanObjectType object_type, const uint64_t &target,
1159                            float lineWidth) {
1160    bool skip = false;
1161
1162    // First check to see if the physical device supports wide lines.
1163    if ((VK_FALSE == dev_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
1164        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object_type], target, __LINE__,
1165                        dsError, "DS",
1166                        "Attempt to set lineWidth to %f but physical device wideLines feature "
1167                        "not supported/enabled so lineWidth must be 1.0f!",
1168                        lineWidth);
1169    } else {
1170        // Otherwise, make sure the width falls in the valid range.
1171        if ((dev_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
1172            (dev_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
1173            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object_type], target,
1174                            __LINE__, dsError, "DS",
1175                            "Attempt to set lineWidth to %f but physical device limits line width "
1176                            "to between [%f, %f]!",
1177                            lineWidth, dev_data->phys_dev_properties.properties.limits.lineWidthRange[0],
1178                            dev_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
1179        }
1180    }
1181
1182    return skip;
1183}
1184
1185static bool ValidatePipelineLocked(layer_data *dev_data, std::vector<PIPELINE_STATE *> const &pPipelines, int pipelineIndex) {
1186    bool skip = false;
1187
1188    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];
1189
1190    // If create derivative bit is set, check that we've specified a base
1191    // pipeline correctly, and that the base pipeline was created to allow
1192    // derivatives.
1193    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
1194        PIPELINE_STATE *pBasePipeline = nullptr;
1195        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
1196              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
1197            // This check is a superset of VALIDATION_ERROR_096005a8 and VALIDATION_ERROR_096005aa
1198            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1199                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
1200                            "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
1201        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
1202            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
1203                skip |=
1204                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1205                            HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_208005a0, "DS",
1206                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline. %s",
1207                            validation_error_map[VALIDATION_ERROR_208005a0]);
1208            } else {
1209                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
1210            }
1211        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
1212            pBasePipeline = getPipelineState(dev_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
1213        }
1214
1215        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
1216            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1217                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
1218                            "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
1219        }
1220    }
1221
1222    return skip;
1223}
1224
1225// UNLOCKED pipeline validation. DO NOT lookup objects in the layer_data->* maps in this function.
1226static bool ValidatePipelineUnlocked(layer_data *dev_data, std::vector<PIPELINE_STATE *> const &pPipelines, int pipelineIndex) {
1227    bool skip = false;
1228
1229        PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];
1230
1231    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
1232    // produces nonsense errors that confuse users. Other layers should already
1233    // emit errors for renderpass being invalid.
1234    auto subpass_desc = &pPipeline->render_pass_ci.pSubpasses[pPipeline->graphicsPipelineCI.subpass];
1235    if (pPipeline->graphicsPipelineCI.subpass >= pPipeline->render_pass_ci.subpassCount) {
1236        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1237                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005ee, "DS",
1238                        "Invalid Pipeline CreateInfo State: Subpass index %u "
1239                            "is out of range for this renderpass (0..%u). %s",
1240                        pPipeline->graphicsPipelineCI.subpass, pPipeline->render_pass_ci.subpassCount - 1,
1241                        validation_error_map[VALIDATION_ERROR_096005ee]);
1242        subpass_desc = nullptr;
1243    }
1244
1245    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
1246        const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
1247        if (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) {
1248            skip |= log_msg(
1249                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1250                HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005d4, "DS",
1251                "vkCreateGraphicsPipelines(): Render pass (0x%" PRIxLEAST64
1252                ") subpass %u has colorAttachmentCount of %u which doesn't match the pColorBlendState->attachmentCount of %u. %s",
1253                HandleToUint64(pPipeline->graphicsPipelineCI.renderPass), pPipeline->graphicsPipelineCI.subpass,
1254                subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount,
1255                validation_error_map[VALIDATION_ERROR_096005d4]);
1256        }
1257        if (!dev_data->enabled_features.independentBlend) {
1258            if (pPipeline->attachments.size() > 1) {
1259                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
1260                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
1261                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
1262                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
1263                    // only attachment state, so memcmp is best suited for the comparison
1264                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
1265                               sizeof(pAttachments[0]))) {
1266                        skip |=
1267                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1268                                    HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_0f4004ba, "DS",
1269                                    "Invalid Pipeline CreateInfo: If independent blend feature not "
1270                                    "enabled, all elements of pAttachments must be identical. %s",
1271                                    validation_error_map[VALIDATION_ERROR_0f4004ba]);
1272                        break;
1273                    }
1274                }
1275            }
1276        }
1277        if (!dev_data->enabled_features.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
1278            skip |=
1279                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1280                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_0f4004bc, "DS",
1281                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE. %s",
1282                        validation_error_map[VALIDATION_ERROR_0f4004bc]);
1283        }
1284    }
1285
1286    if (validate_and_capture_pipeline_shader_state(dev_data, pPipeline)) {
1287        skip = true;
1288    }
1289    // Each shader's stage must be unique
1290    if (pPipeline->duplicate_shaders) {
1291        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
1292            if (pPipeline->duplicate_shaders & stage) {
1293                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1294                                HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
1295                                "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
1296                                string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
1297            }
1298        }
1299    }
1300    // VS is required
1301    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
1302        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1303                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005ae, "DS",
1304                        "Invalid Pipeline CreateInfo State: Vertex Shader required. %s",
1305                        validation_error_map[VALIDATION_ERROR_096005ae]);
1306    }
1307    // Either both or neither TC/TE shaders should be defined
1308    bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
1309    bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
1310    if (has_control && !has_eval) {
1311        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1312                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b2, "DS",
1313                        "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
1314                        validation_error_map[VALIDATION_ERROR_096005b2]);
1315    }
1316    if (!has_control && has_eval) {
1317        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1318                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b4, "DS",
1319                        "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
1320                        validation_error_map[VALIDATION_ERROR_096005b4]);
1321    }
1322    // Compute shaders should be specified independent of Gfx shaders
1323    if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
1324        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1325                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b0, "DS",
1326                        "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline. %s",
1327                        validation_error_map[VALIDATION_ERROR_096005b0]);
1328    }
1329    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
1330    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
1331    if (has_control && has_eval &&
1332        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
1333         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
1334        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1335                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005c0, "DS",
1336                        "Invalid Pipeline CreateInfo State: "
1337                        "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
1338                        "topology for tessellation pipelines. %s",
1339                        validation_error_map[VALIDATION_ERROR_096005c0]);
1340    }
1341    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
1342        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
1343        if (!has_control || !has_eval) {
1344            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1345                            HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005c2, "DS",
1346                            "Invalid Pipeline CreateInfo State: "
1347                            "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
1348                            "topology is only valid for tessellation pipelines. %s",
1349                            validation_error_map[VALIDATION_ERROR_096005c2]);
1350        }
1351    }
1352
1353    // If a rasterization state is provided...
1354    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
1355        // Make sure that the line width conforms to the HW.
1356        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
1357            skip |=
1358                verifyLineWidth(dev_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, kVulkanObjectTypePipeline,
1359                                HandleToUint64(pPipeline->pipeline), pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
1360        }
1361
1362        if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
1363            (!dev_data->enabled_features.depthClamp)) {
1364            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1365                            HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_1020061c, "DS",
1366                            "vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable "
1367                            "member of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE. %s",
1368                            validation_error_map[VALIDATION_ERROR_1020061c]);
1369        }
1370
1371        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
1372            (pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) &&
1373            (!dev_data->enabled_features.depthBiasClamp)) {
1374            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1375                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
1376                            "vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp "
1377                            "member of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
1378                            "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled");
1379        }
1380
1381        // If rasterization is enabled...
1382        if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
1383            if ((pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) &&
1384                (!dev_data->enabled_features.alphaToOne)) {
1385                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1386                                HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_10000622, "DS",
1387                                "vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable "
1388                                "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE. %s",
1389                                validation_error_map[VALIDATION_ERROR_10000622]);
1390            }
1391
1392            // If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
1393            if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
1394                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1395                if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
1396                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1397                                    HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005e0, "DS",
1398                                    "Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is "
1399                                    "enabled and subpass uses a depth/stencil attachment. %s",
1400                                    validation_error_map[VALIDATION_ERROR_096005e0]);
1401
1402                } else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
1403                           (!dev_data->enabled_features.depthBounds)) {
1404                    skip |= log_msg(
1405                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1406                        HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
1407                        "vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the depthBoundsTestEnable "
1408                        "member of the VkPipelineDepthStencilStateCreateInfo structure must be set to VK_FALSE.");
1409                }
1410            }
1411
1412            // If subpass uses color attachments, pColorBlendState must be valid pointer
1413            if (subpass_desc) {
1414                uint32_t color_attachment_count = 0;
1415                for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
1416                    if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1417                        ++color_attachment_count;
1418                    }
1419                }
1420                if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
1421                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1422                                    HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005e2, "DS",
1423                                    "Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is "
1424                                    "enabled and subpass uses color attachments. %s",
1425                                    validation_error_map[VALIDATION_ERROR_096005e2]);
1426                }
1427            }
1428        }
1429    }
1430
1431    auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
1432    if (vi != NULL) {
1433        for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
1434            VkFormat format = vi->pVertexAttributeDescriptions[j].format;
1435            // Internal call to get format info.  Still goes through layers, could potentially go directly to ICD.
1436            VkFormatProperties properties;
1437            dev_data->instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(dev_data->physical_device, format, &properties);
1438            if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
1439                skip |= log_msg(
1440                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1441                    __LINE__, VALIDATION_ERROR_14a004de, "IMAGE",
1442                    "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
1443                        "(%s) is not a supported vertex buffer format. %s",
1444                    pipelineIndex, j, string_VkFormat(format), validation_error_map[VALIDATION_ERROR_14a004de]);
1445            }
1446        }
1447    }
1448
1449    return skip;
1450}
1451
1452// Free the Pipeline nodes
1453static void deletePipelines(layer_data *dev_data) {
1454    if (dev_data->pipelineMap.size() <= 0) return;
1455    for (auto &pipe_map_pair : dev_data->pipelineMap) {
1456        delete pipe_map_pair.second;
1457    }
1458    dev_data->pipelineMap.clear();
1459}
1460
1461// Block of code at start here specifically for managing/tracking DSs
1462
1463// Return Pool node ptr for specified pool or else NULL
1464DESCRIPTOR_POOL_STATE *GetDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
1465    auto pool_it = dev_data->descriptorPoolMap.find(pool);
1466    if (pool_it == dev_data->descriptorPoolMap.end()) {
1467        return NULL;
1468    }
1469    return pool_it->second;
1470}
1471
1472// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
1473// func_str is the name of the calling function
1474// Return false if no errors occur
1475// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
1476static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
1477    if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
1478    bool skip = false;
1479    auto set_node = dev_data->setMap.find(set);
1480    if (set_node == dev_data->setMap.end()) {
1481        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1482                        HandleToUint64(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
1483                        "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
1484                        HandleToUint64(set));
1485    } else {
1486        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
1487        if (set_node->second->in_use.load()) {
1488            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1489                            HandleToUint64(set), __LINE__, VALIDATION_ERROR_2860026a, "DS",
1490                            "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
1491                            func_str.c_str(), HandleToUint64(set), validation_error_map[VALIDATION_ERROR_2860026a]);
1492        }
1493    }
1494    return skip;
1495}
1496
1497// Remove set from setMap and delete the set
1498static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
1499    dev_data->setMap.erase(descriptor_set->GetSet());
1500    delete descriptor_set;
1501}
1502// Free all DS Pools including their Sets & related sub-structs
1503// NOTE : Calls to this function should be wrapped in mutex
1504static void deletePools(layer_data *dev_data) {
1505    for (auto ii = dev_data->descriptorPoolMap.begin(); ii != dev_data->descriptorPoolMap.end();) {
1506        // Remove this pools' sets from setMap and delete them
1507        for (auto ds : ii->second->sets) {
1508            freeDescriptorSet(dev_data, ds);
1509        }
1510        ii->second->sets.clear();
1511        delete ii->second;
1512        ii = dev_data->descriptorPoolMap.erase(ii);
1513    }
1514}
1515
1516static void clearDescriptorPool(layer_data *dev_data, const VkDevice device, const VkDescriptorPool pool,
1517                                VkDescriptorPoolResetFlags flags) {
1518    DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, pool);
1519    // TODO: validate flags
1520    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
1521    for (auto ds : pPool->sets) {
1522        freeDescriptorSet(dev_data, ds);
1523    }
1524    pPool->sets.clear();
1525    // Reset available count for each type and available sets for this pool
1526    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
1527        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
1528    }
1529    pPool->availableSets = pPool->maxSets;
1530}
1531
1532// For given CB object, fetch associated CB Node from map
1533GLOBAL_CB_NODE *GetCBNode(layer_data const *dev_data, const VkCommandBuffer cb) {
1534    auto it = dev_data->commandBufferMap.find(cb);
1535    if (it == dev_data->commandBufferMap.end()) {
1536        return NULL;
1537    }
1538    return it->second;
1539}
1540
1541// If a renderpass is active, verify that the given command type is appropriate for current subpass state
1542bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
1543    if (!pCB->activeRenderPass) return false;
1544    bool skip = false;
1545    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
1546        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
1547        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1548                        HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
1549                        "Commands cannot be called in a subpass using secondary command buffers.");
1550    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
1551        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1552                        HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
1553                        "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
1554    }
1555    return skip;
1556}
1557
1558bool ValidateCmdQueueFlags(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *caller_name,
1559                           VkQueueFlags required_flags, UNIQUE_VALIDATION_ERROR_CODE error_code) {
1560    auto pool = GetCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
1561    if (pool) {
1562        VkQueueFlags queue_flags = dev_data->phys_dev_properties.queue_family_properties[pool->queueFamilyIndex].queueFlags;
1563        if (!(required_flags & queue_flags)) {
1564            string required_flags_string;
1565            for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
1566                if (flag & required_flags) {
1567                    if (required_flags_string.size()) {
1568                        required_flags_string += " or ";
1569                    }
1570                    required_flags_string += string_VkQueueFlagBits(flag);
1571                }
1572            }
1573            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1574                           HandleToUint64(cb_node->commandBuffer), __LINE__, error_code, "DS",
1575                           "Cannot call %s on a command buffer allocated from a pool without %s capabilities. %s.", caller_name,
1576                           required_flags_string.c_str(), validation_error_map[error_code]);
1577        }
1578    }
1579    return false;
1580}
1581
1582static char const * GetCauseStr(VK_OBJECT obj) {
1583    if (obj.type == kVulkanObjectTypeDescriptorSet)
1584        return "destroyed or updated";
1585    if (obj.type == kVulkanObjectTypeCommandBuffer)
1586        return "destroyed or rerecorded";
1587    return "destroyed";
1588}
1589
1590static bool ReportInvalidCommandBuffer(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const char *call_source) {
1591    bool skip = false;
1592    for (auto obj : cb_state->broken_bindings) {
1593        const char *type_str = object_string[obj.type];
1594        const char *cause_str = GetCauseStr(obj);
1595        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1596                        HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
1597                        "You are adding %s to command buffer 0x%p that is invalid because bound %s 0x%" PRIxLEAST64 " was %s.",
1598                        call_source, cb_state->commandBuffer, type_str, obj.handle, cause_str);
1599    }
1600    return skip;
1601}
1602
1603// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
1604// there's an issue with the Cmd ordering
1605bool ValidateCmd(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
1606    switch (cb_state->state) {
1607        case CB_RECORDING:
1608            return ValidateCmdSubpassState(dev_data, cb_state, cmd);
1609
1610        case CB_INVALID_COMPLETE:
1611        case CB_INVALID_INCOMPLETE:
1612            return ReportInvalidCommandBuffer(dev_data, cb_state, caller_name);
1613
1614        default:
1615            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1616                           HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
1617                           "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
1618    }
1619}
1620
1621// For given object struct return a ptr of BASE_NODE type for its wrapping struct
1622BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
1623    BASE_NODE *base_ptr = nullptr;
1624    switch (object_struct.type) {
1625        case kVulkanObjectTypeDescriptorSet: {
1626            base_ptr = GetSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
1627            break;
1628        }
1629        case kVulkanObjectTypeSampler: {
1630            base_ptr = GetSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
1631            break;
1632        }
1633        case kVulkanObjectTypeQueryPool: {
1634            base_ptr = GetQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
1635            break;
1636        }
1637        case kVulkanObjectTypePipeline: {
1638            base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
1639            break;
1640        }
1641        case kVulkanObjectTypeBuffer: {
1642            base_ptr = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
1643            break;
1644        }
1645        case kVulkanObjectTypeBufferView: {
1646            base_ptr = GetBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
1647            break;
1648        }
1649        case kVulkanObjectTypeImage: {
1650            base_ptr = GetImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
1651            break;
1652        }
1653        case kVulkanObjectTypeImageView: {
1654            base_ptr = GetImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
1655            break;
1656        }
1657        case kVulkanObjectTypeEvent: {
1658            base_ptr = GetEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
1659            break;
1660        }
1661        case kVulkanObjectTypeDescriptorPool: {
1662            base_ptr = GetDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
1663            break;
1664        }
1665        case kVulkanObjectTypeCommandPool: {
1666            base_ptr = GetCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
1667            break;
1668        }
1669        case kVulkanObjectTypeFramebuffer: {
1670            base_ptr = GetFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
1671            break;
1672        }
1673        case kVulkanObjectTypeRenderPass: {
1674            base_ptr = GetRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
1675            break;
1676        }
1677        case kVulkanObjectTypeDeviceMemory: {
1678            base_ptr = GetMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
1679            break;
1680        }
1681        default:
1682            // TODO : Any other objects to be handled here?
1683            assert(0);
1684            break;
1685    }
1686    return base_ptr;
1687}
1688
1689// Tie the VK_OBJECT to the cmd buffer which includes:
1690//  Add object_binding to cmd buffer
1691//  Add cb_binding to object
1692static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
1693    cb_bindings->insert(cb_node);
1694    cb_node->object_bindings.insert(obj);
1695}
1696// For a given object, if cb_node is in that objects cb_bindings, remove cb_node
1697static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
1698    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
1699    if (base_obj) base_obj->cb_bindings.erase(cb_node);
1700}
1701// Reset the command buffer state
1702//  Maintain the createInfo and set state to CB_NEW, but clear all other state
1703static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
1704    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
1705    if (pCB) {
1706        pCB->in_use.store(0);
1707        // Reset CB state (note that createInfo is not cleared)
1708        pCB->commandBuffer = cb;
1709        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1710        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
1711        pCB->hasDrawCmd = false;
1712        pCB->state = CB_NEW;
1713        pCB->submitCount = 0;
1714        pCB->status = 0;
1715        pCB->viewportMask = 0;
1716        pCB->scissorMask = 0;
1717
1718        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
1719            pCB->lastBound[i].reset();
1720        }
1721
1722        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
1723        pCB->activeRenderPass = nullptr;
1724        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
1725        pCB->activeSubpass = 0;
1726        pCB->broken_bindings.clear();
1727        pCB->waitedEvents.clear();
1728        pCB->events.clear();
1729        pCB->writeEventsBeforeWait.clear();
1730        pCB->waitedEventsBeforeQueryReset.clear();
1731        pCB->queryToStateMap.clear();
1732        pCB->activeQueries.clear();
1733        pCB->startedQueries.clear();
1734        pCB->imageLayoutMap.clear();
1735        pCB->eventToStageMap.clear();
1736        pCB->drawData.clear();
1737        pCB->currentDrawData.buffers.clear();
1738        pCB->vertex_buffer_used = false;
1739        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
1740        // If secondary, invalidate any primary command buffer that may call us.
1741        if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
1742            invalidateCommandBuffers(dev_data,
1743                                     pCB->linkedCommandBuffers,
1744                                     {HandleToUint64(cb), kVulkanObjectTypeCommandBuffer});
1745        }
1746
1747        // Remove reverse command buffer links.
1748        for (auto pSubCB : pCB->linkedCommandBuffers) {
1749            pSubCB->linkedCommandBuffers.erase(pCB);
1750        }
1751        pCB->linkedCommandBuffers.clear();
1752        pCB->updateImages.clear();
1753        pCB->updateBuffers.clear();
1754        clear_cmd_buf_and_mem_references(dev_data, pCB);
1755        pCB->queue_submit_functions.clear();
1756        pCB->cmd_execute_commands_functions.clear();
1757        pCB->eventUpdates.clear();
1758        pCB->queryUpdates.clear();
1759
1760        // Remove object bindings
1761        for (auto obj : pCB->object_bindings) {
1762            removeCommandBufferBinding(dev_data, &obj, pCB);
1763        }
1764        pCB->object_bindings.clear();
1765        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
1766        for (auto framebuffer : pCB->framebuffers) {
1767            auto fb_state = GetFramebufferState(dev_data, framebuffer);
1768            if (fb_state) fb_state->cb_bindings.erase(pCB);
1769        }
1770        pCB->framebuffers.clear();
1771        pCB->activeFramebuffer = VK_NULL_HANDLE;
1772    }
1773}
1774
1775CBStatusFlags MakeStaticStateMask(VkPipelineDynamicStateCreateInfo const *ds) {
1776    // initially assume everything is static state
1777    CBStatusFlags flags = CBSTATUS_ALL_STATE_SET;
1778
1779    if (ds) {
1780        for (uint32_t i = 0; i < ds->dynamicStateCount; i++) {
1781            switch (ds->pDynamicStates[i]) {
1782                case VK_DYNAMIC_STATE_LINE_WIDTH:
1783                    flags &= ~CBSTATUS_LINE_WIDTH_SET;
1784                    break;
1785                case VK_DYNAMIC_STATE_DEPTH_BIAS:
1786                    flags &= ~CBSTATUS_DEPTH_BIAS_SET;
1787                    break;
1788                case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
1789                    flags &= ~CBSTATUS_BLEND_CONSTANTS_SET;
1790                    break;
1791                case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
1792                    flags &= ~CBSTATUS_DEPTH_BOUNDS_SET;
1793                    break;
1794                case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
1795                    flags &= ~CBSTATUS_STENCIL_READ_MASK_SET;
1796                    break;
1797                case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
1798                    flags &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
1799                    break;
1800                case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
1801                    flags &= ~CBSTATUS_STENCIL_REFERENCE_SET;
1802                    break;
1803                default:
1804                    break;
1805            }
1806        }
1807    }
1808
1809    return flags;
1810}
1811
1812// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
1813// render pass.
1814bool insideRenderPass(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const char *apiName,
1815                      UNIQUE_VALIDATION_ERROR_CODE msgCode) {
1816    bool inside = false;
1817    if (pCB->activeRenderPass) {
1818        inside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1819                         HandleToUint64(pCB->commandBuffer), __LINE__, msgCode, "DS",
1820                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 "). %s", apiName,
1821                         HandleToUint64(pCB->activeRenderPass->renderPass), validation_error_map[msgCode]);
1822    }
1823    return inside;
1824}
1825
1826// Flags validation error if the associated call is made outside a render pass. The apiName
1827// routine should ONLY be called inside a render pass.
1828bool outsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
1829    bool outside = false;
1830    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
1831        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
1832         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
1833        outside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1834                          HandleToUint64(pCB->commandBuffer), __LINE__, msgCode, "DS",
1835                          "%s: This call must be issued inside an active render pass. %s", apiName, validation_error_map[msgCode]);
1836    }
1837    return outside;
1838}
1839
1840static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
1841    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
1842}
1843
1844// For the given ValidationCheck enum, set all relevant instance disabled flags to true
1845void SetDisabledFlags(instance_layer_data *instance_data, VkValidationFlagsEXT *val_flags_struct) {
1846    for (uint32_t i = 0; i < val_flags_struct->disabledValidationCheckCount; ++i) {
1847        switch (val_flags_struct->pDisabledValidationChecks[i]) {
1848            case VK_VALIDATION_CHECK_SHADERS_EXT:
1849                instance_data->disabled.shader_validation = true;
1850                break;
1851            case VK_VALIDATION_CHECK_ALL_EXT:
1852                // Set all disabled flags to true
1853                instance_data->disabled.SetAll(true);
1854                break;
1855            default:
1856                break;
1857        }
1858    }
1859}
1860
1861VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
1862                                              VkInstance *pInstance) {
1863    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
1864
1865    assert(chain_info->u.pLayerInfo);
1866    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
1867    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
1868    if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
1869
1870    // Advance the link info for the next element on the chain
1871    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
1872
1873    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
1874    if (result != VK_SUCCESS) return result;
1875
1876    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
1877    instance_data->instance = *pInstance;
1878    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
1879    instance_data->report_data = debug_report_create_instance(
1880        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
1881    instance_data->extensions.InitFromInstanceCreateInfo(pCreateInfo);
1882    init_core_validation(instance_data, pAllocator);
1883
1884    ValidateLayerOrdering(*pCreateInfo);
1885    // Parse any pNext chains
1886    if (pCreateInfo->pNext) {
1887        GENERIC_HEADER *struct_header = (GENERIC_HEADER *)pCreateInfo->pNext;
1888        while (struct_header) {
1889            // Check for VkValidationFlagsExt
1890            if (VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT == struct_header->sType) {
1891                SetDisabledFlags(instance_data, (VkValidationFlagsEXT *)struct_header);
1892            }
1893            struct_header = (GENERIC_HEADER *)struct_header->pNext;
1894        }
1895    }
1896
1897    return result;
1898}
1899
1900// Hook DestroyInstance to remove tableInstanceMap entry
1901VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
1902    // TODOSC : Shouldn't need any customization here
1903    dispatch_key key = get_dispatch_key(instance);
1904    // TBD: Need any locking this early, in case this function is called at the
1905    // same time by more than one thread?
1906    instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
1907    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
1908
1909    lock_guard_t lock(global_lock);
1910    // Clean up logging callback, if any
1911    while (instance_data->logging_callback.size() > 0) {
1912        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
1913        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
1914        instance_data->logging_callback.pop_back();
1915    }
1916
1917    layer_debug_report_destroy_instance(instance_data->report_data);
1918    FreeLayerDataPtr(key, instance_layer_data_map);
1919}
1920
1921static bool ValidatePhysicalDeviceQueueFamily(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
1922                                              uint32_t requested_queue_family, int32_t err_code, const char *cmd_name,
1923                                              const char *queue_family_var_name, const char *vu_note = nullptr) {
1924    bool skip = false;
1925
1926    if (!vu_note) vu_note = validation_error_map[err_code];
1927
1928    const char *conditional_ext_cmd =
1929        instance_data->extensions.vk_khr_get_physical_device_properties_2 ? "or vkGetPhysicalDeviceQueueFamilyProperties2KHR" : "";
1930
1931    std::string count_note = (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
1932                                 ? "the pQueueFamilyPropertyCount was never obtained"
1933                                 : "i.e. is not less than " + std::to_string(pd_state->queue_family_count);
1934
1935    if (requested_queue_family >= pd_state->queue_family_count) {
1936        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
1937                        HandleToUint64(pd_state->phys_device), __LINE__, err_code, "DL",
1938                        "%s: %s (= %" PRIu32
1939                        ") is not less than any previously obtained pQueueFamilyPropertyCount from "
1940                        "vkGetPhysicalDeviceQueueFamilyProperties%s (%s). %s",
1941                        cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, count_note.c_str(), vu_note);
1942    }
1943    return skip;
1944}
1945
1946// Verify VkDeviceQueueCreateInfos
1947static bool ValidateDeviceQueueCreateInfos(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
1948                                           uint32_t info_count, const VkDeviceQueueCreateInfo *infos) {
1949    bool skip = false;
1950
1951    for (uint32_t i = 0; i < info_count; ++i) {
1952        const auto requested_queue_family = infos[i].queueFamilyIndex;
1953
1954        // Verify that requested queue family is known to be valid at this point in time
1955        std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
1956        skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, requested_queue_family, VALIDATION_ERROR_06c002fa,
1957                                                  "vkCreateDevice", queue_family_var_name.c_str());
1958
1959        // Verify that requested  queue count of queue family is known to be valid at this point in time
1960        if (requested_queue_family < pd_state->queue_family_count) {
1961            const auto requested_queue_count = infos[i].queueCount;
1962            const auto queue_family_props_count = pd_state->queue_family_properties.size();
1963            const bool queue_family_has_props = requested_queue_family < queue_family_props_count;
1964            const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
1965                                                  ? "or vkGetPhysicalDeviceQueueFamilyProperties2KHR"
1966                                                  : "";
1967            std::string count_note =
1968                !queue_family_has_props
1969                    ? "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained"
1970                    : "i.e. is not less than or equal to " +
1971                          std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount);
1972
1973            if (!queue_family_has_props ||
1974                requested_queue_count > pd_state->queue_family_properties[requested_queue_family].queueCount) {
1975                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1976                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(pd_state->phys_device), __LINE__,
1977                                VALIDATION_ERROR_06c002fc, "DL",
1978                                "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
1979                                ") is not "
1980                                "less than or equal to available queue count for this "
1981                                "pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueFamilyIndex} (=%" PRIu32
1982                                ") obtained previously "
1983                                "from vkGetPhysicalDeviceQueueFamilyProperties%s (%s). %s",
1984                                i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str(),
1985                                validation_error_map[VALIDATION_ERROR_06c002fc]);
1986            }
1987        }
1988    }
1989
1990    return skip;
1991}
1992
1993// Verify that features have been queried and that they are available
1994static bool ValidateRequestedFeatures(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
1995                                      const VkPhysicalDeviceFeatures *requested_features) {
1996    bool skip = false;
1997
1998    const VkBool32 *actual = reinterpret_cast<const VkBool32 *>(&pd_state->features);
1999    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
2000    // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
2001    //  Need to provide the struct member name with the issue. To do that seems like we'll
2002    //  have to loop through each struct member which should be done w/ codegen to keep in synch.
2003    uint32_t errors = 0;
2004    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
2005    for (uint32_t i = 0; i < total_bools; i++) {
2006        if (requested[i] > actual[i]) {
2007            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2008                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
2009                            "While calling vkCreateDevice(), requesting feature '%s' in VkPhysicalDeviceFeatures struct, "
2010                            "which is not available on this device.",
2011                            GetPhysDevFeatureString(i));
2012            errors++;
2013        }
2014    }
2015    if (errors && (UNCALLED == pd_state->vkGetPhysicalDeviceFeaturesState)) {
2016        // If user didn't request features, notify them that they should
2017        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
2018        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2019                        0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
2020                        "You requested features that are unavailable on this device. You should first query feature "
2021                        "availability by calling vkGetPhysicalDeviceFeatures().");
2022    }
2023    return skip;
2024}
2025
2026VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
2027                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
2028    bool skip = false;
2029    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
2030
2031    unique_lock_t lock(global_lock);
2032    auto pd_state = GetPhysicalDeviceState(instance_data, gpu);
2033
2034    // TODO: object_tracker should perhaps do this instead
2035    //       and it does not seem to currently work anyway -- the loader just crashes before this point
2036    if (!GetPhysicalDeviceState(instance_data, gpu)) {
2037        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2038                        0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
2039                        "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
2040    }
2041
2042    // Check that any requested features are available
2043    if (pCreateInfo->pEnabledFeatures) {
2044        skip |= ValidateRequestedFeatures(instance_data, pd_state, pCreateInfo->pEnabledFeatures);
2045    }
2046    skip |=
2047        ValidateDeviceQueueCreateInfos(instance_data, pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);
2048
2049    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2050
2051    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
2052
2053    assert(chain_info->u.pLayerInfo);
2054    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
2055    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
2056    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_data->instance, "vkCreateDevice");
2057    if (fpCreateDevice == NULL) {
2058        return VK_ERROR_INITIALIZATION_FAILED;
2059    }
2060
2061    // Advance the link info for the next element on the chain
2062    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
2063
2064    lock.unlock();
2065
2066    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
2067    if (result != VK_SUCCESS) {
2068        return result;
2069    }
2070
2071    lock.lock();
2072    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
2073
2074    device_data->instance_data = instance_data;
2075    // Setup device dispatch table
2076    layer_init_device_dispatch_table(*pDevice, &device_data->dispatch_table, fpGetDeviceProcAddr);
2077    device_data->device = *pDevice;
2078    // Save PhysicalDevice handle
2079    device_data->physical_device = gpu;
2080
2081    device_data->report_data = layer_debug_report_create_device(instance_data->report_data, *pDevice);
2082    device_data->extensions.InitFromDeviceCreateInfo(&instance_data->extensions, pCreateInfo);
2083
2084    // Get physical device limits for this device
2085    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(device_data->phys_dev_properties.properties));
2086    uint32_t count;
2087    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
2088    device_data->phys_dev_properties.queue_family_properties.resize(count);
2089    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
2090        gpu, &count, &device_data->phys_dev_properties.queue_family_properties[0]);
2091    // TODO: device limits should make sure these are compatible
2092    if (pCreateInfo->pEnabledFeatures) {
2093        device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
2094    } else {
2095        memset(&device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
2096    }
2097    // Store physical device properties and physical device mem limits into device layer_data structs
2098    instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &device_data->phys_dev_mem_props);
2099    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &device_data->phys_dev_props);
2100    lock.unlock();
2101
2102    ValidateLayerOrdering(*pCreateInfo);
2103
2104    return result;
2105}
2106
2107// prototype
2108VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
2109    // TODOSC : Shouldn't need any customization here
2110    dispatch_key key = get_dispatch_key(device);
2111    layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);
2112    // Free all the memory
2113    unique_lock_t lock(global_lock);
2114    deletePipelines(dev_data);
2115    dev_data->renderPassMap.clear();
2116    for (auto ii = dev_data->commandBufferMap.begin(); ii != dev_data->commandBufferMap.end(); ++ii) {
2117        delete (*ii).second;
2118    }
2119    dev_data->commandBufferMap.clear();
2120    // This will also delete all sets in the pool & remove them from setMap
2121    deletePools(dev_data);
2122    // All sets should be removed
2123    assert(dev_data->setMap.empty());
2124    dev_data->descriptorSetLayoutMap.clear();
2125    dev_data->imageViewMap.clear();
2126    dev_data->imageMap.clear();
2127    dev_data->imageSubresourceMap.clear();
2128    dev_data->imageLayoutMap.clear();
2129    dev_data->bufferViewMap.clear();
2130    dev_data->bufferMap.clear();
2131    // Queues persist until device is destroyed
2132    dev_data->queueMap.clear();
2133    // Report any memory leaks
2134    layer_debug_report_destroy_device(device);
2135    lock.unlock();
2136
2137#if DISPATCH_MAP_DEBUG
2138    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
2139#endif
2140
2141    dev_data->dispatch_table.DestroyDevice(device, pAllocator);
2142    FreeLayerDataPtr(key, layer_data_map);
2143}
2144
2145static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
2146
2147// For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id
2148//   and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id
2149static bool ValidateStageMaskGsTsEnables(layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
2150                                         UNIQUE_VALIDATION_ERROR_CODE geo_error_id, UNIQUE_VALIDATION_ERROR_CODE tess_error_id) {
2151    bool skip = false;
2152    if (!dev_data->enabled_features.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
2153        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
2154                        geo_error_id, "DL",
2155                        "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when "
2156                        "device does not have geometryShader feature enabled. %s",
2157                        caller, validation_error_map[geo_error_id]);
2158    }
2159    if (!dev_data->enabled_features.tessellationShader &&
2160        (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
2161        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
2162                        tess_error_id, "DL",
2163                        "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT "
2164                        "and/or VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device "
2165                        "does not have tessellationShader feature enabled. %s",
2166                        caller, validation_error_map[tess_error_id]);
2167    }
2168    return skip;
2169}
2170
2171// Loop through bound objects and increment their in_use counts.
2172static void IncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2173    for (auto obj : cb_node->object_bindings) {
2174        auto base_obj = GetStateStructPtrFromObject(dev_data, obj);
2175        if (base_obj) {
2176            base_obj->in_use.fetch_add(1);
2177        }
2178    }
2179}
2180// Track which resources are in-flight by atomically incrementing their "in_use" count
2181static void incrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2182    cb_node->submitCount++;
2183    cb_node->in_use.fetch_add(1);
2184
2185    // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
2186    IncrementBoundObjects(dev_data, cb_node);
2187    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2188    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2189    //  should then be flagged prior to calling this function
2190    for (auto drawDataElement : cb_node->drawData) {
2191        for (auto buffer : drawDataElement.buffers) {
2192            auto buffer_state = GetBufferState(dev_data, buffer);
2193            if (buffer_state) {
2194                buffer_state->in_use.fetch_add(1);
2195            }
2196        }
2197    }
2198    for (auto event : cb_node->writeEventsBeforeWait) {
2199        auto event_state = GetEventNode(dev_data, event);
2200        if (event_state) event_state->write_in_use++;
2201    }
2202}
2203
2204// Note: This function assumes that the global lock is held by the calling thread.
2205// For the given queue, verify the queue state up to the given seq number.
2206// Currently the only check is to make sure that if there are events to be waited on prior to
2207//  a QueryReset, make sure that all such events have been signalled.
2208static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *initial_queue, uint64_t initial_seq) {
2209    bool skip = false;
2210
2211    // sequence number we want to validate up to, per queue
2212    std::unordered_map<QUEUE_STATE *, uint64_t> target_seqs { { initial_queue, initial_seq } };
2213    // sequence number we've completed validation for, per queue
2214    std::unordered_map<QUEUE_STATE *, uint64_t> done_seqs;
2215    std::vector<QUEUE_STATE *> worklist { initial_queue };
2216
2217    while (worklist.size()) {
2218        auto queue = worklist.back();
2219        worklist.pop_back();
2220
2221        auto target_seq = target_seqs[queue];
2222        auto seq = std::max(done_seqs[queue], queue->seq);
2223        auto sub_it = queue->submissions.begin() + int(seq - queue->seq);  // seq >= queue->seq
2224
2225        for (; seq < target_seq; ++sub_it, ++seq) {
2226            for (auto &wait : sub_it->waitSemaphores) {
2227                auto other_queue = GetQueueState(dev_data, wait.queue);
2228
2229                if (other_queue == queue)
2230                    continue;   // semaphores /always/ point backwards, so no point here.
2231
2232                auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
2233                auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);
2234
2235                // if this wait is for another queue, and covers new sequence
2236                // numbers beyond what we've already validated, mark the new
2237                // target seq and (possibly-re)add the queue to the worklist.
2238                if (other_done_seq < other_target_seq) {
2239                    target_seqs[other_queue] = other_target_seq;
2240                    worklist.push_back(other_queue);
2241                }
2242            }
2243
2244            for (auto cb : sub_it->cbs) {
2245                auto cb_node = GetCBNode(dev_data, cb);
2246                if (cb_node) {
2247                    for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
2248                        for (auto event : queryEventsPair.second) {
2249                            if (dev_data->eventMap[event].needsSignaled) {
2250                                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2251                                                VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
2252                                                "Cannot get query results on queryPool 0x%" PRIx64
2253                                                " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
2254                                                HandleToUint64(queryEventsPair.first.pool), queryEventsPair.first.index,
2255                                                HandleToUint64(event));
2256                            }
2257                        }
2258                    }
2259                }
2260            }
2261        }
2262
2263        // finally mark the point we've now validated this queue to.
2264        done_seqs[queue] = seq;
2265    }
2266
2267    return skip;
2268}
2269
2270// When the given fence is retired, verify outstanding queue operations through the point of the fence
2271static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
2272    auto fence_state = GetFenceNode(dev_data, fence);
2273    if (VK_NULL_HANDLE != fence_state->signaler.first) {
2274        return VerifyQueueStateToSeq(dev_data, GetQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
2275    }
2276    return false;
2277}
2278
2279// Decrement in-use count for objects bound to command buffer
2280static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2281    BASE_NODE *base_obj = nullptr;
2282    for (auto obj : cb_node->object_bindings) {
2283        base_obj = GetStateStructPtrFromObject(dev_data, obj);
2284        if (base_obj) {
2285            base_obj->in_use.fetch_sub(1);
2286        }
2287    }
2288}
2289
2290static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
2291    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
2292
2293    // Roll this queue forward, one submission at a time.
2294    while (pQueue->seq < seq) {
2295        auto &submission = pQueue->submissions.front();
2296
2297        for (auto &wait : submission.waitSemaphores) {
2298            auto pSemaphore = GetSemaphoreNode(dev_data, wait.semaphore);
2299            if (pSemaphore) {
2300                pSemaphore->in_use.fetch_sub(1);
2301            }
2302            auto &lastSeq = otherQueueSeqs[wait.queue];
2303            lastSeq = std::max(lastSeq, wait.seq);
2304        }
2305
2306        for (auto &semaphore : submission.signalSemaphores) {
2307            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2308            if (pSemaphore) {
2309                pSemaphore->in_use.fetch_sub(1);
2310            }
2311        }
2312
2313        for (auto cb : submission.cbs) {
2314            auto cb_node = GetCBNode(dev_data, cb);
2315            if (!cb_node) {
2316                continue;
2317            }
2318            // First perform decrement on general case bound objects
2319            DecrementBoundResources(dev_data, cb_node);
2320            for (auto drawDataElement : cb_node->drawData) {
2321                for (auto buffer : drawDataElement.buffers) {
2322                    auto buffer_state = GetBufferState(dev_data, buffer);
2323                    if (buffer_state) {
2324                        buffer_state->in_use.fetch_sub(1);
2325                    }
2326                }
2327            }
2328            for (auto event : cb_node->writeEventsBeforeWait) {
2329                auto eventNode = dev_data->eventMap.find(event);
2330                if (eventNode != dev_data->eventMap.end()) {
2331                    eventNode->second.write_in_use--;
2332                }
2333            }
2334            for (auto queryStatePair : cb_node->queryToStateMap) {
2335                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
2336            }
2337            for (auto eventStagePair : cb_node->eventToStageMap) {
2338                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
2339            }
2340
2341            cb_node->in_use.fetch_sub(1);
2342        }
2343
2344        auto pFence = GetFenceNode(dev_data, submission.fence);
2345        if (pFence) {
2346            pFence->state = FENCE_RETIRED;
2347        }
2348
2349        pQueue->submissions.pop_front();
2350        pQueue->seq++;
2351    }
2352
2353    // Roll other queues forward to the highest seq we saw a wait for
2354    for (auto qs : otherQueueSeqs) {
2355        RetireWorkOnQueue(dev_data, GetQueueState(dev_data, qs.first), qs.second);
2356    }
2357}
2358
2359// Submit a fence to a queue, delimiting previous fences and previous untracked
2360// work by it.
2361static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
2362    pFence->state = FENCE_INFLIGHT;
2363    pFence->signaler.first = pQueue->queue;
2364    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
2365}
2366
2367static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2368    bool skip = false;
2369    if ((pCB->in_use.load() || current_submit_count > 1) &&
2370        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2371        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2372                        __LINE__, VALIDATION_ERROR_31a0008e, "DS",
2373                        "Command Buffer 0x%p is already in use and is not marked for simultaneous use. %s", pCB->commandBuffer,
2374                        validation_error_map[VALIDATION_ERROR_31a0008e]);
2375    }
2376    return skip;
2377}
2378
2379static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source,
2380                                       int current_submit_count, UNIQUE_VALIDATION_ERROR_CODE vu_id) {
2381    bool skip = false;
2382    if (dev_data->instance_data->disabled.command_buffer_state) return skip;
2383    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
2384    if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
2385        (cb_state->submitCount + current_submit_count > 1)) {
2386        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2387                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
2388                        "Commandbuffer 0x%p was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
2389                        "set, but has been submitted 0x%" PRIxLEAST64 " times.",
2390                        cb_state->commandBuffer, cb_state->submitCount + current_submit_count);
2391    }
2392
2393    // Validate that cmd buffers have been updated
2394    switch (cb_state->state) {
2395        case CB_INVALID_INCOMPLETE:
2396        case CB_INVALID_COMPLETE:
2397            skip |= ReportInvalidCommandBuffer(dev_data, cb_state, call_source);
2398            break;
2399
2400        case CB_NEW:
2401            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2402                            (uint64_t)(cb_state->commandBuffer), __LINE__, vu_id, "DS",
2403                            "Command buffer 0x%p used in the call to %s is unrecorded and contains no commands. %s",
2404                            cb_state->commandBuffer, call_source, validation_error_map[vu_id]);
2405            break;
2406
2407        case CB_RECORDING:
2408            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2409                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
2410                            "You must call vkEndCommandBuffer() on command buffer 0x%p before this call to %s!",
2411                            cb_state->commandBuffer, call_source);
2412            break;
2413
2414        default: /* recorded */
2415            break;
2416    }
2417    return skip;
2418}
2419
2420static bool validateResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2421    bool skip = false;
2422
2423    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2424    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2425    //  should then be flagged prior to calling this function
2426    for (auto drawDataElement : cb_node->drawData) {
2427        for (auto buffer : drawDataElement.buffers) {
2428            auto buffer_state = GetBufferState(dev_data, buffer);
2429            if (!buffer_state) {
2430                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
2431                                HandleToUint64(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
2432                                "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", HandleToUint64(buffer));
2433            }
2434        }
2435    }
2436    return skip;
2437}
2438
2439// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
2440bool ValidImageBufferQueue(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue, uint32_t count,
2441                           const uint32_t *indices) {
2442    bool found = false;
2443    bool skip = false;
2444    auto queue_state = GetQueueState(dev_data, queue);
2445    if (queue_state) {
2446        for (uint32_t i = 0; i < count; i++) {
2447            if (indices[i] == queue_state->queueFamilyIndex) {
2448                found = true;
2449                break;
2450            }
2451        }
2452
2453        if (!found) {
2454            skip = log_msg(
2455                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object->type], object->handle, __LINE__,
2456                DRAWSTATE_INVALID_QUEUE_FAMILY, "DS", "vkQueueSubmit: Command buffer 0x%" PRIxLEAST64 " contains %s 0x%" PRIxLEAST64
2457                                                      " which was not created allowing concurrent access to this queue family %d.",
2458                HandleToUint64(cb_node->commandBuffer), object_string[object->type], object->handle, queue_state->queueFamilyIndex);
2459        }
2460    }
2461    return skip;
2462}
2463
2464// Validate that queueFamilyIndices of primary command buffers match this queue
2465// Secondary command buffers were previously validated in vkCmdExecuteCommands().
2466static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
2467    bool skip = false;
2468    auto pPool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
2469    auto queue_state = GetQueueState(dev_data, queue);
2470
2471    if (pPool && queue_state) {
2472        if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
2473            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2474                            HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_31a00094, "DS",
2475                            "vkQueueSubmit: Primary command buffer 0x%p created in queue family %d is being submitted on queue "
2476                            "0x%p from queue family %d. %s",
2477                            pCB->commandBuffer, pPool->queueFamilyIndex, queue, queue_state->queueFamilyIndex,
2478                            validation_error_map[VALIDATION_ERROR_31a00094]);
2479        }
2480
2481        // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
2482        for (auto object : pCB->object_bindings) {
2483            if (object.type == kVulkanObjectTypeImage) {
2484                auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(object.handle));
2485                if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2486                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, image_state->createInfo.queueFamilyIndexCount,
2487                                                  image_state->createInfo.pQueueFamilyIndices);
2488                }
2489            } else if (object.type == kVulkanObjectTypeBuffer) {
2490                auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object.handle));
2491                if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2492                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, buffer_state->createInfo.queueFamilyIndexCount,
2493                                                  buffer_state->createInfo.pQueueFamilyIndices);
2494                }
2495            }
2496        }
2497    }
2498
2499    return skip;
2500}
2501
2502static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2503    // Track in-use for resources off of primary and any secondary CBs
2504    bool skip = false;
2505
2506    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
2507    // on device
2508    skip |= validateCommandBufferSimultaneousUse(dev_data, pCB, current_submit_count);
2509
2510    skip |= validateResources(dev_data, pCB);
2511
2512    for (auto pSubCB : pCB->linkedCommandBuffers) {
2513        skip |= validateResources(dev_data, pSubCB);
2514        // TODO: replace with invalidateCommandBuffers() at recording.
2515        if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
2516            !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2517            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2518                    __LINE__, VALIDATION_ERROR_31a00092, "DS",
2519                    "Commandbuffer 0x%p was submitted with secondary buffer 0x%p but that buffer has subsequently been bound to "
2520                    "primary cmd buffer 0x%p and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set. %s",
2521                    pCB->commandBuffer, pSubCB->commandBuffer, pSubCB->primaryCommandBuffer,
2522                    validation_error_map[VALIDATION_ERROR_31a00092]);
2523        }
2524    }
2525
2526    skip |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()", current_submit_count, VALIDATION_ERROR_31a00090);
2527
2528    return skip;
2529}
2530
2531static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
2532    bool skip = false;
2533
2534    if (pFence) {
2535        if (pFence->state == FENCE_INFLIGHT) {
2536            // TODO: opportunities for VALIDATION_ERROR_31a00080, VALIDATION_ERROR_316008b4, VALIDATION_ERROR_16400a0e
2537            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2538                            HandleToUint64(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
2539                            "Fence 0x%" PRIx64 " is already in use by another submission.", HandleToUint64(pFence->fence));
2540        }
2541
2542        else if (pFence->state == FENCE_RETIRED) {
2543            // TODO: opportunities for VALIDATION_ERROR_31a0007e, VALIDATION_ERROR_316008b2, VALIDATION_ERROR_16400a0e
2544            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2545                            HandleToUint64(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
2546                            "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
2547                            HandleToUint64(pFence->fence));
2548        }
2549    }
2550
2551    return skip;
2552}
2553
2554static void PostCallRecordQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2555                                      VkFence fence) {
2556    auto pQueue = GetQueueState(dev_data, queue);
2557    auto pFence = GetFenceNode(dev_data, fence);
2558
2559    // Mark the fence in-use.
2560    if (pFence) {
2561        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
2562    }
2563
2564    // Now process each individual submit
2565    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2566        std::vector<VkCommandBuffer> cbs;
2567        const VkSubmitInfo *submit = &pSubmits[submit_idx];
2568        vector<SEMAPHORE_WAIT> semaphore_waits;
2569        vector<VkSemaphore> semaphore_signals;
2570        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2571            VkSemaphore semaphore = submit->pWaitSemaphores[i];
2572            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2573            if (pSemaphore) {
2574                if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
2575                    semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
2576                    pSemaphore->in_use.fetch_add(1);
2577                }
2578                pSemaphore->signaler.first = VK_NULL_HANDLE;
2579                pSemaphore->signaled = false;
2580            }
2581        }
2582        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2583            VkSemaphore semaphore = submit->pSignalSemaphores[i];
2584            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2585            if (pSemaphore) {
2586                pSemaphore->signaler.first = queue;
2587                pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
2588                pSemaphore->signaled = true;
2589                pSemaphore->in_use.fetch_add(1);
2590                semaphore_signals.push_back(semaphore);
2591            }
2592        }
2593        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2594            auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2595            if (cb_node) {
2596                cbs.push_back(submit->pCommandBuffers[i]);
2597                for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
2598                    cbs.push_back(secondaryCmdBuffer->commandBuffer);
2599                }
2600                UpdateCmdBufImageLayouts(dev_data, cb_node);
2601                incrementResources(dev_data, cb_node);
2602                for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
2603                    incrementResources(dev_data, secondaryCmdBuffer);
2604                }
2605            }
2606        }
2607        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
2608                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
2609    }
2610
2611    if (pFence && !submitCount) {
2612        // If no submissions, but just dropping a fence on the end of the queue,
2613        // record an empty submission with just the fence, so we can determine
2614        // its completion.
2615        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(),
2616                                         fence);
2617    }
2618}
2619
2620static bool PreCallValidateQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2621                                       VkFence fence) {
2622    auto pFence = GetFenceNode(dev_data, fence);
2623    bool skip = ValidateFenceForSubmit(dev_data, pFence);
2624    if (skip) {
2625        return true;
2626    }
2627
2628    unordered_set<VkSemaphore> signaled_semaphores;
2629    unordered_set<VkSemaphore> unsignaled_semaphores;
2630    vector<VkCommandBuffer> current_cmds;
2631    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> localImageLayoutMap;
2632    // Now verify each individual submit
2633    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2634        const VkSubmitInfo *submit = &pSubmits[submit_idx];
2635        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2636            skip |= ValidateStageMaskGsTsEnables(dev_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()",
2637                                                 VALIDATION_ERROR_13c00098, VALIDATION_ERROR_13c0009a);
2638            VkSemaphore semaphore = submit->pWaitSemaphores[i];
2639            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2640            if (pSemaphore) {
2641                if (unsignaled_semaphores.count(semaphore) ||
2642                    (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
2643                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2644                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
2645                                    "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
2646                                    HandleToUint64(semaphore));
2647                } else {
2648                    signaled_semaphores.erase(semaphore);
2649                    unsignaled_semaphores.insert(semaphore);
2650                }
2651            }
2652        }
2653        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2654            VkSemaphore semaphore = submit->pSignalSemaphores[i];
2655            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2656            if (pSemaphore) {
2657                if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
2658                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2659                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
2660                                    "Queue 0x%p is signaling semaphore 0x%" PRIx64
2661                                    " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
2662                                    queue, HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
2663                } else {
2664                    unsignaled_semaphores.erase(semaphore);
2665                    signaled_semaphores.insert(semaphore);
2666                }
2667            }
2668        }
2669        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2670            auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2671            if (cb_node) {
2672                skip |= ValidateCmdBufImageLayouts(dev_data, cb_node, dev_data->imageLayoutMap, localImageLayoutMap);
2673                current_cmds.push_back(submit->pCommandBuffers[i]);
2674                skip |= validatePrimaryCommandBufferState(
2675                    dev_data, cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]));
2676                skip |= validateQueueFamilyIndices(dev_data, cb_node, queue);
2677
2678                // Potential early exit here as bad object state may crash in delayed function calls
2679                if (skip) {
2680                    return true;
2681                }
2682
2683                // Call submit-time functions to validate/update state
2684                for (auto &function : cb_node->queue_submit_functions) {
2685                    skip |= function();
2686                }
2687                for (auto &function : cb_node->eventUpdates) {
2688                    skip |= function(queue);
2689                }
2690                for (auto &function : cb_node->queryUpdates) {
2691                    skip |= function(queue);
2692                }
2693            }
2694        }
2695    }
2696    return skip;
2697}
2698
2699VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
2700    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
2701    unique_lock_t lock(global_lock);
2702
2703    bool skip = PreCallValidateQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
2704    lock.unlock();
2705
2706    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2707
2708    VkResult result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
2709
2710    lock.lock();
2711    PostCallRecordQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
2712    lock.unlock();
2713    return result;
2714}
2715
2716static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
2717    bool skip = false;
2718    if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
2719        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2720                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_16c004f8, "MEM",
2721                        "Number of currently valid memory objects is not less than the maximum allowed (%u). %s",
2722                        dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount,
2723                        validation_error_map[VALIDATION_ERROR_16c004f8]);
2724    }
2725    return skip;
2726}
2727
2728static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
2729    add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
2730    return;
2731}
2732
2733VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
2734                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
2735    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
2736    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2737    unique_lock_t lock(global_lock);
2738    bool skip = PreCallValidateAllocateMemory(dev_data);
2739    if (!skip) {
2740        lock.unlock();
2741        result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
2742        lock.lock();
2743        if (VK_SUCCESS == result) {
2744            PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
2745        }
2746    }
2747    return result;
2748}
2749
2750// For given obj node, if it is use, flag a validation error and return callback result, else return false
2751bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
2752                            UNIQUE_VALIDATION_ERROR_CODE error_code) {
2753    if (dev_data->instance_data->disabled.object_in_use) return false;
2754    bool skip = false;
2755    if (obj_node->in_use.load()) {
2756        skip |=
2757            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle,
2758                    __LINE__, error_code, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer. %s",
2759                    object_string[obj_struct.type], obj_struct.handle, validation_error_map[error_code]);
2760    }
2761    return skip;
2762}
2763
2764static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
2765    *mem_info = GetMemObjInfo(dev_data, mem);
2766    *obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory};
2767    if (dev_data->instance_data->disabled.free_memory) return false;
2768    bool skip = false;
2769    if (*mem_info) {
2770        skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, VALIDATION_ERROR_2880054a);
2771    }
2772    return skip;
2773}
2774
2775static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
2776    // Clear mem binding for any bound objects
2777    for (auto obj : mem_info->obj_bindings) {
2778        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle, __LINE__,
2779                MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
2780                obj.handle, HandleToUint64(mem_info->mem));
2781        switch (obj.type) {
2782            case kVulkanObjectTypeImage: {
2783                auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
2784                assert(image_state);  // Any destroyed images should already be removed from bindings
2785                image_state->binding.mem = MEMORY_UNBOUND;
2786                break;
2787            }
2788            case kVulkanObjectTypeBuffer: {
2789                auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
2790                assert(buffer_state);  // Any destroyed buffers should already be removed from bindings
2791                buffer_state->binding.mem = MEMORY_UNBOUND;
2792                break;
2793            }
2794            default:
2795                // Should only have buffer or image objects bound to memory
2796                assert(0);
2797        }
2798    }
2799    // Any bound cmd buffers are now invalid
2800    invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
2801    dev_data->memObjMap.erase(mem);
2802}
2803
2804VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
2805    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2806    DEVICE_MEM_INFO *mem_info = nullptr;
2807    VK_OBJECT obj_struct;
2808    unique_lock_t lock(global_lock);
2809    bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
2810    if (!skip) {
2811        lock.unlock();
2812        dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
2813        lock.lock();
2814        if (mem != VK_NULL_HANDLE) {
2815            PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
2816        }
2817    }
2818}
2819
2820// Validate that given Map memory range is valid. This means that the memory should not already be mapped,
2821//  and that the size of the map range should be:
2822//  1. Not zero
2823//  2. Within the size of the memory allocation
2824static bool ValidateMapMemRange(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
2825    bool skip = false;
2826
2827    if (size == 0) {
2828        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2829                       HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
2830                       "VkMapMemory: Attempting to map memory range of size zero");
2831    }
2832
2833    auto mem_element = dev_data->memObjMap.find(mem);
2834    if (mem_element != dev_data->memObjMap.end()) {
2835        auto mem_info = mem_element->second.get();
2836        // It is an application error to call VkMapMemory on an object that is already mapped
2837        if (mem_info->mem_range.size != 0) {
2838            skip =
2839                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2840                        HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
2841                        "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, HandleToUint64(mem));
2842        }
2843
2844        // Validate that offset + size is within object's allocationSize
2845        if (size == VK_WHOLE_SIZE) {
2846            if (offset >= mem_info->alloc_info.allocationSize) {
2847                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2848                               HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
2849                               "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
2850                               " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
2851                               offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
2852            }
2853        } else {
2854            if ((offset + size) > mem_info->alloc_info.allocationSize) {
2855                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2856                               HandleToUint64(mem), __LINE__, VALIDATION_ERROR_31200552, "MEM",
2857                               "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ". %s",
2858                               offset, size + offset, mem_info->alloc_info.allocationSize,
2859                               validation_error_map[VALIDATION_ERROR_31200552]);
2860            }
2861        }
2862    }
2863    return skip;
2864}
2865
2866static void storeMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
2867    auto mem_info = GetMemObjInfo(dev_data, mem);
2868    if (mem_info) {
2869        mem_info->mem_range.offset = offset;
2870        mem_info->mem_range.size = size;
2871    }
2872}
2873
2874static bool deleteMemRanges(layer_data *dev_data, VkDeviceMemory mem) {
2875    bool skip = false;
2876    auto mem_info = GetMemObjInfo(dev_data, mem);
2877    if (mem_info) {
2878        if (!mem_info->mem_range.size) {
2879            // Valid Usage: memory must currently be mapped
2880            skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2881                           HandleToUint64(mem), __LINE__, VALIDATION_ERROR_33600562, "MEM",
2882                           "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64 ". %s", HandleToUint64(mem),
2883                           validation_error_map[VALIDATION_ERROR_33600562]);
2884        }
2885        mem_info->mem_range.size = 0;
2886        if (mem_info->shadow_copy) {
2887            free(mem_info->shadow_copy_base);
2888            mem_info->shadow_copy_base = 0;
2889            mem_info->shadow_copy = 0;
2890        }
2891    }
2892    return skip;
2893}
2894
2895// Guard value for pad data
2896static char NoncoherentMemoryFillValue = 0xb;
2897
2898static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
2899                                     void **ppData) {
2900    auto mem_info = GetMemObjInfo(dev_data, mem);
2901    if (mem_info) {
2902        mem_info->p_driver_data = *ppData;
2903        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
2904        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
2905            mem_info->shadow_copy = 0;
2906        } else {
2907            if (size == VK_WHOLE_SIZE) {
2908                size = mem_info->alloc_info.allocationSize - offset;
2909            }
2910            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
2911            assert(SafeModulo(mem_info->shadow_pad_size,
2912                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
2913            // Ensure start of mapped region reflects hardware alignment constraints
2914            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
2915
2916            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
2917            uint64_t start_offset = offset % map_alignment;
2918            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
2919            mem_info->shadow_copy_base =
2920                malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
2921
2922            mem_info->shadow_copy =
2923                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
2924                                         ~(map_alignment - 1)) +
2925                start_offset;
2926            assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
2927                                  map_alignment) == 0);
2928
2929            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
2930            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
2931        }
2932    }
2933}
2934
2935// Verify that state for fence being waited on is appropriate. That is,
2936//  a fence being waited on should not already be signaled and
2937//  it should have been submitted on a queue or during acquire next image
2938static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
2939    bool skip = false;
2940
2941    auto pFence = GetFenceNode(dev_data, fence);
2942    if (pFence) {
2943        if (pFence->state == FENCE_UNSIGNALED) {
2944            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2945                            HandleToUint64(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
2946                            "%s called for fence 0x%" PRIxLEAST64
2947                            " which has not been submitted on a Queue or during "
2948                            "acquire next image.",
2949                            apiCall, HandleToUint64(fence));
2950        }
2951    }
2952    return skip;
2953}
2954
2955static void RetireFence(layer_data *dev_data, VkFence fence) {
2956    auto pFence = GetFenceNode(dev_data, fence);
2957    if (pFence->signaler.first != VK_NULL_HANDLE) {
2958        // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
2959        RetireWorkOnQueue(dev_data, GetQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
2960    } else {
2961        // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
2962        // the fence as retired.
2963        pFence->state = FENCE_RETIRED;
2964    }
2965}
2966
2967static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
2968    if (dev_data->instance_data->disabled.wait_for_fences) return false;
2969    bool skip = false;
2970    for (uint32_t i = 0; i < fence_count; i++) {
2971        skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
2972        skip |= VerifyQueueStateToFence(dev_data, fences[i]);
2973    }
2974    return skip;
2975}
2976
2977static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
2978    // When we know that all fences are complete we can clean/remove their CBs
2979    if ((VK_TRUE == wait_all) || (1 == fence_count)) {
2980        for (uint32_t i = 0; i < fence_count; i++) {
2981            RetireFence(dev_data, fences[i]);
2982        }
2983    }
2984    // NOTE : Alternate case not handled here is when some fences have completed. In
2985    //  this case for app to guarantee which fences completed it will have to call
2986    //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
2987}
2988
2989VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
2990                                             uint64_t timeout) {
2991    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2992    // Verify fence status of submitted fences
2993    unique_lock_t lock(global_lock);
2994    bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
2995    lock.unlock();
2996    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2997
2998    VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
2999
3000    if (result == VK_SUCCESS) {
3001        lock.lock();
3002        PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
3003        lock.unlock();
3004    }
3005    return result;
3006}
3007
3008static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
3009    if (dev_data->instance_data->disabled.get_fence_state) return false;
3010    return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
3011}
3012
3013static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }
3014
3015VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
3016    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3017    unique_lock_t lock(global_lock);
3018    bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
3019    lock.unlock();
3020    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3021
3022    VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
3023    if (result == VK_SUCCESS) {
3024        lock.lock();
3025        PostCallRecordGetFenceStatus(dev_data, fence);
3026        lock.unlock();
3027    }
3028    return result;
3029}
3030
3031static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
3032    // Add queue to tracking set only if it is new
3033    auto result = dev_data->queues.emplace(queue);
3034    if (result.second == true) {
3035        QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
3036        queue_state->queue = queue;
3037        queue_state->queueFamilyIndex = q_family_index;
3038        queue_state->seq = 0;
3039    }
3040}
3041
3042VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
3043    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3044    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
3045    lock_guard_t lock(global_lock);
3046
3047    PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
3048}
3049
3050static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
3051    *queue_state = GetQueueState(dev_data, queue);
3052    if (dev_data->instance_data->disabled.queue_wait_idle) return false;
3053    return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
3054}
3055
3056static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
3057    RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
3058}
3059
3060VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
3061    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
3062    QUEUE_STATE *queue_state = nullptr;
3063    unique_lock_t lock(global_lock);
3064    bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
3065    lock.unlock();
3066    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3067    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
3068    if (VK_SUCCESS == result) {
3069        lock.lock();
3070        PostCallRecordQueueWaitIdle(dev_data, queue_state);
3071        lock.unlock();
3072    }
3073    return result;
3074}
3075
3076static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
3077    if (dev_data->instance_data->disabled.device_wait_idle) return false;
3078    bool skip = false;
3079    for (auto &queue : dev_data->queueMap) {
3080        skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
3081    }
3082    return skip;
3083}
3084
3085static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
3086    for (auto &queue : dev_data->queueMap) {
3087        RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
3088    }
3089}
3090
3091VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
3092    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3093    unique_lock_t lock(global_lock);
3094    bool skip = PreCallValidateDeviceWaitIdle(dev_data);
3095    lock.unlock();
3096    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3097    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
3098    if (VK_SUCCESS == result) {
3099        lock.lock();
3100        PostCallRecordDeviceWaitIdle(dev_data);
3101        lock.unlock();
3102    }
3103    return result;
3104}
3105
3106static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
3107    *fence_node = GetFenceNode(dev_data, fence);
3108    *obj_struct = {HandleToUint64(fence), kVulkanObjectTypeFence};
3109    if (dev_data->instance_data->disabled.destroy_fence) return false;
3110    bool skip = false;
3111    if (*fence_node) {
3112        if ((*fence_node)->state == FENCE_INFLIGHT) {
3113            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
3114                            HandleToUint64(fence), __LINE__, VALIDATION_ERROR_24e008c0, "DS", "Fence 0x%" PRIx64 " is in use. %s",
3115                            HandleToUint64(fence), validation_error_map[VALIDATION_ERROR_24e008c0]);
3116        }
3117    }
3118    return skip;
3119}
3120
3121static void PostCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }
3122
3123VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
3124    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3125    // Common data objects used pre & post call
3126    FENCE_NODE *fence_node = nullptr;
3127    VK_OBJECT obj_struct;
3128    unique_lock_t lock(global_lock);
3129    bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);
3130
3131    if (!skip) {
3132        lock.unlock();
3133        dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
3134        lock.lock();
3135        PostCallRecordDestroyFence(dev_data, fence);
3136    }
3137}
3138
3139static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
3140                                            VK_OBJECT *obj_struct) {
3141    *sema_node = GetSemaphoreNode(dev_data, semaphore);
3142    *obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
3143    if (dev_data->instance_data->disabled.destroy_semaphore) return false;
3144    bool skip = false;
3145    if (*sema_node) {
3146        skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, VALIDATION_ERROR_268008e2);
3147    }
3148    return skip;
3149}
3150
3151static void PostCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }
3152
3153VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
3154    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3155    SEMAPHORE_NODE *sema_node;
3156    VK_OBJECT obj_struct;
3157    unique_lock_t lock(global_lock);
3158    bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
3159    if (!skip) {
3160        lock.unlock();
3161        dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
3162        lock.lock();
3163        PostCallRecordDestroySemaphore(dev_data, semaphore);
3164    }
3165}
3166
3167static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
3168    *event_state = GetEventNode(dev_data, event);
3169    *obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent};
3170    if (dev_data->instance_data->disabled.destroy_event) return false;
3171    bool skip = false;
3172    if (*event_state) {
3173        skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, VALIDATION_ERROR_24c008f2);
3174    }
3175    return skip;
3176}
3177
3178static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
3179    invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
3180    dev_data->eventMap.erase(event);
3181}
3182
3183VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
3184    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3185    EVENT_STATE *event_state = nullptr;
3186    VK_OBJECT obj_struct;
3187    unique_lock_t lock(global_lock);
3188    bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
3189    if (!skip) {
3190        lock.unlock();
3191        dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
3192        lock.lock();
3193        if (event != VK_NULL_HANDLE) {
3194            PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
3195        }
3196    }
3197}
3198
3199static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
3200                                            VK_OBJECT *obj_struct) {
3201    *qp_state = GetQueryPoolNode(dev_data, query_pool);
3202    *obj_struct = {HandleToUint64(query_pool), kVulkanObjectTypeQueryPool};
3203    if (dev_data->instance_data->disabled.destroy_query_pool) return false;
3204    bool skip = false;
3205    if (*qp_state) {
3206        skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, VALIDATION_ERROR_26200632);
3207    }
3208    return skip;
3209}
3210
3211static void PostCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
3212                                           VK_OBJECT obj_struct) {
3213    invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
3214    dev_data->queryPoolMap.erase(query_pool);
3215}
3216
3217VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
3218    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3219    QUERY_POOL_NODE *qp_state = nullptr;
3220    VK_OBJECT obj_struct;
3221    unique_lock_t lock(global_lock);
3222    bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
3223    if (!skip) {
3224        lock.unlock();
3225        dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
3226        lock.lock();
3227        if (queryPool != VK_NULL_HANDLE) {
3228            PostCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
3229        }
3230    }
3231}
3232static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
3233                                               uint32_t query_count, VkQueryResultFlags flags,
3234                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
3235    // TODO: clean this up, it's insanely wasteful.
3236    for (auto cmd_buffer : dev_data->commandBufferMap) {
3237        if (cmd_buffer.second->in_use.load()) {
3238            for (auto query_state_pair : cmd_buffer.second->queryToStateMap) {
3239                (*queries_in_flight)[query_state_pair.first].push_back(
3240                    cmd_buffer.first);
3241            }
3242        }
3243    }
3244    if (dev_data->instance_data->disabled.get_query_pool_results) return false;
3245    bool skip = false;
3246    for (uint32_t i = 0; i < query_count; ++i) {
3247        QueryObject query = {query_pool, first_query + i};
3248        auto qif_pair = queries_in_flight->find(query);
3249        auto query_state_pair = dev_data->queryToStateMap.find(query);
3250        if (query_state_pair != dev_data->queryToStateMap.end()) {
3251            // Available and in flight
3252            if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
3253                query_state_pair->second) {
3254                for (auto cmd_buffer : qif_pair->second) {
3255                    auto cb = GetCBNode(dev_data, cmd_buffer);
3256                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
3257                    if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
3258                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3259                                        VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
3260                                        "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
3261                                        HandleToUint64(query_pool), first_query + i);
3262                    }
3263                }
3264                // Unavailable and in flight
3265            } else if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
3266                       !query_state_pair->second) {
3267                // TODO : Can there be the same query in use by multiple command buffers in flight?
3268                bool make_available = false;
3269                for (auto cmd_buffer : qif_pair->second) {
3270                    auto cb = GetCBNode(dev_data, cmd_buffer);
3271                    make_available |= cb->queryToStateMap[query];
3272                }
3273                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
3274                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3275                                    VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
3276                                    "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
3277                                    HandleToUint64(query_pool), first_query + i);
3278                }
3279                // Unavailable
3280            } else if (query_state_pair != dev_data->queryToStateMap.end() && !query_state_pair->second) {
3281                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3282                                __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
3283                                "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
3284                                HandleToUint64(query_pool), first_query + i);
3285                // Uninitialized
3286            } else if (query_state_pair == dev_data->queryToStateMap.end()) {
3287                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3288                                __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
3289                                "Cannot get query results on queryPool 0x%" PRIx64
3290                                " with index %d as data has not been collected for this index.",
3291                                HandleToUint64(query_pool), first_query + i);
3292            }
3293        }
3294    }
3295    return skip;
3296}
3297
3298static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
3299                                              uint32_t query_count,
3300                                              unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
3301    for (uint32_t i = 0; i < query_count; ++i) {
3302        QueryObject query = {query_pool, first_query + i};
3303        auto qif_pair = queries_in_flight->find(query);
3304        auto query_state_pair = dev_data->queryToStateMap.find(query);
3305        if (query_state_pair != dev_data->queryToStateMap.end()) {
3306            // Available and in flight
3307            if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
3308                query_state_pair->second) {
3309                for (auto cmd_buffer : qif_pair->second) {
3310                    auto cb = GetCBNode(dev_data, cmd_buffer);
3311                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
3312                    if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
3313                        for (auto event : query_event_pair->second) {
3314                            dev_data->eventMap[event].needsSignaled = true;
3315                        }
3316                    }
3317                }
3318            }
3319        }
3320    }
3321}
3322
3323VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
3324                                                   size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
3325    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3326    unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
3327    unique_lock_t lock(global_lock);
3328    bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
3329    lock.unlock();
3330    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3331    VkResult result =
3332        dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
3333    lock.lock();
3334    PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
3335    lock.unlock();
3336    return result;
3337}
3338
3339// Return true if given ranges intersect, else false
3340// Prereq : For both ranges, range->end - range->start > 0. This case should have already resulted
3341//  in an error so not checking that here
3342// pad_ranges bool indicates a linear and non-linear comparison which requires padding
3343// In the case where padding is required, if an alias is encountered then a validation error is reported and skip
3344//  may be set by the callback function so caller should merge in skip value if padding case is possible.
3345// This check can be skipped by passing skip_checks=true, for call sites outside the validation path.
3346static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip,
3347                            bool skip_checks) {
3348    *skip = false;
3349    auto r1_start = range1->start;
3350    auto r1_end = range1->end;
3351    auto r2_start = range2->start;
3352    auto r2_end = range2->end;
3353    VkDeviceSize pad_align = 1;
3354    if (range1->linear != range2->linear) {
3355        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
3356    }
3357    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
3358    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;
3359
3360    if (!skip_checks && (range1->linear != range2->linear)) {
3361        // In linear vs. non-linear case, warn of aliasing
3362        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
3363        const char *r1_type_str = range1->image ? "image" : "buffer";
3364        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
3365        const char *r2_type_str = range2->image ? "image" : "buffer";
3366        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
3367        *skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, 0,
3368                         MEMTRACK_INVALID_ALIASING, "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
3369                                                           " which may indicate a bug. For further info refer to the "
3370                                                           "Buffer-Image Granularity section of the Vulkan specification. "
3371                                                           "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/"
3372                                                           "xhtml/vkspec.html#resources-bufferimagegranularity)",
3373                         r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
3374    }
3375    // Ranges intersect
3376    return true;
3377}
3378// Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
3379bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
3380    // Create a local MEMORY_RANGE struct to wrap offset/size
3381    MEMORY_RANGE range_wrap;
3382    // Synch linear with range1 to avoid padding and potential validation error case
3383    range_wrap.linear = range1->linear;
3384    range_wrap.start = offset;
3385    range_wrap.end = end;
3386    bool tmp_bool;
3387    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool, true);
3388}
3389// For given mem_info, set all ranges valid that intersect [offset-end] range
3390// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
3391static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
3392    bool tmp_bool = false;
3393    MEMORY_RANGE map_range = {};
3394    map_range.linear = true;
3395    map_range.start = offset;
3396    map_range.end = end;
3397    for (auto &handle_range_pair : mem_info->bound_ranges) {
3398        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool, false)) {
3399            // TODO : WARN here if tmp_bool true?
3400            handle_range_pair.second.valid = true;
3401        }
3402    }
3403}
3404
3405static bool ValidateInsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
3406                                      VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image,
3407                                      bool is_linear, const char *api_name) {
3408    bool skip = false;
3409
3410    MEMORY_RANGE range;
3411    range.image = is_image;
3412    range.handle = handle;
3413    range.linear = is_linear;
3414    range.valid = mem_info->global_valid;
3415    range.memory = mem_info->mem;
3416    range.start = memoryOffset;
3417    range.size = memRequirements.size;
3418    range.end = memoryOffset + memRequirements.size - 1;
3419    range.aliases.clear();
3420
3421    // Check for aliasing problems.
3422    for (auto &obj_range_pair : mem_info->bound_ranges) {
3423        auto check_range = &obj_range_pair.second;
3424        bool intersection_error = false;
3425        if (rangesIntersect(dev_data, &range, check_range, &intersection_error, false)) {
3426            skip |= intersection_error;
3427            range.aliases.insert(check_range);
3428        }
3429    }
3430
3431    if (memoryOffset >= mem_info->alloc_info.allocationSize) {
3432        UNIQUE_VALIDATION_ERROR_CODE error_code = is_image ? VALIDATION_ERROR_1740082c : VALIDATION_ERROR_1700080e;
3433        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3434                       HandleToUint64(mem_info->mem), __LINE__, error_code, "MEM",
3435                       "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
3436                       "), memoryOffset=0x%" PRIxLEAST64 " must be less than the memory allocation size 0x%" PRIxLEAST64 ". %s",
3437                       api_name, HandleToUint64(mem_info->mem), handle, memoryOffset, mem_info->alloc_info.allocationSize,
3438                       validation_error_map[error_code]);
3439    }
3440
3441    return skip;
3442}
3443
3444// Object with given handle is being bound to memory w/ given mem_info struct.
3445//  Track the newly bound memory range with given memoryOffset
3446//  Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
3447//  and non-linear range incorrectly overlap.
3448// Return true if an error is flagged and the user callback returns "true", otherwise false
3449// is_image indicates an image object, otherwise handle is for a buffer
3450// is_linear indicates a buffer or linear image
3451static void InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
3452                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
3453    MEMORY_RANGE range;
3454
3455    range.image = is_image;
3456    range.handle = handle;
3457    range.linear = is_linear;
3458    range.valid = mem_info->global_valid;
3459    range.memory = mem_info->mem;
3460    range.start = memoryOffset;
3461    range.size = memRequirements.size;
3462    range.end = memoryOffset + memRequirements.size - 1;
3463    range.aliases.clear();
3464    // Update Memory aliasing
3465    // Save aliased ranges so we can copy into final map entry below. Can't do it in loop b/c we don't yet have final ptr. If we
3466    // inserted into map before loop to get the final ptr, then we may enter loop when not needed & we check range against itself
3467    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
3468    for (auto &obj_range_pair : mem_info->bound_ranges) {
3469        auto check_range = &obj_range_pair.second;
3470        bool intersection_error = false;
3471        if (rangesIntersect(dev_data, &range, check_range, &intersection_error, true)) {
3472            range.aliases.insert(check_range);
3473            tmp_alias_ranges.insert(check_range);
3474        }
3475    }
3476    mem_info->bound_ranges[handle] = std::move(range);
3477    for (auto tmp_range : tmp_alias_ranges) {
3478        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
3479    }
3480    if (is_image)
3481        mem_info->bound_images.insert(handle);
3482    else
3483        mem_info->bound_buffers.insert(handle);
3484}
3485
3486static bool ValidateInsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
3487                                           VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
3488                                           const char *api_name) {
3489    return ValidateInsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear, api_name);
3490}
3491static void InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
3492                                   VkMemoryRequirements mem_reqs, bool is_linear) {
3493    InsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear);
3494}
3495
3496static bool ValidateInsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
3497                                            VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) {
3498    return ValidateInsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true, api_name);
3499}
3500static void InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
3501                                    VkMemoryRequirements mem_reqs) {
3502    InsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true);
3503}
3504
3505// Remove MEMORY_RANGE struct for give handle from bound_ranges of mem_info
3506//  is_image indicates if handle is for image or buffer
3507//  This function will also remove the handle-to-index mapping from the appropriate
3508//  map and clean up any aliases for range being removed.
3509static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
3510    auto erase_range = &mem_info->bound_ranges[handle];
3511    for (auto alias_range : erase_range->aliases) {
3512        alias_range->aliases.erase(erase_range);
3513    }
3514    erase_range->aliases.clear();
3515    mem_info->bound_ranges.erase(handle);
3516    if (is_image) {
3517        mem_info->bound_images.erase(handle);
3518    } else {
3519        mem_info->bound_buffers.erase(handle);
3520    }
3521}
3522
3523void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
3524
3525void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
3526
3527VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
3528    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3529    BUFFER_STATE *buffer_state = nullptr;
3530    VK_OBJECT obj_struct;
3531    unique_lock_t lock(global_lock);
3532    bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
3533    if (!skip) {
3534        lock.unlock();
3535        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
3536        lock.lock();
3537        if (buffer != VK_NULL_HANDLE) {
3538            PostCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
3539        }
3540    }
3541}
3542
3543VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
3544    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3545    // Common data objects used pre & post call
3546    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
3547    VK_OBJECT obj_struct;
3548    unique_lock_t lock(global_lock);
3549    // Validate state before calling down chain, update common data if we'll be calling down chain
3550    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
3551    if (!skip) {
3552        lock.unlock();
3553        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
3554        lock.lock();
3555        if (bufferView != VK_NULL_HANDLE) {
3556            PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
3557        }
3558    }
3559}
3560
3561VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
3562    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3563    IMAGE_STATE *image_state = nullptr;
3564    VK_OBJECT obj_struct;
3565    unique_lock_t lock(global_lock);
3566    bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
3567    if (!skip) {
3568        lock.unlock();
3569        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
3570        lock.lock();
3571        if (image != VK_NULL_HANDLE) {
3572            PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
3573        }
3574    }
3575}
3576
3577static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
3578                                const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
3579    bool skip = false;
3580    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
3581        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3582                       HandleToUint64(mem_info->mem), __LINE__, msgCode, "MT",
3583                       "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
3584                       "type (0x%X) of this memory object 0x%" PRIx64 ". %s",
3585                       funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, HandleToUint64(mem_info->mem),
3586                       validation_error_map[msgCode]);
3587    }
3588    return skip;
3589}
3590
3591static bool PreCallValidateBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
3592                                            VkDeviceSize memoryOffset) {
3593    bool skip = false;
3594    if (buffer_state) {
3595        unique_lock_t lock(global_lock);
3596        // Track objects tied to memory
3597        uint64_t buffer_handle = HandleToUint64(buffer);
3598        skip = ValidateSetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, "vkBindBufferMemory()");
3599        if (!buffer_state->memory_requirements_checked) {
3600            // There's not an explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
3601            // BindBufferMemory, but it's implied in that memory being bound must conform with VkMemoryRequirements from
3602            // vkGetBufferMemoryRequirements()
3603            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3604                            buffer_handle, __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
3605                            "vkBindBufferMemory(): Binding memory to buffer 0x%" PRIxLEAST64
3606                            " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
3607                            buffer_handle);
3608            // Make the call for them so we can verify the state
3609            lock.unlock();
3610            dev_data->dispatch_table.GetBufferMemoryRequirements(dev_data->device, buffer, &buffer_state->requirements);
3611            lock.lock();
3612        }
3613
3614        // Validate bound memory range information
3615        auto mem_info = GetMemObjInfo(dev_data, mem);
3616        if (mem_info) {
3617            skip |= ValidateInsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements,
3618                                                    "vkBindBufferMemory()");
3619            skip |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, "vkBindBufferMemory()",
3620                                        VALIDATION_ERROR_17000816);
3621        }
3622
3623        // Validate memory requirements alignment
3624        if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
3625            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3626                            buffer_handle, __LINE__, VALIDATION_ERROR_17000818, "DS",
3627                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64
3628                            " but must be an integer multiple of the "
3629                            "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
3630                            ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
3631                            memoryOffset, buffer_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_17000818]);
3632        }
3633
3634        // Validate memory requirements size
3635        if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) {
3636            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3637                            buffer_handle, __LINE__, VALIDATION_ERROR_1700081a, "DS",
3638                            "vkBindBufferMemory(): memory size minus memoryOffset is 0x%" PRIxLEAST64
3639                            " but must be at least as large as "
3640                            "VkMemoryRequirements::size value 0x%" PRIxLEAST64
3641                            ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
3642                            mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size,
3643                            validation_error_map[VALIDATION_ERROR_1700081a]);
3644        }
3645
3646        // Validate device limits alignments
3647        static const VkBufferUsageFlagBits usage_list[3] = {
3648            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
3649            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
3650        static const char *memory_type[3] = {"texel", "uniform", "storage"};
3651        static const char *offset_name[3] = {"minTexelBufferOffsetAlignment", "minUniformBufferOffsetAlignment",
3652                                             "minStorageBufferOffsetAlignment"};
3653
3654        // TODO:  vk_validation_stats.py cannot abide braces immediately preceding or following a validation error enum
3655        // clang-format off
3656        static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = { VALIDATION_ERROR_17000810, VALIDATION_ERROR_17000812,
3657            VALIDATION_ERROR_17000814 };
3658        // clang-format on
3659
3660        // Keep this one fresh!
3661        const VkDeviceSize offset_requirement[3] = {
3662            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
3663            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
3664            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment};
3665        VkBufferUsageFlags usage = dev_data->bufferMap[buffer].get()->createInfo.usage;
3666
3667        for (int i = 0; i < 3; i++) {
3668            if (usage & usage_list[i]) {
3669                if (SafeModulo(memoryOffset, offset_requirement[i]) != 0) {
3670                    skip |= log_msg(
3671                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
3672                        __LINE__, msgCode[i], "DS", "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64
3673                                                    " but must be a multiple of "
3674                                                    "device limit %s 0x%" PRIxLEAST64 ". %s",
3675                        memory_type[i], memoryOffset, offset_name[i], offset_requirement[i], validation_error_map[msgCode[i]]);
3676                }
3677            }
3678        }
3679    }
3680    return skip;
3681}
3682
3683static void PostCallRecordBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
3684                                           VkDeviceSize memoryOffset) {
3685    if (buffer_state) {
3686        unique_lock_t lock(global_lock);
3687        // Track bound memory range information
3688        auto mem_info = GetMemObjInfo(dev_data, mem);
3689        if (mem_info) {
3690            InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
3691        }
3692
3693        // Track objects tied to memory
3694        uint64_t buffer_handle = HandleToUint64(buffer);
3695        SetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, "vkBindBufferMemory()");
3696
3697        buffer_state->binding.mem = mem;
3698        buffer_state->binding.offset = memoryOffset;
3699        buffer_state->binding.size = buffer_state->requirements.size;
3700    }
3701}
3702
3703VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
3704    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3705    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3706    auto buffer_state = GetBufferState(dev_data, buffer);
3707    bool skip = PreCallValidateBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset);
3708    if (!skip) {
3709        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
3710        if (result == VK_SUCCESS) {
3711            PostCallRecordBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset);
3712        }
3713    }
3714    return result;
3715}
3716
3717VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
3718                                                       VkMemoryRequirements *pMemoryRequirements) {
3719    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3720    dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
3721    auto buffer_state = GetBufferState(dev_data, buffer);
3722    if (buffer_state) {
3723        buffer_state->requirements = *pMemoryRequirements;
3724        buffer_state->memory_requirements_checked = true;
3725    }
3726}
3727
3728VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
3729    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3730    dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
3731    auto image_state = GetImageState(dev_data, image);
3732    if (image_state) {
3733        image_state->requirements = *pMemoryRequirements;
3734        image_state->memory_requirements_checked = true;
3735    }
3736}
3737
3738VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
3739    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3740    // Common data objects used pre & post call
3741    IMAGE_VIEW_STATE *image_view_state = nullptr;
3742    VK_OBJECT obj_struct;
3743    unique_lock_t lock(global_lock);
3744    bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
3745    if (!skip) {
3746        lock.unlock();
3747        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
3748        lock.lock();
3749        if (imageView != VK_NULL_HANDLE) {
3750            PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
3751        }
3752    }
3753}
3754
3755VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
3756                                               const VkAllocationCallbacks *pAllocator) {
3757    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3758
3759    unique_lock_t lock(global_lock);
3760    dev_data->shaderModuleMap.erase(shaderModule);
3761    lock.unlock();
3762
3763    dev_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
3764}
3765
3766static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
3767                                           VK_OBJECT *obj_struct) {
3768    *pipeline_state = getPipelineState(dev_data, pipeline);
3769    *obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline};
3770    if (dev_data->instance_data->disabled.destroy_pipeline) return false;
3771    bool skip = false;
3772    if (*pipeline_state) {
3773        skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, VALIDATION_ERROR_25c005fa);
3774    }
3775    return skip;
3776}
3777
3778static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
3779                                          VK_OBJECT obj_struct) {
3780    // Any bound cmd buffers are now invalid
3781    invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
3782    delete getPipelineState(dev_data, pipeline);
3783    dev_data->pipelineMap.erase(pipeline);
3784}
3785
3786VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
3787    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3788    PIPELINE_STATE *pipeline_state = nullptr;
3789    VK_OBJECT obj_struct;
3790    unique_lock_t lock(global_lock);
3791    bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
3792    if (!skip) {
3793        lock.unlock();
3794        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
3795        lock.lock();
3796        if (pipeline != VK_NULL_HANDLE) {
3797            PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
3798        }
3799    }
3800}
3801
3802VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
3803                                                 const VkAllocationCallbacks *pAllocator) {
3804    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3805    unique_lock_t lock(global_lock);
3806    dev_data->pipelineLayoutMap.erase(pipelineLayout);
3807    lock.unlock();
3808
3809    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
3810}
3811
3812static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
3813                                          VK_OBJECT *obj_struct) {
3814    *sampler_state = GetSamplerState(dev_data, sampler);
3815    *obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler};
3816    if (dev_data->instance_data->disabled.destroy_sampler) return false;
3817    bool skip = false;
3818    if (*sampler_state) {
3819        skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, VALIDATION_ERROR_26600874);
3820    }
3821    return skip;
3822}
3823
3824static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
3825                                         VK_OBJECT obj_struct) {
3826    // Any bound cmd buffers are now invalid
3827    if (sampler_state) invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
3828    dev_data->samplerMap.erase(sampler);
3829}
3830
3831VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
3832    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3833    SAMPLER_STATE *sampler_state = nullptr;
3834    VK_OBJECT obj_struct;
3835    unique_lock_t lock(global_lock);
3836    bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
3837    if (!skip) {
3838        lock.unlock();
3839        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
3840        lock.lock();
3841        if (sampler != VK_NULL_HANDLE) {
3842            PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
3843        }
3844    }
3845}
3846
3847static void PostCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
3848    dev_data->descriptorSetLayoutMap.erase(ds_layout);
3849}
3850
3851VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
3852                                                      const VkAllocationCallbacks *pAllocator) {
3853    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3854    dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
3855    unique_lock_t lock(global_lock);
3856    PostCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
3857}
3858
3859static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
3860                                                 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
3861    *desc_pool_state = GetDescriptorPoolState(dev_data, pool);
3862    *obj_struct = {HandleToUint64(pool), kVulkanObjectTypeDescriptorPool};
3863    if (dev_data->instance_data->disabled.destroy_descriptor_pool) return false;
3864    bool skip = false;
3865    if (*desc_pool_state) {
3866        skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, VALIDATION_ERROR_2440025e);
3867    }
3868    return skip;
3869}
3870
3871static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
3872                                                DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
3873    // Any bound cmd buffers are now invalid
3874    invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
3875    // Free sets that were in this pool
3876    for (auto ds : desc_pool_state->sets) {
3877        freeDescriptorSet(dev_data, ds);
3878    }
3879    dev_data->descriptorPoolMap.erase(descriptorPool);
3880    delete desc_pool_state;
3881}
3882
3883VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
3884                                                 const VkAllocationCallbacks *pAllocator) {
3885    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3886    DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
3887    VK_OBJECT obj_struct;
3888    unique_lock_t lock(global_lock);
3889    bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
3890    if (!skip) {
3891        lock.unlock();
3892        dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
3893        lock.lock();
3894        if (descriptorPool != VK_NULL_HANDLE) {
3895            PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
3896        }
3897    }
3898}
3899// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip result
3900//  If this is a secondary command buffer, then make sure its primary is also in-flight
3901//  If primary is not in-flight, then remove secondary from global in-flight set
3902// This function is only valid at a point when cmdBuffer is being reset or freed
3903static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
3904                                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
3905    bool skip = false;
3906    if (cb_node->in_use.load()) {
3907        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3908                        HandleToUint64(cb_node->commandBuffer), __LINE__, error_code, "DS",
3909                        "Attempt to %s command buffer (0x%p) which is in use. %s", action, cb_node->commandBuffer,
3910                        validation_error_map[error_code]);
3911    }
3912    return skip;
3913}
3914
3915// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
3916static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
3917                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
3918    bool skip = false;
3919    for (auto cmd_buffer : pPool->commandBuffers) {
3920        skip |= checkCommandBufferInFlight(dev_data, GetCBNode(dev_data, cmd_buffer), action, error_code);
3921    }
3922    return skip;
3923}
3924
3925VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
3926                                              const VkCommandBuffer *pCommandBuffers) {
3927    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3928    bool skip = false;
3929    unique_lock_t lock(global_lock);
3930
3931    for (uint32_t i = 0; i < commandBufferCount; i++) {
3932        auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
3933        // Delete CB information structure, and remove from commandBufferMap
3934        if (cb_node) {
3935            skip |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_2840005e);
3936        }
3937    }
3938
3939    if (skip) return;
3940
3941    auto pPool = GetCommandPoolNode(dev_data, commandPool);
3942    for (uint32_t i = 0; i < commandBufferCount; i++) {
3943        auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
3944        // Delete CB information structure, and remove from commandBufferMap
3945        if (cb_node) {
3946            // reset prior to delete for data clean-up
3947            // TODO: fix this, it's insane.
3948            resetCB(dev_data, cb_node->commandBuffer);
3949            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
3950            delete cb_node;
3951        }
3952
3953        // Remove commandBuffer reference from commandPoolMap
3954        pPool->commandBuffers.remove(pCommandBuffers[i]);
3955    }
3956    lock.unlock();
3957
3958    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
3959}
3960
3961VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
3962                                                 const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
3963    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3964
3965    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
3966
3967    if (VK_SUCCESS == result) {
3968        lock_guard_t lock(global_lock);
3969        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
3970        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
3971    }
3972    return result;
3973}
3974
3975VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
3976                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
3977    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3978    bool skip = false;
3979    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
3980        if (!dev_data->enabled_features.pipelineStatisticsQuery) {
3981            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3982                            __LINE__, VALIDATION_ERROR_11c0062e, "DS",
3983                            "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device "
3984                            "with VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE. %s",
3985                            validation_error_map[VALIDATION_ERROR_11c0062e]);
3986        }
3987    }
3988
3989    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3990    if (!skip) {
3991        result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
3992    }
3993    if (result == VK_SUCCESS) {
3994        lock_guard_t lock(global_lock);
3995        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
3996        qp_node->createInfo = *pCreateInfo;
3997    }
3998    return result;
3999}
4000
4001static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE **cp_state) {
4002    *cp_state = GetCommandPoolNode(dev_data, pool);
4003    if (dev_data->instance_data->disabled.destroy_command_pool) return false;
4004    bool skip = false;
4005    if (*cp_state) {
4006        // Verify that command buffers in pool are complete (not in-flight)
4007        skip |= checkCommandBuffersInFlight(dev_data, *cp_state, "destroy command pool with", VALIDATION_ERROR_24000052);
4008    }
4009    return skip;
4010}
4011
4012static void PostCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE *cp_state) {
4013    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
4014    for (auto cb : cp_state->commandBuffers) {
4015        auto cb_node = GetCBNode(dev_data, cb);
4016        clear_cmd_buf_and_mem_references(dev_data, cb_node);
4017        // Remove references to this cb_node prior to delete
4018        // TODO : Need better solution here, resetCB?
4019        for (auto obj : cb_node->object_bindings) {
4020            removeCommandBufferBinding(dev_data, &obj, cb_node);
4021        }
4022        for (auto framebuffer : cb_node->framebuffers) {
4023            auto fb_state = GetFramebufferState(dev_data, framebuffer);
4024            if (fb_state) fb_state->cb_bindings.erase(cb_node);
4025        }
4026        dev_data->commandBufferMap.erase(cb);  // Remove this command buffer
4027        delete cb_node;                        // delete CB info structure
4028    }
4029    dev_data->commandPoolMap.erase(pool);
4030}
4031
4032// Destroy commandPool along with all of the commandBuffers allocated from that pool
4033VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
4034    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4035    COMMAND_POOL_NODE *cp_state = nullptr;
4036    unique_lock_t lock(global_lock);
4037    bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool, &cp_state);
4038    if (!skip) {
4039        lock.unlock();
4040        dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
4041        lock.lock();
4042        if (commandPool != VK_NULL_HANDLE) {
4043            PostCallRecordDestroyCommandPool(dev_data, commandPool, cp_state);
4044        }
4045    }
4046}
4047
4048VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
4049    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4050    bool skip = false;
4051
4052    unique_lock_t lock(global_lock);
4053    auto pPool = GetCommandPoolNode(dev_data, commandPool);
4054    skip |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_32800050);
4055    lock.unlock();
4056
4057    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4058
4059    VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
4060
4061    // Reset all of the CBs allocated from this pool
4062    if (VK_SUCCESS == result) {
4063        lock.lock();
4064        for (auto cmdBuffer : pPool->commandBuffers) {
4065            resetCB(dev_data, cmdBuffer);
4066        }
4067        lock.unlock();
4068    }
4069    return result;
4070}
4071
4072VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
4073    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4074    bool skip = false;
4075    unique_lock_t lock(global_lock);
4076    for (uint32_t i = 0; i < fenceCount; ++i) {
4077        auto pFence = GetFenceNode(dev_data, pFences[i]);
4078        if (pFence && pFence->state == FENCE_INFLIGHT) {
4079            skip |=
4080                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4081                        HandleToUint64(pFences[i]), __LINE__, VALIDATION_ERROR_32e008c6, "DS", "Fence 0x%" PRIx64 " is in use. %s",
4082                        HandleToUint64(pFences[i]), validation_error_map[VALIDATION_ERROR_32e008c6]);
4083        }
4084    }
4085    lock.unlock();
4086
4087    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4088
4089    VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
4090
4091    if (result == VK_SUCCESS) {
4092        lock.lock();
4093        for (uint32_t i = 0; i < fenceCount; ++i) {
4094            auto pFence = GetFenceNode(dev_data, pFences[i]);
4095            if (pFence) {
4096                pFence->state = FENCE_UNSIGNALED;
4097            }
4098        }
4099        lock.unlock();
4100    }
4101
4102    return result;
4103}
4104
4105// For given cb_nodes, invalidate them and track object causing invalidation
4106void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
4107    for (auto cb_node : cb_nodes) {
4108        if (cb_node->state == CB_RECORDING) {
4109            log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4110                    HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4111                    "Invalidating a command buffer that's currently being recorded: 0x%p.", cb_node->commandBuffer);
4112            cb_node->state = CB_INVALID_INCOMPLETE;
4113        }
4114        else if (cb_node->state == CB_RECORDED) {
4115            cb_node->state = CB_INVALID_COMPLETE;
4116        }
4117        cb_node->broken_bindings.push_back(obj);
4118
4119        // if secondary, then propagate the invalidation to the primaries that will call us.
4120        if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
4121            invalidateCommandBuffers(dev_data, cb_node->linkedCommandBuffers, obj);
4122        }
4123    }
4124}
4125
4126static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
4127                                              FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
4128    *framebuffer_state = GetFramebufferState(dev_data, framebuffer);
4129    *obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer};
4130    if (dev_data->instance_data->disabled.destroy_framebuffer) return false;
4131    bool skip = false;
4132    if (*framebuffer_state) {
4133        skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, VALIDATION_ERROR_250006f8);
4134    }
4135    return skip;
4136}
4137
4138static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
4139                                             VK_OBJECT obj_struct) {
4140    invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
4141    dev_data->frameBufferMap.erase(framebuffer);
4142}
4143
4144VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
4145    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4146    FRAMEBUFFER_STATE *framebuffer_state = nullptr;
4147    VK_OBJECT obj_struct;
4148    unique_lock_t lock(global_lock);
4149    bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
4150    if (!skip) {
4151        lock.unlock();
4152        dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
4153        lock.lock();
4154        if (framebuffer != VK_NULL_HANDLE) {
4155            PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
4156        }
4157    }
4158}
4159
4160static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
4161                                             VK_OBJECT *obj_struct) {
4162    *rp_state = GetRenderPassState(dev_data, render_pass);
4163    *obj_struct = {HandleToUint64(render_pass), kVulkanObjectTypeRenderPass};
4164    if (dev_data->instance_data->disabled.destroy_renderpass) return false;
4165    bool skip = false;
4166    if (*rp_state) {
4167        skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, VALIDATION_ERROR_264006d2);
4168    }
4169    return skip;
4170}
4171
4172static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
4173                                            VK_OBJECT obj_struct) {
4174    invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
4175    dev_data->renderPassMap.erase(render_pass);
4176}
4177
4178VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
4179    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4180    RENDER_PASS_STATE *rp_state = nullptr;
4181    VK_OBJECT obj_struct;
4182    unique_lock_t lock(global_lock);
4183    bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
4184    if (!skip) {
4185        lock.unlock();
4186        dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
4187        lock.lock();
4188        if (renderPass != VK_NULL_HANDLE) {
4189            PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
4190        }
4191    }
4192}
4193
4194VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
4195                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
4196    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4197    unique_lock_t lock(global_lock);
4198    bool skip = PreCallValidateCreateBuffer(dev_data, pCreateInfo);
4199    lock.unlock();
4200
4201    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4202    VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
4203
4204    if (VK_SUCCESS == result) {
4205        lock.lock();
4206        PostCallRecordCreateBuffer(dev_data, pCreateInfo, pBuffer);
4207        lock.unlock();
4208    }
4209    return result;
4210}
4211
4212VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
4213                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
4214    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4215    unique_lock_t lock(global_lock);
4216    bool skip = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
4217    lock.unlock();
4218    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4219    VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
4220    if (VK_SUCCESS == result) {
4221        lock.lock();
4222        PostCallRecordCreateBufferView(dev_data, pCreateInfo, pView);
4223        lock.unlock();
4224    }
4225    return result;
4226}
4227
4228// Access helper functions for external modules
4229const VkFormatProperties *GetFormatProperties(core_validation::layer_data *device_data, VkFormat format) {
4230    VkFormatProperties *format_properties = new VkFormatProperties;
4231    instance_layer_data *instance_data =
4232        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
4233    instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, format_properties);
4234    return format_properties;
4235}
4236
4237const VkImageFormatProperties *GetImageFormatProperties(core_validation::layer_data *device_data, VkFormat format,
4238                                                        VkImageType image_type, VkImageTiling tiling, VkImageUsageFlags usage,
4239                                                        VkImageCreateFlags flags) {
4240    VkImageFormatProperties *image_format_properties = new VkImageFormatProperties;
4241    instance_layer_data *instance_data =
4242        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
4243    instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties(device_data->physical_device, format, image_type, tiling,
4244                                                                         usage, flags, image_format_properties);
4245    return image_format_properties;
4246}
4247
4248const debug_report_data *GetReportData(const core_validation::layer_data *device_data) { return device_data->report_data; }
4249
4250const VkPhysicalDeviceProperties *GetPhysicalDeviceProperties(core_validation::layer_data *device_data) {
4251    return &device_data->phys_dev_props;
4252}
4253
4254const CHECK_DISABLED *GetDisables(core_validation::layer_data *device_data) { return &device_data->instance_data->disabled; }
4255
4256std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *device_data) {
4257    return &device_data->imageMap;
4258}
4259
4260std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(core_validation::layer_data *device_data) {
4261    return &device_data->imageSubresourceMap;
4262}
4263
4264std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *device_data) {
4265    return &device_data->imageLayoutMap;
4266}
4267
4268std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *GetImageLayoutMap(layer_data const *device_data) {
4269    return &device_data->imageLayoutMap;
4270}
4271
4272std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *GetBufferMap(layer_data *device_data) {
4273    return &device_data->bufferMap;
4274}
4275
4276std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *GetBufferViewMap(layer_data *device_data) {
4277    return &device_data->bufferViewMap;
4278}
4279
4280std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *GetImageViewMap(layer_data *device_data) {
4281    return &device_data->imageViewMap;
4282}
4283
4284const PHYS_DEV_PROPERTIES_NODE *GetPhysDevProperties(const layer_data *device_data) {
4285    return &device_data->phys_dev_properties;
4286}
4287
4288const VkPhysicalDeviceFeatures *GetEnabledFeatures(const layer_data *device_data) {
4289    return &device_data->enabled_features;
4290}
4291
4292const DeviceExtensions *GetDeviceExtensions(const layer_data *device_data) { return &device_data->extensions; }
4293
4294VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
4295                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
4296    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4297    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4298    bool skip = PreCallValidateCreateImage(dev_data, pCreateInfo, pAllocator, pImage);
4299    if (!skip) {
4300        result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
4301    }
4302    if (VK_SUCCESS == result) {
4303        lock_guard_t lock(global_lock);
4304        PostCallRecordCreateImage(dev_data, pCreateInfo, pImage);
4305    }
4306    return result;
4307}
4308
4309VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
4310                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
4311    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4312    unique_lock_t lock(global_lock);
4313    bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
4314    lock.unlock();
4315    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4316    VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
4317    if (VK_SUCCESS == result) {
4318        lock.lock();
4319        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
4320        lock.unlock();
4321    }
4322
4323    return result;
4324}
4325
4326VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
4327                                           const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
4328    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4329    VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
4330    if (VK_SUCCESS == result) {
4331        lock_guard_t lock(global_lock);
4332        auto &fence_node = dev_data->fenceMap[*pFence];
4333        fence_node.fence = *pFence;
4334        fence_node.createInfo = *pCreateInfo;
4335        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
4336    }
4337    return result;
4338}
4339
4340// TODO handle pipeline caches
4341VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
4342                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
4343    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4344    VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
4345    return result;
4346}
4347
4348VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
4349                                                const VkAllocationCallbacks *pAllocator) {
4350    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4351    dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
4352}
4353
4354VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize,
4355                                                    void *pData) {
4356    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4357    VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
4358    return result;
4359}
4360
4361VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
4362                                                   const VkPipelineCache *pSrcCaches) {
4363    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4364    VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
4365    return result;
4366}
4367
4368// utility function to set collective state for pipeline
4369void set_pipeline_state(PIPELINE_STATE *pPipe) {
4370    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
4371    if (pPipe->graphicsPipelineCI.pColorBlendState) {
4372        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
4373            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
4374                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4375                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4376                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4377                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4378                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4379                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4380                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4381                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
4382                    pPipe->blendConstantsEnabled = true;
4383                }
4384            }
4385        }
4386    }
4387}
4388
4389bool validate_dual_src_blend_feature(layer_data *device_data, PIPELINE_STATE *pipe_state) {
4390    bool skip = false;
4391    if (pipe_state->graphicsPipelineCI.pColorBlendState) {
4392        for (size_t i = 0; i < pipe_state->attachments.size(); ++i) {
4393            if (!device_data->enabled_features.dualSrcBlend) {
4394                if ((pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
4395                    (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
4396                    (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
4397                    (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA) ||
4398                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
4399                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
4400                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
4401                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
4402                    skip |=
4403                        log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
4404                                HandleToUint64(pipe_state->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
4405                                "CmdBindPipeline: vkPipeline (0x%" PRIxLEAST64 ") attachment[" PRINTF_SIZE_T_SPECIFIER
4406                                "] has a dual-source blend factor but this device feature is not enabled.",
4407                                HandleToUint64(pipe_state->pipeline), i);
4408                }
4409            }
4410        }
4411    }
4412    return skip;
4413}
4414
4415VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
4416                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
4417                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
4418    // TODO What to do with pipelineCache?
4419    // The order of operations here is a little convoluted but gets the job done
4420    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
4421    //  2. Create state is then validated (which uses flags setup during shadowing)
4422    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
4423    bool skip = false;
4424    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
4425    vector<PIPELINE_STATE *> pipe_state(count);
4426    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4427
4428    uint32_t i = 0;
4429    unique_lock_t lock(global_lock);
4430
4431    for (i = 0; i < count; i++) {
4432        pipe_state[i] = new PIPELINE_STATE;
4433        pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i]);
4434        pipe_state[i]->render_pass_ci.initialize(GetRenderPassState(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
4435        pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
4436    }
4437
4438    for (i = 0; i < count; i++) {
4439        skip |= ValidatePipelineLocked(dev_data, pipe_state, i);
4440    }
4441
4442    lock.unlock();
4443
4444    for (i = 0; i < count; i++) {
4445        skip |= ValidatePipelineUnlocked(dev_data, pipe_state, i);
4446    }
4447
4448    if (skip) {
4449        for (i = 0; i < count; i++) {
4450            delete pipe_state[i];
4451            pPipelines[i] = VK_NULL_HANDLE;
4452        }
4453        return VK_ERROR_VALIDATION_FAILED_EXT;
4454    }
4455
4456    auto result =
4457        dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
4458    lock.lock();
4459    for (i = 0; i < count; i++) {
4460        if (pPipelines[i] == VK_NULL_HANDLE) {
4461            delete pipe_state[i];
4462        } else {
4463            pipe_state[i]->pipeline = pPipelines[i];
4464            dev_data->pipelineMap[pipe_state[i]->pipeline] = pipe_state[i];
4465        }
4466    }
4467
4468    return result;
4469}
4470
4471VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
4472                                                      const VkComputePipelineCreateInfo *pCreateInfos,
4473                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
4474    bool skip = false;
4475
4476    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
4477    vector<PIPELINE_STATE *> pPipeState(count);
4478    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4479
4480    uint32_t i = 0;
4481    unique_lock_t lock(global_lock);
4482    for (i = 0; i < count; i++) {
4483        // TODO: Verify compute stage bits
4484
4485        // Create and initialize internal tracking data structure
4486        pPipeState[i] = new PIPELINE_STATE;
4487        pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
4488        pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
4489
4490        // TODO: Add Compute Pipeline Verification
4491        skip |= validate_compute_pipeline(dev_data, pPipeState[i]);
4492    }
4493
4494    if (skip) {
4495        for (i = 0; i < count; i++) {
4496            // Clean up any locally allocated data structures
4497            delete pPipeState[i];
4498            pPipelines[i] = VK_NULL_HANDLE;
4499        }
4500        return VK_ERROR_VALIDATION_FAILED_EXT;
4501    }
4502
4503    lock.unlock();
4504    auto result =
4505        dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
4506    lock.lock();
4507    for (i = 0; i < count; i++) {
4508        if (pPipelines[i] == VK_NULL_HANDLE) {
4509            delete pPipeState[i];
4510        } else {
4511            pPipeState[i]->pipeline = pPipelines[i];
4512            dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
4513        }
4514    }
4515
4516    return result;
4517}
4518
4519VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
4520                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
4521    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4522    VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
4523    if (VK_SUCCESS == result) {
4524        lock_guard_t lock(global_lock);
4525        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
4526    }
4527    return result;
4528}
4529
4530static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
4531    if (dev_data->instance_data->disabled.create_descriptor_set_layout) return false;
4532    return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info);
4533}
4534
4535static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
4536                                                    VkDescriptorSetLayout set_layout) {
4537    dev_data->descriptorSetLayoutMap[set_layout] = std::make_shared<cvdescriptorset::DescriptorSetLayout>(create_info, set_layout);
4538}
4539
4540VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
4541                                                         const VkAllocationCallbacks *pAllocator,
4542                                                         VkDescriptorSetLayout *pSetLayout) {
4543    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4544    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4545    unique_lock_t lock(global_lock);
4546    bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
4547    if (!skip) {
4548        lock.unlock();
4549        result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
4550        if (VK_SUCCESS == result) {
4551            lock.lock();
4552            PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
4553        }
4554    }
4555    return result;
4556}
4557
4558// Used by CreatePipelineLayout and CmdPushConstants.
4559// Note that the index argument is optional and only used by CreatePipelineLayout.
4560static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
4561                                      const char *caller_name, uint32_t index = 0) {
4562    if (dev_data->instance_data->disabled.push_constant_range) return false;
4563    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
4564    bool skip = false;
4565    // Check that offset + size don't exceed the max.
4566    // Prevent arithetic overflow here by avoiding addition and testing in this order.
4567    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
4568        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
4569        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
4570            if (offset >= maxPushConstantsSize) {
4571                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4572                                __LINE__, VALIDATION_ERROR_11a0024c, "DS",
4573                                "%s call has push constants index %u with offset %u that "
4574                                "exceeds this device's maxPushConstantSize of %u. %s",
4575                                caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_11a0024c]);
4576            }
4577            if (size > maxPushConstantsSize - offset) {
4578                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4579                                __LINE__, VALIDATION_ERROR_11a00254, "DS",
4580                                "%s call has push constants index %u with offset %u and size %u that "
4581                                "exceeds this device's maxPushConstantSize of %u. %s",
4582                                caller_name, index, offset, size, maxPushConstantsSize,
4583                                validation_error_map[VALIDATION_ERROR_11a00254]);
4584            }
4585        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
4586            if (offset >= maxPushConstantsSize) {
4587                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4588                                __LINE__, VALIDATION_ERROR_1bc002e4, "DS",
4589                                "%s call has push constants index %u with offset %u that "
4590                                "exceeds this device's maxPushConstantSize of %u. %s",
4591                                caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_1bc002e4]);
4592            }
4593            if (size > maxPushConstantsSize - offset) {
4594                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4595                                __LINE__, VALIDATION_ERROR_1bc002e6, "DS",
4596                                "%s call has push constants index %u with offset %u and size %u that "
4597                                "exceeds this device's maxPushConstantSize of %u. %s",
4598                                caller_name, index, offset, size, maxPushConstantsSize,
4599                                validation_error_map[VALIDATION_ERROR_1bc002e6]);
4600            }
4601        } else {
4602            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4603                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
4604        }
4605    }
4606    // size needs to be non-zero and a multiple of 4.
4607    if ((size == 0) || ((size & 0x3) != 0)) {
4608        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
4609            if (size == 0) {
4610                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4611                                __LINE__, VALIDATION_ERROR_11a00250, "DS",
4612                                "%s call has push constants index %u with "
4613                                "size %u. Size must be greater than zero. %s",
4614                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_11a00250]);
4615            }
4616            if (size & 0x3) {
4617                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4618                                __LINE__, VALIDATION_ERROR_11a00252, "DS",
4619                                "%s call has push constants index %u with "
4620                                "size %u. Size must be a multiple of 4. %s",
4621                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_11a00252]);
4622            }
4623        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
4624            if (size == 0) {
4625                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4626                                __LINE__, VALIDATION_ERROR_1bc2c21b, "DS",
4627                                "%s call has push constants index %u with "
4628                                "size %u. Size must be greater than zero. %s",
4629                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_1bc2c21b]);
4630            }
4631            if (size & 0x3) {
4632                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4633                                __LINE__, VALIDATION_ERROR_1bc002e2, "DS",
4634                                "%s call has push constants index %u with "
4635                                "size %u. Size must be a multiple of 4. %s",
4636                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_1bc002e2]);
4637            }
4638        } else {
4639            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4640                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
4641        }
4642    }
4643    // offset needs to be a multiple of 4.
4644    if ((offset & 0x3) != 0) {
4645        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
4646            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4647                            __LINE__, VALIDATION_ERROR_11a0024e, "DS",
4648                            "%s call has push constants index %u with "
4649                            "offset %u. Offset must be a multiple of 4. %s",
4650                            caller_name, index, offset, validation_error_map[VALIDATION_ERROR_11a0024e]);
4651        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
4652            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4653                            __LINE__, VALIDATION_ERROR_1bc002e0, "DS",
4654                            "%s call has push constants with "
4655                            "offset %u. Offset must be a multiple of 4. %s",
4656                            caller_name, offset, validation_error_map[VALIDATION_ERROR_1bc002e0]);
4657        } else {
4658            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4659                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
4660        }
4661    }
4662    return skip;
4663}
4664
4665VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
4666                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
4667    bool skip = false;
4668    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4669    // TODO : Add checks for VALIDATION_ERRORS 865-870
4670    // Push Constant Range checks
4671    uint32_t i, j;
4672    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
4673        skip |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
4674                                          pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
4675        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
4676            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4677                            __LINE__, VALIDATION_ERROR_11a2dc03, "DS", "vkCreatePipelineLayout() call has no stageFlags set. %s",
4678                            validation_error_map[VALIDATION_ERROR_11a2dc03]);
4679        }
4680    }
4681    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4682
4683    // As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
4684    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
4685        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
4686            if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
4687                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4688                                __LINE__, VALIDATION_ERROR_0fe00248, "DS",
4689                                "vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d. %s", i, j,
4690                                validation_error_map[VALIDATION_ERROR_0fe00248]);
4691            }
4692        }
4693    }
4694
4695    VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
4696    if (VK_SUCCESS == result) {
4697        lock_guard_t lock(global_lock);
4698        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
4699        plNode.layout = *pPipelineLayout;
4700        plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
4701        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
4702            plNode.set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
4703        }
4704        plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
4705        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
4706            plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
4707        }
4708    }
4709    return result;
4710}
4711
4712VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
4713                                                    const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
4714    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4715    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
4716    if (VK_SUCCESS == result) {
4717        DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
4718        if (NULL == pNewNode) {
4719            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4720                        HandleToUint64(*pDescriptorPool), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
4721                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
4722                return VK_ERROR_VALIDATION_FAILED_EXT;
4723        } else {
4724            lock_guard_t lock(global_lock);
4725            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
4726        }
4727    } else {
4728        // Need to do anything if pool create fails?
4729    }
4730    return result;
4731}
4732
4733VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
4734                                                   VkDescriptorPoolResetFlags flags) {
4735    // TODO : Add checks for VALIDATION_ERROR_32a00272
4736    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4737    VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
4738    if (VK_SUCCESS == result) {
4739        lock_guard_t lock(global_lock);
4740        clearDescriptorPool(dev_data, device, descriptorPool, flags);
4741    }
4742    return result;
4743}
4744// Ensure the pool contains enough descriptors and descriptor sets to satisfy
4745// an allocation request. Fills common_data with the total number of descriptors of each type required,
4746// as well as DescriptorSetLayout ptrs used for later update.
4747static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
4748                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
4749    // Always update common data
4750    cvdescriptorset::UpdateAllocateDescriptorSetsData(dev_data, pAllocateInfo, common_data);
4751    if (dev_data->instance_data->disabled.allocate_descriptor_sets) return false;
4752    // All state checks for AllocateDescriptorSets is done in single function
4753    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data, pAllocateInfo, common_data);
4754}
4755// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
4756static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
4757                                                 VkDescriptorSet *pDescriptorSets,
4758                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
4759    // All the updates are contained in a single cvdescriptorset function
4760    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
4761                                                   &dev_data->setMap, dev_data);
4762}
4763
4764// TODO: PostCallRecord routine is dependent on data generated in PreCallValidate -- needs to be moved out
4765VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
4766                                                      VkDescriptorSet *pDescriptorSets) {
4767    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4768    unique_lock_t lock(global_lock);
4769    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
4770    bool skip = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
4771    lock.unlock();
4772
4773    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4774
4775    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
4776
4777    if (VK_SUCCESS == result) {
4778        lock.lock();
4779        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
4780        lock.unlock();
4781    }
4782    return result;
4783}
4784// Verify state before freeing DescriptorSets
4785static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
4786                                              const VkDescriptorSet *descriptor_sets) {
4787    if (dev_data->instance_data->disabled.free_descriptor_sets) return false;
4788    bool skip = false;
4789    // First make sure sets being destroyed are not currently in-use
4790    for (uint32_t i = 0; i < count; ++i) {
4791        if (descriptor_sets[i] != VK_NULL_HANDLE) {
4792            skip |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
4793        }
4794    }
4795
4796    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
4797    if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
4798        // Can't Free from a NON_FREE pool
4799        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4800                        HandleToUint64(pool), __LINE__, VALIDATION_ERROR_28600270, "DS",
4801                        "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
4802                        "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
4803                        validation_error_map[VALIDATION_ERROR_28600270]);
4804    }
4805    return skip;
4806}
4807// Sets have been removed from the pool so update underlying state
4808static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
4809                                             const VkDescriptorSet *descriptor_sets) {
4810    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
4811    // Update available descriptor sets in pool
4812    pool_state->availableSets += count;
4813
4814    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
4815    for (uint32_t i = 0; i < count; ++i) {
4816        if (descriptor_sets[i] != VK_NULL_HANDLE) {
4817            auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
4818            uint32_t type_index = 0, descriptor_count = 0;
4819            for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
4820                type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
4821                descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
4822                pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
4823            }
4824            freeDescriptorSet(dev_data, descriptor_set);
4825            pool_state->sets.erase(descriptor_set);
4826        }
4827    }
4828}
4829
4830VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
4831                                                  const VkDescriptorSet *pDescriptorSets) {
4832    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4833    // Make sure that no sets being destroyed are in-flight
4834    unique_lock_t lock(global_lock);
4835    bool skip = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
4836    lock.unlock();
4837
4838    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4839    VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
4840    if (VK_SUCCESS == result) {
4841        lock.lock();
4842        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
4843        lock.unlock();
4844    }
4845    return result;
4846}
4847// TODO : This is a Proof-of-concept for core validation architecture
4848//  Really we'll want to break out these functions to separate files but
4849//  keeping it all together here to prove out design
4850// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
4851static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
4852                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
4853                                                const VkCopyDescriptorSet *pDescriptorCopies) {
4854    if (dev_data->instance_data->disabled.update_descriptor_sets) return false;
4855    // First thing to do is perform map look-ups.
4856    // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets
4857    //  so we can't just do a single map look-up up-front, but do them individually in functions below
4858
4859    // Now make call(s) that validate state, but don't perform state updates in this function
4860    // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the
4861    //  namespace which will parse params and make calls into specific class instances
4862    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
4863                                                         descriptorCopyCount, pDescriptorCopies);
4864}
4865// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
4866static void PreCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
4867                                              const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
4868                                              const VkCopyDescriptorSet *pDescriptorCopies) {
4869    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
4870                                                 pDescriptorCopies);
4871}
4872
4873VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
4874                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
4875                                                const VkCopyDescriptorSet *pDescriptorCopies) {
4876    // Only map look-up at top level is for device-level layer_data
4877    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4878    unique_lock_t lock(global_lock);
4879    bool skip = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
4880                                                    pDescriptorCopies);
4881    if (!skip) {
4882        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state & we can update before call down chain
4883        PreCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
4884                                          pDescriptorCopies);
4885        lock.unlock();
4886        dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
4887                                                      pDescriptorCopies);
4888    }
4889}
4890
4891VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
4892                                                      VkCommandBuffer *pCommandBuffer) {
4893    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4894    VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
4895    if (VK_SUCCESS == result) {
4896        unique_lock_t lock(global_lock);
4897        auto pPool = GetCommandPoolNode(dev_data, pCreateInfo->commandPool);
4898
4899        if (pPool) {
4900            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
4901                // Add command buffer to its commandPool map
4902                pPool->commandBuffers.push_back(pCommandBuffer[i]);
4903                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
4904                // Add command buffer to map
4905                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
4906                resetCB(dev_data, pCommandBuffer[i]);
4907                pCB->createInfo = *pCreateInfo;
4908                pCB->device = device;
4909            }
4910        }
4911        lock.unlock();
4912    }
4913    return result;
4914}
4915
4916// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
4917static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
4918    addCommandBufferBinding(&fb_state->cb_bindings, {HandleToUint64(fb_state->framebuffer), kVulkanObjectTypeFramebuffer},
4919                            cb_state);
4920    for (auto attachment : fb_state->attachments) {
4921        auto view_state = attachment.view_state;
4922        if (view_state) {
4923            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
4924        }
4925        auto rp_state = GetRenderPassState(dev_data, fb_state->createInfo.renderPass);
4926        if (rp_state) {
4927            addCommandBufferBinding(&rp_state->cb_bindings, {HandleToUint64(rp_state->renderPass), kVulkanObjectTypeRenderPass},
4928                                    cb_state);
4929        }
4930    }
4931}
4932
4933VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
4934    bool skip = false;
4935    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
4936    unique_lock_t lock(global_lock);
4937    // Validate command buffer level
4938    GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
4939    if (cb_node) {
4940        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
4941        if (cb_node->in_use.load()) {
4942            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4943                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00062, "MEM",
4944                            "Calling vkBeginCommandBuffer() on active command buffer %p before it has completed. "
4945                            "You must check command buffer fence before this call. %s",
4946                            commandBuffer, validation_error_map[VALIDATION_ERROR_16e00062]);
4947        }
4948        clear_cmd_buf_and_mem_references(dev_data, cb_node);
4949        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
4950            // Secondary Command Buffer
4951            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
4952            if (!pInfo) {
4953                skip |=
4954                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4955                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00066, "DS",
4956                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info. %s", commandBuffer,
4957                            validation_error_map[VALIDATION_ERROR_16e00066]);
4958            } else {
4959                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
4960                    assert(pInfo->renderPass);
4961                    string errorString = "";
4962                    auto framebuffer = GetFramebufferState(dev_data, pInfo->framebuffer);
4963                    if (framebuffer) {
4964                        if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
4965                            !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
4966                                                             GetRenderPassState(dev_data, pInfo->renderPass)->createInfo.ptr(),
4967                                                             errorString)) {
4968                            // renderPass that framebuffer was created with must be compatible with local renderPass
4969                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4970                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
4971                                            VALIDATION_ERROR_0280006e, "DS",
4972                                            "vkBeginCommandBuffer(): Secondary Command "
4973                                            "Buffer (0x%p) renderPass (0x%" PRIxLEAST64
4974                                            ") is incompatible w/ framebuffer "
4975                                            "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s. %s",
4976                                            commandBuffer, HandleToUint64(pInfo->renderPass), HandleToUint64(pInfo->framebuffer),
4977                                            HandleToUint64(framebuffer->createInfo.renderPass), errorString.c_str(),
4978                                            validation_error_map[VALIDATION_ERROR_0280006e]);
4979                        }
4980                        // Connect this framebuffer and its children to this cmdBuffer
4981                        AddFramebufferBinding(dev_data, cb_node, framebuffer);
4982                    }
4983                }
4984                if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
4985                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
4986                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4987                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
4988                                    VALIDATION_ERROR_16e00068, "DS",
4989                                    "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
4990                                    "VK_QUERY_CONTROL_PRECISE_BIT if occulusionQuery is disabled or the device does not "
4991                                    "support precise occlusion queries. %s",
4992                                    commandBuffer, validation_error_map[VALIDATION_ERROR_16e00068]);
4993                }
4994            }
4995            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
4996                auto renderPass = GetRenderPassState(dev_data, pInfo->renderPass);
4997                if (renderPass) {
4998                    if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
4999                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5000                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
5001                                        VALIDATION_ERROR_0280006c, "DS",
5002                                        "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must have a subpass index (%d) "
5003                                        "that is less than the number of subpasses (%d). %s",
5004                                        commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount,
5005                                        validation_error_map[VALIDATION_ERROR_0280006c]);
5006                    }
5007                }
5008            }
5009        }
5010        if (CB_RECORDING == cb_node->state) {
5011            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5012                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00062, "DS",
5013                            "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%p"
5014                            ") in the RECORDING state. Must first call vkEndCommandBuffer(). %s",
5015                            commandBuffer, validation_error_map[VALIDATION_ERROR_16e00062]);
5016        } else if (CB_RECORDED == cb_node->state || CB_INVALID_COMPLETE == cb_node->state) {
5017            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
5018            auto pPool = GetCommandPoolNode(dev_data, cmdPool);
5019            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
5020                skip |=
5021                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5022                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00064, "DS",
5023                            "Call to vkBeginCommandBuffer() on command buffer (0x%p"
5024                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
5025                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
5026                            commandBuffer, HandleToUint64(cmdPool), validation_error_map[VALIDATION_ERROR_16e00064]);
5027            }
5028            resetCB(dev_data, commandBuffer);
5029        }
5030        // Set updated state here in case implicit reset occurs above
5031        cb_node->state = CB_RECORDING;
5032        cb_node->beginInfo = *pBeginInfo;
5033        if (cb_node->beginInfo.pInheritanceInfo) {
5034            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
5035            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
5036            // If we are a secondary command-buffer and inheriting.  Update the items we should inherit.
5037            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
5038                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
5039                cb_node->activeRenderPass = GetRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
5040                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
5041                cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
5042                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
5043            }
5044        }
5045    }
5046    lock.unlock();
5047    if (skip) {
5048        return VK_ERROR_VALIDATION_FAILED_EXT;
5049    }
5050    VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
5051
5052    return result;
5053}
5054
5055VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
5056    bool skip = false;
5057    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5058    unique_lock_t lock(global_lock);
5059    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5060    if (pCB) {
5061        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
5062            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
5063            // This needs spec clarification to update valid usage, see comments in PR:
5064            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
5065            skip |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_27400078);
5066        }
5067        skip |= ValidateCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
5068        for (auto query : pCB->activeQueries) {
5069            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5070                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_2740007a, "DS",
5071                            "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d. %s",
5072                            HandleToUint64(query.pool), query.index, validation_error_map[VALIDATION_ERROR_2740007a]);
5073        }
5074    }
5075    if (!skip) {
5076        lock.unlock();
5077        auto result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
5078        lock.lock();
5079        if (VK_SUCCESS == result) {
5080            pCB->state = CB_RECORDED;
5081        }
5082        return result;
5083    } else {
5084        return VK_ERROR_VALIDATION_FAILED_EXT;
5085    }
5086}
5087
5088VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
5089    bool skip = false;
5090    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5091    unique_lock_t lock(global_lock);
5092    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5093    VkCommandPool cmdPool = pCB->createInfo.commandPool;
5094    auto pPool = GetCommandPoolNode(dev_data, cmdPool);
5095    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
5096        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5097                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_3260005c, "DS",
5098                        "Attempt to reset command buffer (0x%p) created from command pool (0x%" PRIxLEAST64
5099                        ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
5100                        commandBuffer, HandleToUint64(cmdPool), validation_error_map[VALIDATION_ERROR_3260005c]);
5101    }
5102    skip |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_3260005a);
5103    lock.unlock();
5104    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5105    VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
5106    if (VK_SUCCESS == result) {
5107        lock.lock();
5108        resetCB(dev_data, commandBuffer);
5109        lock.unlock();
5110    }
5111    return result;
5112}
5113
5114VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
5115                                           VkPipeline pipeline) {
5116    bool skip = false;
5117    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5118    unique_lock_t lock(global_lock);
5119    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
5120    if (cb_state) {
5121        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
5122                                      VALIDATION_ERROR_18002415);
5123        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
5124        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (cb_state->activeRenderPass)) {
5125            skip |=
5126                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
5127                        HandleToUint64(pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
5128                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
5129                        HandleToUint64(pipeline), HandleToUint64(cb_state->activeRenderPass->renderPass));
5130        }
5131        // TODO: VALIDATION_ERROR_18000612 VALIDATION_ERROR_18000616
5132
5133        PIPELINE_STATE *pipe_state = getPipelineState(dev_data, pipeline);
5134        if (pipe_state) {
5135            cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
5136            pCB->status |= MakeStaticStateMask(pipe_state->graphicsPipelineCI.ptr()->pDynamicState);
5137            set_pipeline_state(pipe_state);
5138            skip |= validate_dual_src_blend_feature(dev_data, pipe_state);
5139        } else {
5140            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
5141                            HandleToUint64(pipeline), __LINE__, VALIDATION_ERROR_18027e01, "DS",
5142                            "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist! %s", HandleToUint64(pipeline),
5143                            validation_error_map[VALIDATION_ERROR_18027e01]);
5144        }
5145        addCommandBufferBinding(&pipe_state->cb_bindings, {HandleToUint64(pipeline), kVulkanObjectTypePipeline}, cb_state);
5146        if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
5147            // Add binding for child renderpass
5148            auto rp_state = GetRenderPassState(dev_data, pipe_state->graphicsPipelineCI.renderPass);
5149            if (rp_state) {
5150                addCommandBufferBinding(&rp_state->cb_bindings, {HandleToUint64(rp_state->renderPass), kVulkanObjectTypeRenderPass},
5151                                        cb_state);
5152            }
5153        }
5154    }
5155    lock.unlock();
5156    if (!skip) dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
5157}
5158
5159VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
5160                                          const VkViewport *pViewports) {
5161    bool skip = false;
5162    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5163    unique_lock_t lock(global_lock);
5164    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5165    if (pCB) {
5166        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1e002415);
5167        skip |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
5168        pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
5169    }
5170    lock.unlock();
5171    if (!skip) dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
5172}
5173
5174VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
5175                                         const VkRect2D *pScissors) {
5176    bool skip = false;
5177    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5178    unique_lock_t lock(global_lock);
5179    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5180    if (pCB) {
5181        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d802415);
5182        skip |= ValidateCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
5183        pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
5184    }
5185    lock.unlock();
5186    if (!skip) dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
5187}
5188
5189VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
5190    bool skip = false;
5191    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5192    unique_lock_t lock(global_lock);
5193    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5194    if (pCB) {
5195        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d602415);
5196        skip |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
5197        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
5198
5199        PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
5200        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
5201            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5202                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1d600626, "DS",
5203                            "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
5204                            "flag.  This is undefined behavior and could be ignored. %s",
5205                            validation_error_map[VALIDATION_ERROR_1d600626]);
5206        } else {
5207            skip |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, kVulkanObjectTypeCommandBuffer, HandleToUint64(commandBuffer),
5208                                    lineWidth);
5209        }
5210    }
5211    lock.unlock();
5212    if (!skip) dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
5213}
5214
5215VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
5216                                           float depthBiasSlopeFactor) {
5217    bool skip = false;
5218    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5219    unique_lock_t lock(global_lock);
5220    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5221    if (pCB) {
5222        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1cc02415);
5223        skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
5224        if ((depthBiasClamp != 0.0) && (!dev_data->enabled_features.depthBiasClamp)) {
5225            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5226                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1cc0062c, "DS",
5227                            "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp "
5228                            "parameter must be set to 0.0. %s",
5229                            validation_error_map[VALIDATION_ERROR_1cc0062c]);
5230        }
5231        if (!skip) {
5232            pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
5233        }
5234    }
5235    lock.unlock();
5236    if (!skip)
5237        dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
5238}
5239
5240VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
5241    bool skip = false;
5242    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5243    unique_lock_t lock(global_lock);
5244    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5245    if (pCB) {
5246        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ca02415);
5247        skip |= ValidateCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
5248        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
5249    }
5250    lock.unlock();
5251    if (!skip) dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
5252}
5253
5254VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
5255    bool skip = false;
5256    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5257    unique_lock_t lock(global_lock);
5258    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5259    if (pCB) {
5260        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ce02415);
5261        skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
5262        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
5263    }
5264    lock.unlock();
5265    if (!skip) dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
5266}
5267
5268VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
5269                                                    uint32_t compareMask) {
5270    bool skip = false;
5271    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5272    unique_lock_t lock(global_lock);
5273    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5274    if (pCB) {
5275        skip |=
5276            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1da02415);
5277        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
5278        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
5279    }
5280    lock.unlock();
5281    if (!skip) dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
5282}
5283
5284VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
5285    bool skip = false;
5286    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5287    unique_lock_t lock(global_lock);
5288    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5289    if (pCB) {
5290        skip |=
5291            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1de02415);
5292        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
5293        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
5294    }
5295    lock.unlock();
5296    if (!skip) dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
5297}
5298
5299VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
5300    bool skip = false;
5301    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5302    unique_lock_t lock(global_lock);
5303    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5304    if (pCB) {
5305        skip |=
5306            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1dc02415);
5307        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
5308        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
5309    }
5310    lock.unlock();
5311    if (!skip) dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
5312}
5313
5314VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
5315                                                 VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
5316                                                 const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
5317                                                 const uint32_t *pDynamicOffsets) {
5318    bool skip = false;
5319    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5320    unique_lock_t lock(global_lock);
5321    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
5322    if (cb_state) {
5323        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
5324                                      VALIDATION_ERROR_17c02415);
5325        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
5326        // Track total count of dynamic descriptor types to make sure we have an offset for each one
5327        uint32_t total_dynamic_descriptors = 0;
5328        string error_string = "";
5329        uint32_t last_set_index = firstSet + setCount - 1;
5330        if (last_set_index >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
5331            cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
5332            cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(last_set_index + 1);
5333        }
5334        auto old_final_bound_set = cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[last_set_index];
5335        auto pipeline_layout = getPipelineLayout(dev_data, layout);
5336        for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
5337            cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(dev_data, pDescriptorSets[set_idx]);
5338            if (descriptor_set) {
5339                cb_state->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
5340                cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[set_idx + firstSet] = descriptor_set;
5341                if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
5342                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
5343                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
5344                                    __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
5345                                    "Descriptor Set 0x%" PRIxLEAST64
5346                                    " bound but it was never updated. You may want to either update it or not bind it.",
5347                                    HandleToUint64(pDescriptorSets[set_idx]));
5348                }
5349                // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
5350                if (!verify_set_layout_compatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
5351                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5352                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
5353                                    __LINE__, VALIDATION_ERROR_17c002cc, "DS",
5354                                    "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
5355                                    "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s. %s",
5356                                    set_idx, set_idx + firstSet, HandleToUint64(layout), error_string.c_str(),
5357                                    validation_error_map[VALIDATION_ERROR_17c002cc]);
5358                }
5359
5360                auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
5361
5362                cb_state->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx].clear();
5363
5364                if (set_dynamic_descriptor_count) {
5365                    // First make sure we won't overstep bounds of pDynamicOffsets array
5366                    if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
5367                        skip |= log_msg(
5368                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
5369                            HandleToUint64(pDescriptorSets[set_idx]), __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
5370                            "descriptorSet #%u (0x%" PRIxLEAST64
5371                            ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
5372                            "array. There must be one dynamic offset for each dynamic descriptor being bound.",
5373                            set_idx, HandleToUint64(pDescriptorSets[set_idx]), descriptor_set->GetDynamicDescriptorCount(),
5374                            (dynamicOffsetCount - total_dynamic_descriptors));
5375                    } else {  // Validate and store dynamic offsets with the set
5376                        // Validate Dynamic Offset Minimums
5377                        uint32_t cur_dyn_offset = total_dynamic_descriptors;
5378                        for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
5379                            if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
5380                                if (SafeModulo(
5381                                        pDynamicOffsets[cur_dyn_offset],
5382                                        dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
5383                                    skip |=
5384                                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5385                                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
5386                                                VALIDATION_ERROR_17c002d4, "DS",
5387                                                "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
5388                                                "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
5389                                                cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
5390                                                dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
5391                                                validation_error_map[VALIDATION_ERROR_17c002d4]);
5392                                }
5393                                cur_dyn_offset++;
5394                            } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
5395                                if (SafeModulo(
5396                                        pDynamicOffsets[cur_dyn_offset],
5397                                        dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
5398                                    skip |=
5399                                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5400                                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
5401                                                VALIDATION_ERROR_17c002d4, "DS",
5402                                                "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
5403                                                "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
5404                                                cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
5405                                                dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment,
5406                                                validation_error_map[VALIDATION_ERROR_17c002d4]);
5407                                }
5408                                cur_dyn_offset++;
5409                            }
5410                        }
5411
5412                        cb_state->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx] =
5413                            std::vector<uint32_t>(pDynamicOffsets + total_dynamic_descriptors,
5414                                                  pDynamicOffsets + total_dynamic_descriptors + set_dynamic_descriptor_count);
5415                        // Keep running total of dynamic descriptor count to verify at the end
5416                        total_dynamic_descriptors += set_dynamic_descriptor_count;
5417                    }
5418                }
5419            } else {
5420                skip |=
5421                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
5422                            HandleToUint64(pDescriptorSets[set_idx]), __LINE__, DRAWSTATE_INVALID_SET, "DS",
5423                            "Attempt to bind descriptor set 0x%" PRIxLEAST64 " that doesn't exist!",
5424                            HandleToUint64(pDescriptorSets[set_idx]));
5425            }
5426            // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
5427            if (firstSet > 0) {  // Check set #s below the first bound set
5428                for (uint32_t i = 0; i < firstSet; ++i) {
5429                    if (cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
5430                        !verify_set_layout_compatibility(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i],
5431                                                         pipeline_layout, i, error_string)) {
5432                        skip |= log_msg(
5433                            dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
5434                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
5435                            HandleToUint64(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i]), __LINE__, DRAWSTATE_NONE,
5436                            "DS", "DescriptorSet 0x%" PRIxLEAST64
5437                                  " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
5438                            HandleToUint64(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i]), i,
5439                            HandleToUint64(layout));
5440                        cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
5441                    }
5442                }
5443            }
5444            // Check if newly last bound set invalidates any remaining bound sets
5445            if ((cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (last_set_index)) {
5446                if (old_final_bound_set &&
5447                    !verify_set_layout_compatibility(old_final_bound_set, pipeline_layout, last_set_index, error_string)) {
5448                    auto old_set = old_final_bound_set->GetSet();
5449                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
5450                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(old_set), __LINE__,
5451                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
5452                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
5453                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
5454                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
5455                                    HandleToUint64(old_set), last_set_index,
5456                                    HandleToUint64(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[last_set_index]),
5457                                    last_set_index, last_set_index + 1, HandleToUint64(layout));
5458                    cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
5459                }
5460            }
5461        }
5462        //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
5463        if (total_dynamic_descriptors != dynamicOffsetCount) {
5464            skip |=
5465                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5466                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_17c002ce, "DS",
5467                        "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
5468                        "is %u. It should exactly match the number of dynamic descriptors. %s",
5469                        setCount, total_dynamic_descriptors, dynamicOffsetCount, validation_error_map[VALIDATION_ERROR_17c002ce]);
5470        }
5471    }
5472    lock.unlock();
5473    if (!skip)
5474        dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
5475                                                       pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
5476}
5477
5478VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5479                                              VkIndexType indexType) {
5480    bool skip = false;
5481    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5482    // TODO : Somewhere need to verify that IBs have correct usage state flagged
5483    unique_lock_t lock(global_lock);
5484
5485    auto buffer_state = GetBufferState(dev_data, buffer);
5486    auto cb_node = GetCBNode(dev_data, commandBuffer);
5487    if (cb_node && buffer_state) {
5488        skip |=
5489            ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_17e02415);
5490        skip |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
5491        skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_17e00364);
5492        std::function<bool()> function = [=]() {
5493            return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
5494        };
5495        cb_node->queue_submit_functions.push_back(function);
5496        VkDeviceSize offset_align = 0;
5497        switch (indexType) {
5498            case VK_INDEX_TYPE_UINT16:
5499                offset_align = 2;
5500                break;
5501            case VK_INDEX_TYPE_UINT32:
5502                offset_align = 4;
5503                break;
5504            default:
5505                // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
5506                break;
5507        }
5508        if (!offset_align || (offset % offset_align)) {
5509            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5510                            HandleToUint64(commandBuffer), __LINE__, DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
5511                            "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
5512                            string_VkIndexType(indexType));
5513        }
5514        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
5515    } else {
5516        assert(0);
5517    }
5518    lock.unlock();
5519    if (!skip) dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
5520}
5521
5522void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
5523    uint32_t end = firstBinding + bindingCount;
5524    if (pCB->currentDrawData.buffers.size() < end) {
5525        pCB->currentDrawData.buffers.resize(end);
5526    }
5527    for (uint32_t i = 0; i < bindingCount; ++i) {
5528        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
5529    }
5530}
5531
5532static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
5533
5534VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
5535                                                const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
5536    bool skip = false;
5537    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5538    // TODO : Somewhere need to verify that VBs have correct usage state flagged
5539    unique_lock_t lock(global_lock);
5540
5541    auto cb_node = GetCBNode(dev_data, commandBuffer);
5542    if (cb_node) {
5543        skip |=
5544            ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_18202415);
5545        skip |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
5546        for (uint32_t i = 0; i < bindingCount; ++i) {
5547            auto buffer_state = GetBufferState(dev_data, pBuffers[i]);
5548            assert(buffer_state);
5549            skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_182004e8);
5550            std::function<bool()> function = [=]() {
5551                return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
5552            };
5553            cb_node->queue_submit_functions.push_back(function);
5554            if (pOffsets[i] >= buffer_state->createInfo.size) {
5555                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5556                                HandleToUint64(buffer_state->buffer), __LINE__, VALIDATION_ERROR_182004e4, "DS",
5557                                "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer. %s",
5558                                pOffsets[i], validation_error_map[VALIDATION_ERROR_182004e4]);
5559            }
5560        }
5561        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
5562    } else {
5563        assert(0);
5564    }
5565    lock.unlock();
5566    if (!skip) dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
5567}
5568
5569// Expects global_lock to be held by caller
5570static void MarkStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5571    for (auto imageView : pCB->updateImages) {
5572        auto view_state = GetImageViewState(dev_data, imageView);
5573        if (!view_state) continue;
5574
5575        auto image_state = GetImageState(dev_data, view_state->create_info.image);
5576        assert(image_state);
5577        std::function<bool()> function = [=]() {
5578            SetImageMemoryValid(dev_data, image_state, true);
5579            return false;
5580        };
5581        pCB->queue_submit_functions.push_back(function);
5582    }
5583    for (auto buffer : pCB->updateBuffers) {
5584        auto buffer_state = GetBufferState(dev_data, buffer);
5585        assert(buffer_state);
5586        std::function<bool()> function = [=]() {
5587            SetBufferMemoryValid(dev_data, buffer_state, true);
5588            return false;
5589        };
5590        pCB->queue_submit_functions.push_back(function);
5591    }
5592}
5593
5594// Generic function to handle validation for all CmdDraw* type functions
5595static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
5596                                CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller, VkQueueFlags queue_flags,
5597                                UNIQUE_VALIDATION_ERROR_CODE queue_flag_code, UNIQUE_VALIDATION_ERROR_CODE msg_code,
5598                                UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
5599    bool skip = false;
5600    *cb_state = GetCBNode(dev_data, cmd_buffer);
5601    if (*cb_state) {
5602        skip |= ValidateCmdQueueFlags(dev_data, *cb_state, caller, queue_flags, queue_flag_code);
5603        skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
5604        skip |= ValidateDrawState(dev_data, *cb_state, indexed, bind_point, caller, dynamic_state_msg_code);
5605        skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
5606                                                                : insideRenderPass(dev_data, *cb_state, caller, msg_code);
5607    }
5608    return skip;
5609}
5610
5611// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
5612static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
5613    UpdateDrawState(dev_data, cb_state, bind_point);
5614    MarkStoreImagesAndBuffersAsWritten(dev_data, cb_state);
5615}
5616
5617// Generic function to handle state update for all CmdDraw* type functions
5618static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
5619    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
5620    updateResourceTrackingOnDraw(cb_state);
5621    cb_state->hasDrawCmd = true;
5622}
5623
5624static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
5625                                   GLOBAL_CB_NODE **cb_state, const char *caller) {
5626    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
5627                               VALIDATION_ERROR_1a202415, VALIDATION_ERROR_1a200017, VALIDATION_ERROR_1a200376);
5628}
5629
5630static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
5631    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
5632}
5633
5634VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
5635                                   uint32_t firstVertex, uint32_t firstInstance) {
5636    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5637    GLOBAL_CB_NODE *cb_state = nullptr;
5638    unique_lock_t lock(global_lock);
5639    bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
5640    lock.unlock();
5641    if (!skip) {
5642        dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
5643        lock.lock();
5644        PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
5645        lock.unlock();
5646    }
5647}
5648
5649static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
5650                                          VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
5651    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
5652                               VALIDATION_ERROR_1a402415, VALIDATION_ERROR_1a400017, VALIDATION_ERROR_1a40039c);
5653}
5654
5655static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
5656    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
5657}
5658
5659VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
5660                                          uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
5661    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5662    GLOBAL_CB_NODE *cb_state = nullptr;
5663    unique_lock_t lock(global_lock);
5664    bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
5665                                              "vkCmdDrawIndexed()");
5666    lock.unlock();
5667    if (!skip) {
5668        dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
5669        lock.lock();
5670        PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
5671        lock.unlock();
5672    }
5673}
5674
5675static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
5676                                           VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
5677                                           const char *caller) {
5678    bool skip =
5679        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
5680                            VALIDATION_ERROR_1aa02415, VALIDATION_ERROR_1aa00017, VALIDATION_ERROR_1aa003cc);
5681    *buffer_state = GetBufferState(dev_data, buffer);
5682    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1aa003b4);
5683    // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
5684    // VkDrawIndirectCommand structures accessed by this command must be 0, which will require access to the contents of 'buffer'.
5685    return skip;
5686}
5687
5688static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
5689                                          BUFFER_STATE *buffer_state) {
5690    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
5691    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
5692}
5693
5694VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
5695                                           uint32_t stride) {
5696    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5697    GLOBAL_CB_NODE *cb_state = nullptr;
5698    BUFFER_STATE *buffer_state = nullptr;
5699    unique_lock_t lock(global_lock);
5700    bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
5701                                               &buffer_state, "vkCmdDrawIndirect()");
5702    lock.unlock();
5703    if (!skip) {
5704        dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
5705        lock.lock();
5706        PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
5707        lock.unlock();
5708    }
5709}
5710
5711static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
5712                                                  VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
5713                                                  BUFFER_STATE **buffer_state, const char *caller) {
5714    bool skip =
5715        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
5716                            VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1a602415, VALIDATION_ERROR_1a600017, VALIDATION_ERROR_1a600434);
5717    *buffer_state = GetBufferState(dev_data, buffer);
5718    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a60041c);
5719    // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
5720    // VkDrawIndexedIndirectCommand structures accessed by this command must be 0, which will require access to the contents of
5721    // 'buffer'.
5722    return skip;
5723}
5724
5725static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
5726                                                 BUFFER_STATE *buffer_state) {
5727    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
5728    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
5729}
5730
5731VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5732                                                  uint32_t count, uint32_t stride) {
5733    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5734    GLOBAL_CB_NODE *cb_state = nullptr;
5735    BUFFER_STATE *buffer_state = nullptr;
5736    unique_lock_t lock(global_lock);
5737    bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
5738                                                      &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
5739    lock.unlock();
5740    if (!skip) {
5741        dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
5742        lock.lock();
5743        PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
5744        lock.unlock();
5745    }
5746}
5747
5748static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
5749                                       VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
5750    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
5751                               VALIDATION_ERROR_19c02415, VALIDATION_ERROR_19c00017, VALIDATION_ERROR_UNDEFINED);
5752}
5753
5754static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
5755    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
5756}
5757
5758VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
5759    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5760    GLOBAL_CB_NODE *cb_state = nullptr;
5761    unique_lock_t lock(global_lock);
5762    bool skip =
5763        PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
5764    lock.unlock();
5765    if (!skip) {
5766        dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
5767        lock.lock();
5768        PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
5769        lock.unlock();
5770    }
5771}
5772
5773static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
5774                                               VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
5775                                               BUFFER_STATE **buffer_state, const char *caller) {
5776    bool skip =
5777        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
5778                            VALIDATION_ERROR_1a002415, VALIDATION_ERROR_1a000017, VALIDATION_ERROR_UNDEFINED);
5779    *buffer_state = GetBufferState(dev_data, buffer);
5780    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a000322);
5781    return skip;
5782}
5783
5784static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
5785                                              BUFFER_STATE *buffer_state) {
5786    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
5787    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
5788}
5789
5790VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
5791    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5792    GLOBAL_CB_NODE *cb_state = nullptr;
5793    BUFFER_STATE *buffer_state = nullptr;
5794    unique_lock_t lock(global_lock);
5795    bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
5796                                                   &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
5797    lock.unlock();
5798    if (!skip) {
5799        dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
5800        lock.lock();
5801        PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
5802        lock.unlock();
5803    }
5804}
5805
5806VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
5807                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
5808    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5809    unique_lock_t lock(global_lock);
5810
5811    auto cb_node = GetCBNode(device_data, commandBuffer);
5812    auto src_buffer_state = GetBufferState(device_data, srcBuffer);
5813    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
5814
5815    if (cb_node && src_buffer_state && dst_buffer_state) {
5816        bool skip = PreCallValidateCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
5817        if (!skip) {
5818            PreCallRecordCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
5819            lock.unlock();
5820            device_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
5821        }
5822    } else {
5823        lock.unlock();
5824        assert(0);
5825    }
5826}
5827
5828VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5829                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
5830                                        const VkImageCopy *pRegions) {
5831    bool skip = false;
5832    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5833    unique_lock_t lock(global_lock);
5834
5835    auto cb_node = GetCBNode(device_data, commandBuffer);
5836    auto src_image_state = GetImageState(device_data, srcImage);
5837    auto dst_image_state = GetImageState(device_data, dstImage);
5838    if (cb_node && src_image_state && dst_image_state) {
5839        skip = PreCallValidateCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
5840                                           srcImageLayout, dstImageLayout);
5841        if (!skip) {
5842            PreCallRecordCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
5843                                      dstImageLayout);
5844            lock.unlock();
5845            device_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
5846                                                     pRegions);
5847        }
5848    } else {
5849        lock.unlock();
5850        assert(0);
5851    }
5852}
5853
5854// Validate that an image's sampleCount matches the requirement for a specific API call
5855bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
5856                              const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
5857    bool skip = false;
5858    if (image_state->createInfo.samples != sample_count) {
5859        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
5860                       HandleToUint64(image_state->image), 0, msgCode, "DS",
5861                       "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s. %s", location,
5862                       HandleToUint64(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
5863                       string_VkSampleCountFlagBits(sample_count), validation_error_map[msgCode]);
5864    }
5865    return skip;
5866}
5867
5868VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5869                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
5870                                        const VkImageBlit *pRegions, VkFilter filter) {
5871    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5872    unique_lock_t lock(global_lock);
5873
5874    auto cb_node = GetCBNode(dev_data, commandBuffer);
5875    auto src_image_state = GetImageState(dev_data, srcImage);
5876    auto dst_image_state = GetImageState(dev_data, dstImage);
5877
5878    bool skip = PreCallValidateCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, filter);
5879
5880    if (!skip) {
5881        PreCallRecordCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state);
5882        lock.unlock();
5883        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
5884                                              pRegions, filter);
5885    }
5886}
5887
5888VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
5889                                                VkImageLayout dstImageLayout, uint32_t regionCount,
5890                                                const VkBufferImageCopy *pRegions) {
5891    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5892    unique_lock_t lock(global_lock);
5893    bool skip = false;
5894    auto cb_node = GetCBNode(device_data, commandBuffer);
5895    auto src_buffer_state = GetBufferState(device_data, srcBuffer);
5896    auto dst_image_state = GetImageState(device_data, dstImage);
5897    if (cb_node && src_buffer_state && dst_image_state) {
5898        skip = PreCallValidateCmdCopyBufferToImage(device_data, dstImageLayout, cb_node, src_buffer_state, dst_image_state,
5899                                                        regionCount, pRegions, "vkCmdCopyBufferToImage()");
5900    } else {
5901        lock.unlock();
5902        assert(0);
5903        // TODO: report VU01244 here, or put in object tracker?
5904    }
5905    if (!skip) {
5906        PreCallRecordCmdCopyBufferToImage(device_data, cb_node, src_buffer_state, dst_image_state, regionCount, pRegions,
5907                                          dstImageLayout);
5908        lock.unlock();
5909        device_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
5910    }
5911}
5912
5913VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5914                                                VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
5915    bool skip = false;
5916    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5917    unique_lock_t lock(global_lock);
5918
5919    auto cb_node = GetCBNode(device_data, commandBuffer);
5920    auto src_image_state = GetImageState(device_data, srcImage);
5921    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
5922    if (cb_node && src_image_state && dst_buffer_state) {
5923        skip = PreCallValidateCmdCopyImageToBuffer(device_data, srcImageLayout, cb_node, src_image_state, dst_buffer_state,
5924                                                        regionCount, pRegions, "vkCmdCopyImageToBuffer()");
5925    } else {
5926        lock.unlock();
5927        assert(0);
5928        // TODO: report VU01262 here, or put in object tracker?
5929    }
5930    if (!skip) {
5931        PreCallRecordCmdCopyImageToBuffer(device_data, cb_node, src_image_state, dst_buffer_state, regionCount, pRegions,
5932                                          srcImageLayout);
5933        lock.unlock();
5934        device_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
5935    }
5936}
5937
5938static bool PreCallCmdUpdateBuffer(layer_data *device_data, const GLOBAL_CB_NODE *cb_state, const BUFFER_STATE *dst_buffer_state) {
5939    bool skip = false;
5940    skip |= ValidateMemoryIsBoundToBuffer(device_data, dst_buffer_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400046);
5941    // Validate that DST buffer has correct usage flags set
5942    skip |= ValidateBufferUsageFlags(device_data, dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
5943                                     VALIDATION_ERROR_1e400044, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
5944    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdUpdateBuffer()",
5945                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1e402415);
5946    skip |= ValidateCmd(device_data, cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
5947    skip |= insideRenderPass(device_data, cb_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400017);
5948    return skip;
5949}
5950
5951static void PostCallRecordCmdUpdateBuffer(layer_data *device_data, GLOBAL_CB_NODE *cb_state, BUFFER_STATE *dst_buffer_state) {
5952    // Update bindings between buffer and cmd buffer
5953    AddCommandBufferBindingBuffer(device_data, cb_state, dst_buffer_state);
5954    std::function<bool()> function = [=]() {
5955        SetBufferMemoryValid(device_data, dst_buffer_state, true);
5956        return false;
5957    };
5958    cb_state->queue_submit_functions.push_back(function);
5959}
5960
5961VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5962                                           VkDeviceSize dataSize, const uint32_t *pData) {
5963    bool skip = false;
5964    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5965    unique_lock_t lock(global_lock);
5966
5967    auto cb_state = GetCBNode(dev_data, commandBuffer);
5968    assert(cb_state);
5969    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
5970    assert(dst_buff_state);
5971    skip |= PreCallCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
5972    lock.unlock();
5973    if (!skip) {
5974        dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
5975        lock.lock();
5976        PostCallRecordCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
5977        lock.unlock();
5978    }
5979}
5980
5981VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5982                                         VkDeviceSize size, uint32_t data) {
5983    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5984    unique_lock_t lock(global_lock);
5985    auto cb_node = GetCBNode(device_data, commandBuffer);
5986    auto buffer_state = GetBufferState(device_data, dstBuffer);
5987
5988    if (cb_node && buffer_state) {
5989        bool skip = PreCallValidateCmdFillBuffer(device_data, cb_node, buffer_state);
5990        if (!skip) {
5991            PreCallRecordCmdFillBuffer(device_data, cb_node, buffer_state);
5992            lock.unlock();
5993            device_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
5994        }
5995    } else {
5996        lock.unlock();
5997        assert(0);
5998    }
5999}
6000
6001VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
6002                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
6003                                               const VkClearRect *pRects) {
6004    bool skip = false;
6005    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6006    {
6007        lock_guard_t lock(global_lock);
6008        skip = PreCallValidateCmdClearAttachments(dev_data, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
6009    }
6010    if (!skip) dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
6011}
6012
6013VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
6014                                              const VkClearColorValue *pColor, uint32_t rangeCount,
6015                                              const VkImageSubresourceRange *pRanges) {
6016    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6017    unique_lock_t lock(global_lock);
6018
6019    bool skip = PreCallValidateCmdClearColorImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
6020    if (!skip) {
6021        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
6022        lock.unlock();
6023        dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
6024    }
6025}
6026
6027VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
6028                                                     const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
6029                                                     const VkImageSubresourceRange *pRanges) {
6030    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6031    unique_lock_t lock(global_lock);
6032
6033    bool skip = PreCallValidateCmdClearDepthStencilImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
6034    if (!skip) {
6035        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
6036        lock.unlock();
6037        dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
6038    }
6039}
6040
6041VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
6042                                           VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
6043                                           const VkImageResolve *pRegions) {
6044    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6045    unique_lock_t lock(global_lock);
6046
6047    auto cb_node = GetCBNode(dev_data, commandBuffer);
6048    auto src_image_state = GetImageState(dev_data, srcImage);
6049    auto dst_image_state = GetImageState(dev_data, dstImage);
6050
6051    bool skip = PreCallValidateCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions);
6052
6053    if (!skip) {
6054        PreCallRecordCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state);
6055        lock.unlock();
6056        dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
6057                                                 pRegions);
6058    }
6059}
6060
6061VKAPI_ATTR void VKAPI_CALL GetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
6062                                                     VkSubresourceLayout *pLayout) {
6063    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6064
6065    bool skip = PreCallValidateGetImageSubresourceLayout(device_data, image, pSubresource);
6066    if (!skip) {
6067        device_data->dispatch_table.GetImageSubresourceLayout(device, image, pSubresource, pLayout);
6068    }
6069}
6070
6071bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
6072    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6073    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6074    if (pCB) {
6075        pCB->eventToStageMap[event] = stageMask;
6076    }
6077    auto queue_data = dev_data->queueMap.find(queue);
6078    if (queue_data != dev_data->queueMap.end()) {
6079        queue_data->second.eventToStageMap[event] = stageMask;
6080    }
6081    return false;
6082}
6083
6084VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
6085    bool skip = false;
6086    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6087    unique_lock_t lock(global_lock);
6088    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6089    if (pCB) {
6090        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6091                                      VALIDATION_ERROR_1d402415);
6092        skip |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
6093        skip |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_1d400017);
6094        skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()", VALIDATION_ERROR_1d4008fc,
6095                                             VALIDATION_ERROR_1d4008fe);
6096        auto event_state = GetEventNode(dev_data, event);
6097        if (event_state) {
6098            addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
6099            event_state->cb_bindings.insert(pCB);
6100        }
6101        pCB->events.push_back(event);
6102        if (!pCB->waitedEvents.count(event)) {
6103            pCB->writeEventsBeforeWait.push_back(event);
6104        }
6105        pCB->eventUpdates.emplace_back([=](VkQueue q){return setEventStageMask(q, commandBuffer, event, stageMask);});
6106    }
6107    lock.unlock();
6108    if (!skip) dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
6109}
6110
6111VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
6112    bool skip = false;
6113    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6114    unique_lock_t lock(global_lock);
6115    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6116    if (pCB) {
6117        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6118                                      VALIDATION_ERROR_1c402415);
6119        skip |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
6120        skip |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_1c400017);
6121        skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdResetEvent()", VALIDATION_ERROR_1c400904,
6122                                             VALIDATION_ERROR_1c400906);
6123        auto event_state = GetEventNode(dev_data, event);
6124        if (event_state) {
6125            addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
6126            event_state->cb_bindings.insert(pCB);
6127        }
6128        pCB->events.push_back(event);
6129        if (!pCB->waitedEvents.count(event)) {
6130            pCB->writeEventsBeforeWait.push_back(event);
6131        }
6132        // TODO : Add check for VALIDATION_ERROR_32c008f8
6133        pCB->eventUpdates.emplace_back([=](VkQueue q){return setEventStageMask(q, commandBuffer, event, VkPipelineStageFlags(0));});
6134    }
6135    lock.unlock();
6136    if (!skip) dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
6137}
6138
6139// Return input pipeline stage flags, expanded for individual bits if VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT
6140static VkPipelineStageFlags ExpandPipelineStageFlags(VkPipelineStageFlags inflags) {
6141    return (inflags != VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT)
6142               ? inflags
6143               : (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
6144                  VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
6145                  VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
6146                  VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
6147                  VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
6148                  VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
6149}
6150
6151// Verify image barrier image state and that the image is consistent with FB image
6152static bool ValidateImageBarrierImage(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE const *cb_state,
6153                                      VkFramebuffer framebuffer, uint32_t active_subpass, const safe_VkSubpassDescription &sub_desc,
6154                                      uint64_t rp_handle, uint32_t img_index, const VkImageMemoryBarrier &img_barrier) {
6155    bool skip = false;
6156    const auto &fb_state = GetFramebufferState(device_data, framebuffer);
6157    assert(fb_state);
6158    const auto img_bar_image = img_barrier.image;
6159    bool image_match = false;
6160    bool sub_image_found = false;  // Do we find a corresponding subpass description
6161    VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED;
6162    uint32_t attach_index = 0;
6163    uint32_t index_count = 0;
6164    // Verify that a framebuffer image matches barrier image
6165    for (const auto &fb_attach : fb_state->attachments) {
6166        if (img_bar_image == fb_attach.image) {
6167            image_match = true;
6168            attach_index = index_count;
6169            break;
6170        }
6171        index_count++;
6172    }
6173    if (image_match) {  // Make sure subpass is referring to matching attachment
6174        if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) {
6175            sub_image_layout = sub_desc.pDepthStencilAttachment->layout;
6176            sub_image_found = true;
6177        } else {
6178            for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) {
6179                if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) {
6180                    sub_image_layout = sub_desc.pColorAttachments[j].layout;
6181                    sub_image_found = true;
6182                    break;
6183                } else if (sub_desc.pResolveAttachments && sub_desc.pResolveAttachments[j].attachment == attach_index) {
6184                    sub_image_layout = sub_desc.pResolveAttachments[j].layout;
6185                    sub_image_found = true;
6186                    break;
6187                }
6188            }
6189        }
6190        if (!sub_image_found) {
6191            skip |= log_msg(
6192                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
6193                __LINE__, VALIDATION_ERROR_1b800936, "CORE",
6194                "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
6195                ") is not referenced by the VkSubpassDescription for active subpass (%d) of current renderPass (0x%" PRIx64 "). %s",
6196                funcName, img_index, HandleToUint64(img_bar_image), active_subpass, rp_handle,
6197                validation_error_map[VALIDATION_ERROR_1b800936]);
6198        }
6199    } else {  // !image_match
6200        auto const fb_handle = HandleToUint64(fb_state->framebuffer);
6201        skip |=
6202            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, fb_handle,
6203                    __LINE__, VALIDATION_ERROR_1b800936, "CORE",
6204                    "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
6205                    ") does not match an image from the current framebuffer (0x%" PRIx64 "). %s",
6206                    funcName, img_index, HandleToUint64(img_bar_image), fb_handle, validation_error_map[VALIDATION_ERROR_1b800936]);
6207    }
6208    if (img_barrier.oldLayout != img_barrier.newLayout) {
6209        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6210                        HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_1b80093a, "CORE",
6211                        "%s: As the Image Barrier for image 0x%" PRIx64
6212                        " is being executed within a render pass instance, oldLayout must equal newLayout yet they are "
6213                        "%s and %s. %s",
6214                        funcName, HandleToUint64(img_barrier.image), string_VkImageLayout(img_barrier.oldLayout),
6215                        string_VkImageLayout(img_barrier.newLayout), validation_error_map[VALIDATION_ERROR_1b80093a]);
6216    } else {
6217        if (sub_image_found && sub_image_layout != img_barrier.oldLayout) {
6218            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
6219                            rp_handle, __LINE__, VALIDATION_ERROR_1b800938, "CORE",
6220                            "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
6221                            ") is referenced by the VkSubpassDescription for active subpass (%d) of current renderPass (0x%" PRIx64
6222                            ") as having layout %s, but image barrier has layout %s. %s",
6223                            funcName, img_index, HandleToUint64(img_bar_image), active_subpass, rp_handle,
6224                            string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(sub_image_layout),
6225                            validation_error_map[VALIDATION_ERROR_1b800938]);
6226        }
6227    }
6228    return skip;
6229}
6230
6231// Validate image barriers within a renderPass
6232static bool ValidateRenderPassImageBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
6233                                            uint32_t active_subpass, const safe_VkSubpassDescription &sub_desc, uint64_t rp_handle,
6234                                            VkAccessFlags sub_src_access_mask, VkAccessFlags sub_dst_access_mask,
6235                                            uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) {
6236    bool skip = false;
6237    for (uint32_t i = 0; i < image_mem_barrier_count; ++i) {
6238        const auto &img_barrier = image_barriers[i];
6239        const auto &img_src_access_mask = img_barrier.srcAccessMask;
6240        if (img_src_access_mask != (sub_src_access_mask & img_src_access_mask)) {
6241            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
6242                            rp_handle, __LINE__, VALIDATION_ERROR_1b80092e, "CORE",
6243                            "%s: Barrier pImageMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
6244                            "srcAccessMask(0x%X) of "
6245                            "subpass %d of renderPass 0x%" PRIx64 ". %s",
6246                            funcName, i, img_src_access_mask, sub_src_access_mask, active_subpass, rp_handle,
6247                            validation_error_map[VALIDATION_ERROR_1b80092e]);
6248        }
6249        const auto &img_dst_access_mask = img_barrier.dstAccessMask;
6250        if (img_dst_access_mask != (sub_dst_access_mask & img_dst_access_mask)) {
6251            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
6252                            rp_handle, __LINE__, VALIDATION_ERROR_1b800930, "CORE",
6253                            "%s: Barrier pImageMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
6254                            "dstAccessMask(0x%X) of "
6255                            "subpass %d of renderPass 0x%" PRIx64 ". %s",
6256                            funcName, i, img_dst_access_mask, sub_dst_access_mask, active_subpass, rp_handle,
6257                            validation_error_map[VALIDATION_ERROR_1b800930]);
6258        }
6259        if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
6260            VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
6261            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
6262                            rp_handle, __LINE__, VALIDATION_ERROR_1b80093c, "CORE",
6263                            "%s: Barrier pImageMemoryBarriers[%d].srcQueueFamilyIndex is %d and "
6264                            "pImageMemoryBarriers[%d].dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED. %s",
6265                            funcName, i, img_barrier.srcQueueFamilyIndex, i, img_barrier.dstQueueFamilyIndex,
6266                            validation_error_map[VALIDATION_ERROR_1b80093c]);
6267        }
6268        // Secondary CBs can have null framebuffer so queue up validation in that case 'til FB is known
6269        if (VK_NULL_HANDLE == cb_state->activeFramebuffer) {
6270            assert(VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level);
6271            // Secondary CB case w/o FB specified delay validation
6272            cb_state->cmd_execute_commands_functions.emplace_back([=](VkFramebuffer fb) {
6273                return ValidateImageBarrierImage(device_data, funcName, cb_state, fb, active_subpass, sub_desc, rp_handle, i,
6274                                                 img_barrier);
6275            });
6276        } else {
6277            skip |= ValidateImageBarrierImage(device_data, funcName, cb_state, cb_state->activeFramebuffer, active_subpass,
6278                                              sub_desc, rp_handle, i, img_barrier);
6279        }
6280    }
6281    return skip;
6282}
6283
6284// Validate VUs for Pipeline Barriers that are within a renderPass
6285// Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
6286static bool ValidateRenderPassPipelineBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
6287                                               VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
6288                                               VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
6289                                               const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
6290                                               const VkBufferMemoryBarrier *buffer_mem_barriers, uint32_t image_mem_barrier_count,
6291                                               const VkImageMemoryBarrier *image_barriers) {
6292    bool skip = false;
6293    auto rp_state = cb_state->activeRenderPass;
6294    const auto active_subpass = cb_state->activeSubpass;
6295    auto rp_handle = HandleToUint64(rp_state->renderPass);
6296    if (!rp_state->hasSelfDependency[active_subpass]) {
6297        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
6298                        rp_handle, __LINE__, VALIDATION_ERROR_1b800928, "CORE",
6299                        "%s: Barriers cannot be set during subpass %d of renderPass 0x%" PRIx64
6300                        " with no self-dependency specified. %s",
6301                        funcName, active_subpass, rp_handle, validation_error_map[VALIDATION_ERROR_1b800928]);
6302    } else {
6303        assert(rp_state->subpass_to_dependency_index[cb_state->activeSubpass] != -1);
6304        // Grab ref to current subpassDescription up-front for use below
6305        const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
6306        const auto &sub_dep = rp_state->createInfo.pDependencies[rp_state->subpass_to_dependency_index[active_subpass]];
6307        const auto &sub_src_stage_mask = ExpandPipelineStageFlags(sub_dep.srcStageMask);
6308        const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(sub_dep.dstStageMask);
6309        if ((sub_src_stage_mask != VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) &&
6310            (src_stage_mask != (sub_src_stage_mask & src_stage_mask))) {
6311            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
6312                            rp_handle, __LINE__, VALIDATION_ERROR_1b80092a, "CORE",
6313                            "%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask(0x%X) of "
6314                            "subpass %d of renderPass 0x%" PRIx64 ". %s",
6315                            funcName, src_stage_mask, sub_src_stage_mask, active_subpass, rp_handle,
6316                            validation_error_map[VALIDATION_ERROR_1b80092a]);
6317        }
6318        if ((sub_dst_stage_mask != VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) &&
6319            (dst_stage_mask != (sub_dst_stage_mask & dst_stage_mask))) {
6320            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
6321                            rp_handle, __LINE__, VALIDATION_ERROR_1b80092c, "CORE",
6322                            "%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask(0x%X) of "
6323                            "subpass %d of renderPass 0x%" PRIx64 ". %s",
6324                            funcName, dst_stage_mask, sub_dst_stage_mask, active_subpass, rp_handle,
6325                            validation_error_map[VALIDATION_ERROR_1b80092c]);
6326        }
6327        if (0 != buffer_mem_barrier_count) {
6328            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
6329                            rp_handle, __LINE__, VALIDATION_ERROR_1b800934, "CORE",
6330                            "%s: bufferMemoryBarrierCount is non-zero (%d) for "
6331                            "subpass %d of renderPass 0x%" PRIx64 ". %s",
6332                            funcName, buffer_mem_barrier_count, active_subpass, rp_handle,
6333                            validation_error_map[VALIDATION_ERROR_1b800934]);
6334        }
6335        const auto &sub_src_access_mask = sub_dep.srcAccessMask;
6336        const auto &sub_dst_access_mask = sub_dep.dstAccessMask;
6337        for (uint32_t i = 0; i < mem_barrier_count; ++i) {
6338            const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask;
6339            if (mb_src_access_mask != (sub_src_access_mask & mb_src_access_mask)) {
6340                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6341                                VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, __LINE__, VALIDATION_ERROR_1b80092e, "CORE",
6342                                "%s: Barrier pMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
6343                                "srcAccessMask(0x%X) of "
6344                                "subpass %d of renderPass 0x%" PRIx64 ". %s",
6345                                funcName, i, mb_src_access_mask, sub_src_access_mask, active_subpass, rp_handle,
6346                                validation_error_map[VALIDATION_ERROR_1b80092e]);
6347            }
6348            const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask;
6349            if (mb_dst_access_mask != (sub_dst_access_mask & mb_dst_access_mask)) {
6350                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6351                                VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, __LINE__, VALIDATION_ERROR_1b800930, "CORE",
6352                                "%s: Barrier pMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
6353                                "dstAccessMask(0x%X) of "
6354                                "subpass %d of renderPass 0x%" PRIx64 ". %s",
6355                                funcName, i, mb_dst_access_mask, sub_dst_access_mask, active_subpass, rp_handle,
6356                                validation_error_map[VALIDATION_ERROR_1b800930]);
6357            }
6358        }
6359        skip |= ValidateRenderPassImageBarriers(device_data, funcName, cb_state, active_subpass, sub_desc, rp_handle,
6360                                                sub_src_access_mask, sub_dst_access_mask, image_mem_barrier_count, image_barriers);
6361        if (sub_dep.dependencyFlags != dependency_flags) {
6362            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
6363                            rp_handle, __LINE__, VALIDATION_ERROR_1b800932, "CORE",
6364                            "%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency "
6365                            "dependencyFlags value (0x%X) for "
6366                            "subpass %d of renderPass 0x%" PRIx64 ". %s",
6367                            funcName, dependency_flags, sub_dep.dependencyFlags, cb_state->activeSubpass, rp_handle,
6368                            validation_error_map[VALIDATION_ERROR_1b800932]);
6369        }
6370    }
6371    return skip;
6372}
6373
6374// Array to mask individual accessMask to corresponding stageMask
6375//  accessMask active bit position (0-31) maps to index
6376const static VkPipelineStageFlags AccessMaskToPipeStage[20] = {
6377    // VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0
6378    VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
6379    // VK_ACCESS_INDEX_READ_BIT = 1
6380    VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
6381    // VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 2
6382    VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
6383    // VK_ACCESS_UNIFORM_READ_BIT = 3
6384    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
6385        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
6386        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
6387    // VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 4
6388    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
6389    // VK_ACCESS_SHADER_READ_BIT = 5
6390    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
6391        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
6392        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
6393    // VK_ACCESS_SHADER_WRITE_BIT = 6
6394    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
6395        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
6396        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
6397    // VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 7
6398    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
6399    // VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 8
6400    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
6401    // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 9
6402    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
6403    // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 10
6404    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
6405    // VK_ACCESS_TRANSFER_READ_BIT = 11
6406    VK_PIPELINE_STAGE_TRANSFER_BIT,
6407    // VK_ACCESS_TRANSFER_WRITE_BIT = 12
6408    VK_PIPELINE_STAGE_TRANSFER_BIT,
6409    // VK_ACCESS_HOST_READ_BIT = 13
6410    VK_PIPELINE_STAGE_HOST_BIT,
6411    // VK_ACCESS_HOST_WRITE_BIT = 14
6412    VK_PIPELINE_STAGE_HOST_BIT,
6413    // VK_ACCESS_MEMORY_READ_BIT = 15
6414    VK_ACCESS_FLAG_BITS_MAX_ENUM,  // Always match
6415    // VK_ACCESS_MEMORY_WRITE_BIT = 16
6416    VK_ACCESS_FLAG_BITS_MAX_ENUM,  // Always match
6417    // VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 17
6418    VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
6419    // VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 18
6420    VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
6421};
6422
6423// Verify that all bits of access_mask are supported by the src_stage_mask
6424static bool ValidateAccessMaskPipelineStage(VkAccessFlags access_mask, VkPipelineStageFlags stage_mask) {
6425    // Early out if all commands set, or access_mask NULL
6426    if ((stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (0 == access_mask)) return true;
6427
6428    stage_mask = ExpandPipelineStageFlags(stage_mask);
6429    int index = 0;
6430    // for each of the set bits in access_mask, make sure that supporting stage mask bit(s) are set
6431    while (access_mask) {
6432        index = (u_ffs(access_mask) - 1);
6433        assert(index >= 0);
6434        // Must have "!= 0" compare to prevent warning from MSVC
6435        if ((AccessMaskToPipeStage[index] & stage_mask) == 0) return false;  // early out
6436        access_mask &= ~(1 << index);                                        // Mask off bit that's been checked
6437    }
6438    return true;
6439}
6440
6441static bool ValidateBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE const *cb_state,
6442                             VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
6443                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
6444                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
6445                             const VkImageMemoryBarrier *pImageMemBarriers) {
6446    bool skip = false;
6447    for (uint32_t i = 0; i < memBarrierCount; ++i) {
6448        const auto &mem_barrier = pMemBarriers[i];
6449        if (!ValidateAccessMaskPipelineStage(mem_barrier.srcAccessMask, src_stage_mask)) {
6450            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6451                            HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_1b800940, "DS",
6452                            "%s: pMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X). %s", funcName, i,
6453                            mem_barrier.srcAccessMask, src_stage_mask, validation_error_map[VALIDATION_ERROR_1b800940]);
6454        }
6455        if (!ValidateAccessMaskPipelineStage(mem_barrier.dstAccessMask, dst_stage_mask)) {
6456            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6457                            HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_1b800942, "DS",
6458                            "%s: pMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X). %s", funcName, i,
6459                            mem_barrier.dstAccessMask, dst_stage_mask, validation_error_map[VALIDATION_ERROR_1b800942]);
6460        }
6461    }
6462    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
6463        auto mem_barrier = &pImageMemBarriers[i];
6464        if (!ValidateAccessMaskPipelineStage(mem_barrier->srcAccessMask, src_stage_mask)) {
6465            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6466                            HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_1b800940, "DS",
6467                            "%s: pImageMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X). %s", funcName,
6468                            i, mem_barrier->srcAccessMask, src_stage_mask, validation_error_map[VALIDATION_ERROR_1b800940]);
6469        }
6470        if (!ValidateAccessMaskPipelineStage(mem_barrier->dstAccessMask, dst_stage_mask)) {
6471            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6472                            HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_1b800942, "DS",
6473                            "%s: pImageMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X). %s", funcName,
6474                            i, mem_barrier->dstAccessMask, dst_stage_mask, validation_error_map[VALIDATION_ERROR_1b800942]);
6475        }
6476        auto image_data = GetImageState(device_data, mem_barrier->image);
6477        if (image_data) {
6478            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
6479            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
6480            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
6481                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
6482                // be VK_QUEUE_FAMILY_IGNORED
6483                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
6484                    skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6485                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer),
6486                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
6487                                    "%s: Image Barrier for image 0x%" PRIx64
6488                                    " was created with sharingMode of "
6489                                    "VK_SHARING_MODE_CONCURRENT. Src and dst "
6490                                    "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
6491                                    funcName, HandleToUint64(mem_barrier->image));
6492                }
6493            } else {
6494                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
6495                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
6496                // or both be a valid queue family
6497                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
6498                    (src_q_f_index != dst_q_f_index)) {
6499                    skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6500                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer),
6501                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
6502                                    "%s: Image 0x%" PRIx64
6503                                    " was created with sharingMode "
6504                                    "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
6505                                    "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
6506                                    "must be.",
6507                                    funcName, HandleToUint64(mem_barrier->image));
6508                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
6509                           ((src_q_f_index >= device_data->phys_dev_properties.queue_family_properties.size()) ||
6510                            (dst_q_f_index >= device_data->phys_dev_properties.queue_family_properties.size()))) {
6511                    skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6512                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer),
6513                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
6514                                    "%s: Image 0x%" PRIx64
6515                                    " was created with sharingMode "
6516                                    "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
6517                                    " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
6518                                    "queueFamilies crated for this device.",
6519                                    funcName, HandleToUint64(mem_barrier->image), src_q_f_index, dst_q_f_index,
6520                                    device_data->phys_dev_properties.queue_family_properties.size());
6521                }
6522            }
6523        }
6524
6525        if (mem_barrier->oldLayout != mem_barrier->newLayout) {
6526            skip |= ValidateMaskBitsFromLayouts(device_data, cb_state->commandBuffer, mem_barrier->srcAccessMask,
6527                                                mem_barrier->oldLayout, "Source");
6528            skip |= ValidateMaskBitsFromLayouts(device_data, cb_state->commandBuffer, mem_barrier->dstAccessMask,
6529                                                mem_barrier->newLayout, "Dest");
6530        }
6531        if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
6532            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6533                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
6534                            "%s: Image Layout cannot be transitioned to UNDEFINED or "
6535                            "PREINITIALIZED.",
6536                            funcName);
6537        }
6538        if (image_data) {
6539            auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
6540            skip |= ValidateImageAspectMask(device_data, image_data->image, image_data->createInfo.format, aspect_mask, funcName);
6541
6542            std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
6543            skip |= ValidateImageBarrierSubresourceRange(device_data, image_data, mem_barrier->subresourceRange, funcName,
6544                                                         param_name.c_str());
6545        }
6546    }
6547
6548    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
6549        auto mem_barrier = &pBufferMemBarriers[i];
6550        if (!mem_barrier) continue;
6551
6552        if (!ValidateAccessMaskPipelineStage(mem_barrier->srcAccessMask, src_stage_mask)) {
6553            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6554                            HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_1b800940, "DS",
6555                            "%s: pBufferMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X). %s", funcName,
6556                            i, mem_barrier->srcAccessMask, src_stage_mask, validation_error_map[VALIDATION_ERROR_1b800940]);
6557        }
6558        if (!ValidateAccessMaskPipelineStage(mem_barrier->dstAccessMask, dst_stage_mask)) {
6559            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6560                            HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_1b800942, "DS",
6561                            "%s: pBufferMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X). %s", funcName,
6562                            i, mem_barrier->dstAccessMask, dst_stage_mask, validation_error_map[VALIDATION_ERROR_1b800942]);
6563        }
6564        // Validate buffer barrier queue family indices
6565        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
6566             mem_barrier->srcQueueFamilyIndex >= device_data->phys_dev_properties.queue_family_properties.size()) ||
6567            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
6568             mem_barrier->dstQueueFamilyIndex >= device_data->phys_dev_properties.queue_family_properties.size())) {
6569            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6570                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
6571                            "%s: Buffer Barrier 0x%" PRIx64
6572                            " has QueueFamilyIndex greater "
6573                            "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
6574                            funcName, HandleToUint64(mem_barrier->buffer),
6575                            device_data->phys_dev_properties.queue_family_properties.size());
6576        }
6577
6578        auto buffer_state = GetBufferState(device_data, mem_barrier->buffer);
6579        if (buffer_state) {
6580            auto buffer_size = buffer_state->requirements.size;
6581            if (mem_barrier->offset >= buffer_size) {
6582                skip |= log_msg(
6583                    device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6584                    HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
6585                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
6586                    funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
6587                    HandleToUint64(buffer_size));
6588            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
6589                skip |=
6590                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6591                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
6592                            "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
6593                            " whose sum is greater than total size 0x%" PRIx64 ".",
6594                            funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
6595                            HandleToUint64(mem_barrier->size), HandleToUint64(buffer_size));
6596            }
6597        }
6598    }
6599    return skip;
6600}
6601
6602bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
6603                            VkPipelineStageFlags sourceStageMask) {
6604    bool skip = false;
6605    VkPipelineStageFlags stageMask = 0;
6606    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
6607    for (uint32_t i = 0; i < eventCount; ++i) {
6608        auto event = pCB->events[firstEventIndex + i];
6609        auto queue_data = dev_data->queueMap.find(queue);
6610        if (queue_data == dev_data->queueMap.end()) return false;
6611        auto event_data = queue_data->second.eventToStageMap.find(event);
6612        if (event_data != queue_data->second.eventToStageMap.end()) {
6613            stageMask |= event_data->second;
6614        } else {
6615            auto global_event_data = GetEventNode(dev_data, event);
6616            if (!global_event_data) {
6617                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
6618                                HandleToUint64(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
6619                                "Event 0x%" PRIx64 " cannot be waited on if it has never been set.", HandleToUint64(event));
6620            } else {
6621                stageMask |= global_event_data->stageMask;
6622            }
6623        }
6624    }
6625    // TODO: Need to validate that host_bit is only set if set event is called
6626    // but set event can be called at any time.
6627    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
6628        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6629                        HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_1e62d401, "DS",
6630                        "Submitting cmdbuffer with call to VkCmdWaitEvents "
6631                        "using srcStageMask 0x%X which must be the bitwise "
6632                        "OR of the stageMask parameters used in calls to "
6633                        "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
6634                        "used with vkSetEvent but instead is 0x%X. %s",
6635                        sourceStageMask, stageMask, validation_error_map[VALIDATION_ERROR_1e62d401]);
6636    }
6637    return skip;
6638}
6639
6640// Note that we only check bits that HAVE required queueflags -- don't care entries are skipped
6641static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
6642    {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
6643    {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
6644    {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
6645    {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
6646    {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
6647    {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
6648    {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
6649    {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
6650    {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
6651    {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
6652    {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
6653    {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
6654    {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
6655    {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
6656
6657static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
6658                                                            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
6659                                                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
6660                                                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
6661                                                            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
6662                                                            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
6663                                                            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
6664                                                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
6665                                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
6666                                                            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
6667                                                            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
6668                                                            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
6669                                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
6670                                                            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
6671
6672bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
6673                                      VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
6674                                      UNIQUE_VALIDATION_ERROR_CODE error_code) {
6675    bool skip = false;
6676    // Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags
6677    for (const auto &item : stage_flag_bit_array) {
6678        if (stage_mask & item) {
6679            if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
6680                skip |=
6681                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6682                            HandleToUint64(command_buffer), __LINE__, error_code, "DL",
6683                            "%s(): %s flag %s is not compatible with the queue family properties of this "
6684                            "command buffer. %s",
6685                            function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)),
6686                            validation_error_map[error_code]);
6687            }
6688        }
6689    }
6690    return skip;
6691}
6692
6693bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE const *cb_state,
6694                                                VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
6695                                                const char *function, UNIQUE_VALIDATION_ERROR_CODE error_code) {
6696    bool skip = false;
6697    uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
6698    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
6699    auto physical_device_state = GetPhysicalDeviceState(instance_data, dev_data->physical_device);
6700
6701    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
6702    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
6703    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
6704
6705    if (queue_family_index < physical_device_state->queue_family_properties.size()) {
6706        VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
6707
6708        if ((source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
6709            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
6710                                                     function, "srcStageMask", error_code);
6711        }
6712        if ((dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
6713            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
6714                                                     function, "dstStageMask", error_code);
6715        }
6716    }
6717    return skip;
6718}
6719
6720VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
6721                                         VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
6722                                         uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
6723                                         uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
6724                                         uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
6725    bool skip = false;
6726    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6727    unique_lock_t lock(global_lock);
6728    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
6729    if (cb_state) {
6730        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, "vkCmdWaitEvents",
6731                                                           VALIDATION_ERROR_1e600918);
6732        skip |= ValidateStageMaskGsTsEnables(dev_data, sourceStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e60090e,
6733                                             VALIDATION_ERROR_1e600912);
6734        skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e600910,
6735                                             VALIDATION_ERROR_1e600914);
6736        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6737                                      VALIDATION_ERROR_1e602415);
6738        skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
6739        skip |= ValidateBarriersToImages(dev_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
6740        skip |= ValidateBarriers(dev_data, "vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount,
6741                                 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
6742                                 pImageMemoryBarriers);
6743        if (!skip) {
6744            auto first_event_index = cb_state->events.size();
6745            for (uint32_t i = 0; i < eventCount; ++i) {
6746                auto event_state = GetEventNode(dev_data, pEvents[i]);
6747                if (event_state) {
6748                    addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(pEvents[i]), kVulkanObjectTypeEvent}, cb_state);
6749                    event_state->cb_bindings.insert(cb_state);
6750                }
6751                cb_state->waitedEvents.insert(pEvents[i]);
6752                cb_state->events.push_back(pEvents[i]);
6753            }
6754            cb_state->eventUpdates.emplace_back([=](VkQueue q){
6755                return validateEventStageMask(q, cb_state, eventCount, first_event_index, sourceStageMask);
6756            });
6757            TransitionImageLayouts(dev_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
6758        }
6759    }
6760    lock.unlock();
6761    if (!skip)
6762        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
6763                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
6764                                               imageMemoryBarrierCount, pImageMemoryBarriers);
6765}
6766
6767static bool PreCallValidateCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineStageFlags srcStageMask,
6768                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
6769                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
6770                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
6771                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
6772    bool skip = false;
6773    skip |= ValidateStageMasksAgainstQueueCapabilities(device_data, cb_state, srcStageMask, dstStageMask, "vkCmdPipelineBarrier",
6774                                                       VALIDATION_ERROR_1b80093e);
6775    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdPipelineBarrier()",
6776                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b802415);
6777    skip |= ValidateCmd(device_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
6778    skip |= ValidateStageMaskGsTsEnables(device_data, srcStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800920,
6779                                         VALIDATION_ERROR_1b800924);
6780    skip |= ValidateStageMaskGsTsEnables(device_data, dstStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800922,
6781                                         VALIDATION_ERROR_1b800926);
6782    if (cb_state->activeRenderPass) {
6783        skip |= ValidateRenderPassPipelineBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask,
6784                                                   dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
6785                                                   pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
6786        if (skip) return true;  // Early return to avoid redundant errors from below calls
6787    }
6788    skip |=
6789        ValidateBarriersToImages(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()");
6790    skip |= ValidateBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount,
6791                             pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
6792                             pImageMemoryBarriers);
6793    return skip;
6794}
6795
6796static void PreCallRecordCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
6797                                            uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
6798    TransitionImageLayouts(device_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
6799}
6800
6801VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
6802                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
6803                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
6804                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
6805                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
6806    bool skip = false;
6807    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6808    unique_lock_t lock(global_lock);
6809    GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
6810    if (cb_state) {
6811        skip |= PreCallValidateCmdPipelineBarrier(device_data, cb_state, srcStageMask, dstStageMask, dependencyFlags,
6812                                                  memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
6813                                                  pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
6814        if (!skip) {
6815            PreCallRecordCmdPipelineBarrier(device_data, cb_state, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
6816        }
6817    } else {
6818        assert(0);
6819    }
6820    lock.unlock();
6821    if (!skip) {
6822        device_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
6823                                                       pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
6824                                                       imageMemoryBarrierCount, pImageMemoryBarriers);
6825    }
6826}
6827
6828static bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
6829    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6830    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6831    if (pCB) {
6832        pCB->queryToStateMap[object] = value;
6833    }
6834    auto queue_data = dev_data->queueMap.find(queue);
6835    if (queue_data != dev_data->queueMap.end()) {
6836        queue_data->second.queryToStateMap[object] = value;
6837    }
6838    return false;
6839}
6840
6841VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
6842    bool skip = false;
6843    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6844    unique_lock_t lock(global_lock);
6845    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6846    if (pCB) {
6847        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdBeginQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6848                                      VALIDATION_ERROR_17802415);
6849        skip |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
6850    }
6851    lock.unlock();
6852
6853    if (skip) return;
6854
6855    dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
6856
6857    lock.lock();
6858    if (pCB) {
6859        QueryObject query = {queryPool, slot};
6860        pCB->activeQueries.insert(query);
6861        pCB->startedQueries.insert(query);
6862        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
6863                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, pCB);
6864    }
6865}
6866
6867VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
6868    bool skip = false;
6869    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6870    unique_lock_t lock(global_lock);
6871    QueryObject query = {queryPool, slot};
6872    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
6873    if (cb_state) {
6874        if (!cb_state->activeQueries.count(query)) {
6875            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6876                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1ae00652, "DS",
6877                            "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d. %s",
6878                            HandleToUint64(queryPool), slot, validation_error_map[VALIDATION_ERROR_1ae00652]);
6879        }
6880        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "VkCmdEndQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6881                                      VALIDATION_ERROR_1ae02415);
6882        skip |= ValidateCmd(dev_data, cb_state, CMD_ENDQUERY, "VkCmdEndQuery()");
6883    }
6884    lock.unlock();
6885
6886    if (skip) return;
6887
6888    dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
6889
6890    lock.lock();
6891    if (cb_state) {
6892        cb_state->activeQueries.erase(query);
6893        cb_state->queryUpdates.emplace_back([=](VkQueue q){return setQueryState(q, commandBuffer, query, true);});
6894        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
6895                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
6896    }
6897}
6898
6899VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
6900                                             uint32_t queryCount) {
6901    bool skip = false;
6902    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6903    unique_lock_t lock(global_lock);
6904    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
6905        skip |= insideRenderPass(dev_data, cb_state, "vkCmdResetQueryPool()", VALIDATION_ERROR_1c600017);
6906        skip |= ValidateCmd(dev_data, cb_state, CMD_RESETQUERYPOOL, "VkCmdResetQueryPool()");
6907        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "VkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6908                                      VALIDATION_ERROR_1c602415);
6909    lock.unlock();
6910
6911    if (skip) return;
6912
6913    dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
6914
6915    lock.lock();
6916    for (uint32_t i = 0; i < queryCount; i++) {
6917        QueryObject query = {queryPool, firstQuery + i};
6918        cb_state->waitedEventsBeforeQueryReset[query] = cb_state->waitedEvents;
6919        cb_state->queryUpdates.emplace_back([=](VkQueue q){return setQueryState(q, commandBuffer, query, false);});
6920    }
6921    addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
6922                            {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
6923}
6924
6925static bool IsQueryInvalid(layer_data *dev_data, QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) {
6926    QueryObject query = {queryPool, queryIndex};
6927    auto query_data = queue_data->queryToStateMap.find(query);
6928    if (query_data != queue_data->queryToStateMap.end()) {
6929        if (!query_data->second) return true;
6930    } else {
6931        auto it = dev_data->queryToStateMap.find(query);
6932        if (it == dev_data->queryToStateMap.end() || !it->second)
6933            return true;
6934    }
6935
6936    return false;
6937}
6938
6939static bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
6940    bool skip = false;
6941    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
6942    auto queue_data = GetQueueState(dev_data, queue);
6943    if (!queue_data) return false;
6944    for (uint32_t i = 0; i < queryCount; i++) {
6945        if (IsQueryInvalid(dev_data, queue_data, queryPool, firstQuery + i)) {
6946            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6947                            HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
6948                            "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
6949                            HandleToUint64(queryPool), firstQuery + i);
6950        }
6951    }
6952    return skip;
6953}
6954
6955VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
6956                                                   uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
6957                                                   VkDeviceSize stride, VkQueryResultFlags flags) {
6958    bool skip = false;
6959    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6960    unique_lock_t lock(global_lock);
6961
6962    auto cb_node = GetCBNode(dev_data, commandBuffer);
6963    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
6964    if (cb_node && dst_buff_state) {
6965        skip |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400674);
6966        // Validate that DST buffer has correct usage flags set
6967        skip |=
6968            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_19400672,
6969                                     "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
6970        skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdCopyQueryPoolResults()",
6971                                      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_19402415);
6972        skip |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
6973        skip |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400017);
6974    }
6975    lock.unlock();
6976
6977    if (skip) return;
6978
6979    dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
6980                                                     stride, flags);
6981
6982    lock.lock();
6983    if (cb_node && dst_buff_state) {
6984        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
6985        cb_node->queue_submit_functions.emplace_back([=]() {
6986            SetBufferMemoryValid(dev_data, dst_buff_state, true);
6987            return false;
6988        });
6989        cb_node->queryUpdates.emplace_back([=](VkQueue q) {
6990            return validateQuery(q, cb_node, queryPool, firstQuery, queryCount);
6991        });
6992        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
6993                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_node);
6994    }
6995}
6996
6997VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
6998                                            uint32_t offset, uint32_t size, const void *pValues) {
6999    bool skip = false;
7000    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7001    unique_lock_t lock(global_lock);
7002    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
7003    if (cb_state) {
7004        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
7005                                      VALIDATION_ERROR_1bc02415);
7006        skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
7007    }
7008    skip |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
7009    if (0 == stageFlags) {
7010        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7011                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1bc2dc03, "DS",
7012                        "vkCmdPushConstants() call has no stageFlags set. %s", validation_error_map[VALIDATION_ERROR_1bc2dc03]);
7013    }
7014
7015    // Check if specified push constant range falls within a pipeline-defined range which has matching stageFlags.
7016    // The spec doesn't seem to disallow having multiple push constant ranges with the
7017    // same offset and size, but different stageFlags.  So we can't just check the
7018    // stageFlags in the first range with matching offset and size.
7019    if (!skip) {
7020        const auto &ranges = getPipelineLayout(dev_data, layout)->push_constant_ranges;
7021        bool found_matching_range = false;
7022        for (const auto &range : ranges) {
7023            if ((stageFlags == range.stageFlags) && (offset >= range.offset) && (offset + size <= range.offset + range.size)) {
7024                found_matching_range = true;
7025                break;
7026            }
7027        }
7028        if (!found_matching_range) {
7029            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7030                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1bc002de, "DS",
7031                            "vkCmdPushConstants() stageFlags = 0x%" PRIx32
7032                            " do not match the stageFlags in any of the ranges with"
7033                            " offset = %d and size = %d in pipeline layout 0x%" PRIx64 ". %s",
7034                            (uint32_t)stageFlags, offset, size, HandleToUint64(layout),
7035                            validation_error_map[VALIDATION_ERROR_1bc002de]);
7036        }
7037    }
7038    lock.unlock();
7039    if (!skip) dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
7040}
7041
7042VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
7043                                             VkQueryPool queryPool, uint32_t slot) {
7044    bool skip = false;
7045    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7046    unique_lock_t lock(global_lock);
7047    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
7048    if (cb_state) {
7049        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWriteTimestamp()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
7050                                      VALIDATION_ERROR_1e802415);
7051        skip |= ValidateCmd(dev_data, cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
7052    }
7053    lock.unlock();
7054
7055    if (skip) return;
7056
7057    dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
7058
7059    lock.lock();
7060    if (cb_state) {
7061        QueryObject query = {queryPool, slot};
7062        cb_state->queryUpdates.emplace_back([=](VkQueue q) {return setQueryState(q, commandBuffer, query, true);});
7063    }
7064}
7065
7066static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
7067                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
7068                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
7069    bool skip = false;
7070
7071    for (uint32_t attach = 0; attach < count; attach++) {
7072        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
7073            // Attachment counts are verified elsewhere, but prevent an invalid access
7074            if (attachments[attach].attachment < fbci->attachmentCount) {
7075                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
7076                auto view_state = GetImageViewState(dev_data, *image_view);
7077                if (view_state) {
7078                    const VkImageCreateInfo *ici = &GetImageState(dev_data, view_state->create_info.image)->createInfo;
7079                    if (ici != nullptr) {
7080                        if ((ici->usage & usage_flag) == 0) {
7081                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7082                                            VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, error_code, "DS",
7083                                            "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
7084                                            "IMAGE_USAGE flags (%s). %s",
7085                                            attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag),
7086                                            validation_error_map[error_code]);
7087                        }
7088                    }
7089                }
7090            }
7091        }
7092    }
7093    return skip;
7094}
7095
7096// Validate VkFramebufferCreateInfo which includes:
7097// 1. attachmentCount equals renderPass attachmentCount
7098// 2. corresponding framebuffer and renderpass attachments have matching formats
7099// 3. corresponding framebuffer and renderpass attachments have matching sample counts
7100// 4. fb attachments only have a single mip level
7101// 5. fb attachment dimensions are each at least as large as the fb
7102// 6. fb attachments use idenity swizzle
7103// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
7104// 8. fb dimensions are within physical device limits
7105static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
7106    bool skip = false;
7107
7108    auto rp_state = GetRenderPassState(dev_data, pCreateInfo->renderPass);
7109    if (rp_state) {
7110        const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
7111        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
7112            skip |= log_msg(
7113                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7114                HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006d8, "DS",
7115                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
7116                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer. %s",
7117                pCreateInfo->attachmentCount, rpci->attachmentCount, HandleToUint64(pCreateInfo->renderPass),
7118                validation_error_map[VALIDATION_ERROR_094006d8]);
7119        } else {
7120            // attachmentCounts match, so make sure corresponding attachment details line up
7121            const VkImageView *image_views = pCreateInfo->pAttachments;
7122            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
7123                auto view_state = GetImageViewState(dev_data, image_views[i]);
7124                auto &ivci = view_state->create_info;
7125                if (ivci.format != rpci->pAttachments[i].format) {
7126                    skip |= log_msg(
7127                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7128                        HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006e0, "DS",
7129                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
7130                        "the format of "
7131                        "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
7132                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
7133                        HandleToUint64(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_094006e0]);
7134                }
7135                const VkImageCreateInfo *ici = &GetImageState(dev_data, ivci.image)->createInfo;
7136                if (ici->samples != rpci->pAttachments[i].samples) {
7137                    skip |= log_msg(
7138                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7139                        HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006e2, "DS",
7140                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
7141                        "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
7142                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
7143                        HandleToUint64(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_094006e2]);
7144                }
7145                // Verify that view only has a single mip level
7146                if (ivci.subresourceRange.levelCount != 1) {
7147                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
7148                                    0, __LINE__, VALIDATION_ERROR_094006e6, "DS",
7149                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
7150                                    "but only a single mip level (levelCount ==  1) is allowed when creating a Framebuffer. %s",
7151                                    i, ivci.subresourceRange.levelCount, validation_error_map[VALIDATION_ERROR_094006e6]);
7152                }
7153                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
7154                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
7155                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
7156                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
7157                    (mip_height < pCreateInfo->height)) {
7158                    skip |= log_msg(
7159                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
7160                        VALIDATION_ERROR_094006e4, "DS",
7161                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
7162                        "than the corresponding framebuffer dimensions. Here are the respective dimensions for attachment #%u, "
7163                        "framebuffer:\n"
7164                        "width: %u, %u\n"
7165                        "height: %u, %u\n"
7166                        "layerCount: %u, %u\n%s",
7167                        i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height, pCreateInfo->height,
7168                        ivci.subresourceRange.layerCount, pCreateInfo->layers, validation_error_map[VALIDATION_ERROR_094006e4]);
7169                }
7170                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
7171                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
7172                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
7173                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
7174                    skip |= log_msg(
7175                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
7176                        VALIDATION_ERROR_094006e8, "DS",
7177                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identy swizzle. All framebuffer "
7178                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
7179                        "r swizzle = %s\n"
7180                        "g swizzle = %s\n"
7181                        "b swizzle = %s\n"
7182                        "a swizzle = %s\n"
7183                        "%s",
7184                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
7185                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a),
7186                        validation_error_map[VALIDATION_ERROR_094006e8]);
7187                }
7188            }
7189        }
7190        // Verify correct attachment usage flags
7191        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
7192            // Verify input attachments:
7193            skip |=
7194                MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
7195                           pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_094006de);
7196            // Verify color attachments:
7197            skip |=
7198                MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
7199                           pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_094006da);
7200            // Verify depth/stencil attachments:
7201            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
7202                skip |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
7203                                   VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_094006dc);
7204            }
7205        }
7206    }
7207    // Verify FB dimensions are within physical device limits
7208    if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
7209        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
7210                        VALIDATION_ERROR_094006ec, "DS",
7211                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. "
7212                        "Requested width: %u, device max: %u\n"
7213                        "%s",
7214                        pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
7215                        validation_error_map[VALIDATION_ERROR_094006ec]);
7216    }
7217    if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
7218        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
7219                        VALIDATION_ERROR_094006f0, "DS",
7220                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. "
7221                        "Requested height: %u, device max: %u\n"
7222                        "%s",
7223                        pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
7224                        validation_error_map[VALIDATION_ERROR_094006f0]);
7225    }
7226    if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
7227        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
7228                        VALIDATION_ERROR_094006f4, "DS",
7229                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. "
7230                        "Requested layers: %u, device max: %u\n"
7231                        "%s",
7232                        pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers,
7233                        validation_error_map[VALIDATION_ERROR_094006f4]);
7234    }
7235    // Verify FB dimensions are greater than zero
7236    if (pCreateInfo->width <= 0) {
7237        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
7238                        VALIDATION_ERROR_094006ea, "DS",
7239                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero. %s",
7240                        validation_error_map[VALIDATION_ERROR_094006ea]);
7241    }
7242    if (pCreateInfo->height <= 0) {
7243        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
7244                        VALIDATION_ERROR_094006ee, "DS",
7245                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero. %s",
7246                        validation_error_map[VALIDATION_ERROR_094006ee]);
7247    }
7248    if (pCreateInfo->layers <= 0) {
7249        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
7250                        VALIDATION_ERROR_094006f2, "DS",
7251                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero. %s",
7252                        validation_error_map[VALIDATION_ERROR_094006f2]);
7253    }
7254    return skip;
7255}
7256
7257// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
7258//  Return true if an error is encountered and callback returns true to skip call down chain
7259//   false indicates that call down chain should proceed
7260static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
7261    // TODO : Verify that renderPass FB is created with is compatible with FB
7262    bool skip = false;
7263    skip |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
7264    return skip;
7265}
7266
7267// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
7268static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
7269    // Shadow create info and store in map
7270    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
7271        new FRAMEBUFFER_STATE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));
7272
7273    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
7274        VkImageView view = pCreateInfo->pAttachments[i];
7275        auto view_state = GetImageViewState(dev_data, view);
7276        if (!view_state) {
7277            continue;
7278        }
7279        MT_FB_ATTACHMENT_INFO fb_info;
7280        fb_info.view_state = view_state;
7281        fb_info.image = view_state->create_info.image;
7282        fb_state->attachments.push_back(fb_info);
7283    }
7284    dev_data->frameBufferMap[fb] = std::move(fb_state);
7285}
7286
7287VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
7288                                                 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
7289    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
7290    unique_lock_t lock(global_lock);
7291    bool skip = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
7292    lock.unlock();
7293
7294    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
7295
7296    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
7297
7298    if (VK_SUCCESS == result) {
7299        lock.lock();
7300        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
7301        lock.unlock();
7302    }
7303    return result;
7304}
7305
7306static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
7307                           std::unordered_set<uint32_t> &processed_nodes) {
7308    // If we have already checked this node we have not found a dependency path so return false.
7309    if (processed_nodes.count(index)) return false;
7310    processed_nodes.insert(index);
7311    const DAGNode &node = subpass_to_node[index];
7312    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
7313    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
7314        for (auto elem : node.prev) {
7315            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
7316        }
7317    } else {
7318        return true;
7319    }
7320    return false;
7321}
7322
7323static bool CheckDependencyExists(const layer_data *dev_data, const uint32_t subpass,
7324                                  const std::vector<uint32_t> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node,
7325                                  bool &skip) {
7326    bool result = true;
7327    // Loop through all subpasses that share the same attachment and make sure a dependency exists
7328    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
7329        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) continue;
7330        const DAGNode &node = subpass_to_node[subpass];
7331        // Check for a specified dependency between the two nodes. If one exists we are done.
7332        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
7333        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
7334        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
7335            // If no dependency exits an implicit dependency still might. If not, throw an error.
7336            std::unordered_set<uint32_t> processed_nodes;
7337            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
7338                  FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
7339                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7340                                __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
7341                                "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
7342                                dependent_subpasses[k]);
7343                result = false;
7344            }
7345        }
7346    }
7347    return result;
7348}
7349
7350static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
7351                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
7352    const DAGNode &node = subpass_to_node[index];
7353    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
7354    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
7355    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
7356        if (attachment == subpass.pColorAttachments[j].attachment) return true;
7357    }
7358    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7359        if (attachment == subpass.pInputAttachments[j].attachment) return true;
7360    }
7361    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
7362        if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
7363    }
7364    bool result = false;
7365    // Loop through previous nodes and see if any of them write to the attachment.
7366    for (auto elem : node.prev) {
7367        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
7368    }
7369    // If the attachment was written to by a previous node than this node needs to preserve it.
7370    if (result && depth > 0) {
7371        bool has_preserved = false;
7372        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
7373            if (subpass.pPreserveAttachments[j] == attachment) {
7374                has_preserved = true;
7375                break;
7376            }
7377        }
7378        if (!has_preserved) {
7379            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7380                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
7381                            "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
7382        }
7383    }
7384    return result;
7385}
7386
7387template <class T>
7388bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
7389    return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) ||
7390           ((offset1 > offset2) && (offset1 < (offset2 + size2)));
7391}
7392
7393bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
7394    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
7395            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
7396}
7397
7398static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
7399                                 RENDER_PASS_STATE const *renderPass) {
7400    bool skip = false;
7401    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
7402    auto const pCreateInfo = renderPass->createInfo.ptr();
7403    auto const &subpass_to_node = renderPass->subpassToNode;
7404    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
7405    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
7406    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
7407    // Find overlapping attachments
7408    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
7409        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
7410            VkImageView viewi = pFramebufferInfo->pAttachments[i];
7411            VkImageView viewj = pFramebufferInfo->pAttachments[j];
7412            if (viewi == viewj) {
7413                overlapping_attachments[i].push_back(j);
7414                overlapping_attachments[j].push_back(i);
7415                continue;
7416            }
7417            auto view_state_i = GetImageViewState(dev_data, viewi);
7418            auto view_state_j = GetImageViewState(dev_data, viewj);
7419            if (!view_state_i || !view_state_j) {
7420                continue;
7421            }
7422            auto view_ci_i = view_state_i->create_info;
7423            auto view_ci_j = view_state_j->create_info;
7424            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
7425                overlapping_attachments[i].push_back(j);
7426                overlapping_attachments[j].push_back(i);
7427                continue;
7428            }
7429            auto image_data_i = GetImageState(dev_data, view_ci_i.image);
7430            auto image_data_j = GetImageState(dev_data, view_ci_j.image);
7431            if (!image_data_i || !image_data_j) {
7432                continue;
7433            }
7434            if (image_data_i->binding.mem == image_data_j->binding.mem &&
7435                isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
7436                                   image_data_j->binding.size)) {
7437                overlapping_attachments[i].push_back(j);
7438                overlapping_attachments[j].push_back(i);
7439            }
7440        }
7441    }
7442    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
7443        uint32_t attachment = i;
7444        for (auto other_attachment : overlapping_attachments[i]) {
7445            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
7446                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
7447                                HandleToUint64(framebuffer->framebuffer), __LINE__, VALIDATION_ERROR_12200682, "DS",
7448                                "Attachment %d aliases attachment %d but doesn't "
7449                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
7450                                attachment, other_attachment, validation_error_map[VALIDATION_ERROR_12200682]);
7451            }
7452            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
7453                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
7454                                HandleToUint64(framebuffer->framebuffer), __LINE__, VALIDATION_ERROR_12200682, "DS",
7455                                "Attachment %d aliases attachment %d but doesn't "
7456                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
7457                                other_attachment, attachment, validation_error_map[VALIDATION_ERROR_12200682]);
7458            }
7459        }
7460    }
7461    // Find for each attachment the subpasses that use them.
7462    unordered_set<uint32_t> attachmentIndices;
7463    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7464        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
7465        attachmentIndices.clear();
7466        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7467            uint32_t attachment = subpass.pInputAttachments[j].attachment;
7468            if (attachment == VK_ATTACHMENT_UNUSED) continue;
7469            input_attachment_to_subpass[attachment].push_back(i);
7470            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
7471                input_attachment_to_subpass[overlapping_attachment].push_back(i);
7472            }
7473        }
7474        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
7475            uint32_t attachment = subpass.pColorAttachments[j].attachment;
7476            if (attachment == VK_ATTACHMENT_UNUSED) continue;
7477            output_attachment_to_subpass[attachment].push_back(i);
7478            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
7479                output_attachment_to_subpass[overlapping_attachment].push_back(i);
7480            }
7481            attachmentIndices.insert(attachment);
7482        }
7483        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
7484            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
7485            output_attachment_to_subpass[attachment].push_back(i);
7486            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
7487                output_attachment_to_subpass[overlapping_attachment].push_back(i);
7488            }
7489
7490            if (attachmentIndices.count(attachment)) {
7491                skip |=
7492                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7493                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
7494                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
7495            }
7496        }
7497    }
7498    // If there is a dependency needed make sure one exists
7499    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7500        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
7501        // If the attachment is an input then all subpasses that output must have a dependency relationship
7502        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7503            uint32_t attachment = subpass.pInputAttachments[j].attachment;
7504            if (attachment == VK_ATTACHMENT_UNUSED) continue;
7505            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
7506        }
7507        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
7508        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
7509            uint32_t attachment = subpass.pColorAttachments[j].attachment;
7510            if (attachment == VK_ATTACHMENT_UNUSED) continue;
7511            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
7512            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
7513        }
7514        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
7515            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
7516            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
7517            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
7518        }
7519    }
7520    // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was
7521    // written.
7522    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7523        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
7524        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7525            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
7526        }
7527    }
7528    return skip;
7529}
7530
7531static bool CreatePassDAG(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo,
7532                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency,
7533                          std::vector<int32_t> &subpass_to_dep_index) {
7534    bool skip = false;
7535    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7536        DAGNode &subpass_node = subpass_to_node[i];
7537        subpass_node.pass = i;
7538        subpass_to_dep_index[i] = -1;  // Default to no dependency and overwrite below as needed
7539    }
7540    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
7541        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
7542        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
7543            if (dependency.srcSubpass == dependency.dstSubpass) {
7544                skip |=
7545                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7546                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
7547            }
7548        } else if (dependency.srcSubpass > dependency.dstSubpass) {
7549            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7550                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
7551                            "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
7552        } else if (dependency.srcSubpass == dependency.dstSubpass) {
7553            has_self_dependency[dependency.srcSubpass] = true;
7554        } else {
7555            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
7556            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
7557        }
7558        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
7559            subpass_to_dep_index[dependency.srcSubpass] = i;
7560        }
7561    }
7562    return skip;
7563}
7564
7565VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
7566                                                  const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
7567    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
7568    bool spirv_valid;
7569
7570    if (PreCallValidateCreateShaderModule(dev_data, pCreateInfo, &spirv_valid))
7571        return VK_ERROR_VALIDATION_FAILED_EXT;
7572
7573    VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
7574
7575    if (res == VK_SUCCESS) {
7576        lock_guard_t lock(global_lock);
7577        unique_ptr<shader_module> new_shader_module(spirv_valid ? new shader_module(pCreateInfo) : new shader_module());
7578        dev_data->shaderModuleMap[*pShaderModule] = std::move(new_shader_module);
7579    }
7580    return res;
7581}
7582
7583static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
7584    bool skip = false;
7585    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
7586        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
7587                        VALIDATION_ERROR_12200684, "DS",
7588                        "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d. %s", type,
7589                        attachment, attachment_count, validation_error_map[VALIDATION_ERROR_12200684]);
7590    }
7591    return skip;
7592}
7593
7594static bool IsPowerOfTwo(unsigned x) { return x && !(x & (x - 1)); }
7595
7596static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
7597    bool skip = false;
7598    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7599        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
7600        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
7601            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7602                            __LINE__, VALIDATION_ERROR_14000698, "DS",
7603                            "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS. %s", i,
7604                            validation_error_map[VALIDATION_ERROR_14000698]);
7605        }
7606
7607        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
7608            uint32_t attachment = subpass.pPreserveAttachments[j];
7609            if (attachment == VK_ATTACHMENT_UNUSED) {
7610                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7611                                __LINE__, VALIDATION_ERROR_140006aa, "DS",
7612                                "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED. %s", j,
7613                                validation_error_map[VALIDATION_ERROR_140006aa]);
7614            } else {
7615                skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
7616
7617                bool found = (subpass.pDepthStencilAttachment != NULL && subpass.pDepthStencilAttachment->attachment == attachment);
7618                for (uint32_t r = 0; !found && r < subpass.inputAttachmentCount; ++r) {
7619                    found = (subpass.pInputAttachments[r].attachment == attachment);
7620                }
7621                for (uint32_t r = 0; !found && r < subpass.colorAttachmentCount; ++r) {
7622                    found = (subpass.pColorAttachments[r].attachment == attachment) ||
7623                            (subpass.pResolveAttachments != NULL && subpass.pResolveAttachments[r].attachment == attachment);
7624                }
7625                if (found) {
7626                    skip |= log_msg(
7627                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
7628                        VALIDATION_ERROR_140006ac, "DS",
7629                        "CreateRenderPass: subpass %u pPreserveAttachments[%u] (%u) must not be used elsewhere in the subpass. %s",
7630                        i, j, attachment, validation_error_map[VALIDATION_ERROR_140006ac]);
7631                }
7632            }
7633        }
7634
7635        auto subpass_performs_resolve =
7636            subpass.pResolveAttachments &&
7637            std::any_of(subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
7638                        [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
7639
7640        unsigned sample_count = 0;
7641
7642        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
7643            uint32_t attachment;
7644            if (subpass.pResolveAttachments) {
7645                attachment = subpass.pResolveAttachments[j].attachment;
7646                skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
7647
7648                if (!skip && attachment != VK_ATTACHMENT_UNUSED &&
7649                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
7650                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
7651                                    0, __LINE__, VALIDATION_ERROR_140006a2, "DS",
7652                                    "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
7653                                    "which must have VK_SAMPLE_COUNT_1_BIT but has %s. %s",
7654                                    i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
7655                                    validation_error_map[VALIDATION_ERROR_140006a2]);
7656                }
7657
7658                if (!skip && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
7659                    subpass.pColorAttachments[j].attachment == VK_ATTACHMENT_UNUSED) {
7660                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
7661                                    0, __LINE__, VALIDATION_ERROR_1400069e, "DS",
7662                                    "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
7663                                    "which has attachment=VK_ATTACHMENT_UNUSED. %s",
7664                                    i, attachment, validation_error_map[VALIDATION_ERROR_1400069e]);
7665                }
7666            }
7667            attachment = subpass.pColorAttachments[j].attachment;
7668            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
7669
7670            if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
7671                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
7672
7673                if (subpass_performs_resolve && pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
7674                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
7675                                    0, __LINE__, VALIDATION_ERROR_140006a0, "DS",
7676                                    "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
7677                                    "which has VK_SAMPLE_COUNT_1_BIT. %s",
7678                                    i, attachment, validation_error_map[VALIDATION_ERROR_140006a0]);
7679                }
7680
7681                if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED) {
7682                    const auto &color_desc = pCreateInfo->pAttachments[attachment];
7683                    const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
7684                    if (color_desc.format != resolve_desc.format) {
7685                        skip |=
7686                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
7687                                    0, __LINE__, VALIDATION_ERROR_140006a4, "DS",
7688                                    "CreateRenderPass:  Subpass %u pColorAttachments[%u] resolves to an attachment with a "
7689                                    "different format. "
7690                                    "color format: %u, resolve format: %u. %s",
7691                                    i, j, color_desc.format, resolve_desc.format, validation_error_map[VALIDATION_ERROR_140006a4]);
7692                    }
7693                }
7694            }
7695        }
7696
7697        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
7698            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
7699            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
7700
7701            if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
7702                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
7703            }
7704        }
7705
7706        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7707            uint32_t attachment = subpass.pInputAttachments[j].attachment;
7708            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
7709        }
7710
7711        if (sample_count && !IsPowerOfTwo(sample_count)) {
7712            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7713                            __LINE__, VALIDATION_ERROR_0082b401, "DS",
7714                            "CreateRenderPass:  Subpass %u attempts to render to "
7715                            "attachments with inconsistent sample counts. %s",
7716                            i, validation_error_map[VALIDATION_ERROR_0082b401]);
7717        }
7718    }
7719    return skip;
7720}
7721
7722static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass,
7723                                   uint32_t index,
7724                                   bool is_read) {
7725    if (index == VK_ATTACHMENT_UNUSED)
7726        return;
7727
7728    if (!render_pass->attachment_first_read.count(index))
7729        render_pass->attachment_first_read[index] = is_read;
7730}
7731
7732VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
7733                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
7734    bool skip = false;
7735    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
7736
7737    unique_lock_t lock(global_lock);
7738    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
7739    //       ValidateLayouts.
7740    skip |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
7741    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
7742        skip |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].srcStageMask, "vkCreateRenderPass()",
7743                                             VALIDATION_ERROR_13e006b8, VALIDATION_ERROR_13e006bc);
7744        skip |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].dstStageMask, "vkCreateRenderPass()",
7745                                             VALIDATION_ERROR_13e006ba, VALIDATION_ERROR_13e006be);
7746    }
7747    if (!skip) {
7748        skip |= ValidateLayouts(dev_data, device, pCreateInfo);
7749    }
7750    lock.unlock();
7751
7752    if (skip) {
7753        return VK_ERROR_VALIDATION_FAILED_EXT;
7754    }
7755
7756    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
7757
7758    if (VK_SUCCESS == result) {
7759        lock.lock();
7760
7761        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
7762        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
7763        std::vector<int32_t> subpass_to_dep_index(pCreateInfo->subpassCount);
7764        skip |= CreatePassDAG(dev_data, pCreateInfo, subpass_to_node, has_self_dependency, subpass_to_dep_index);
7765
7766        auto render_pass = unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo));
7767        render_pass->renderPass = *pRenderPass;
7768        render_pass->hasSelfDependency = has_self_dependency;
7769        render_pass->subpassToNode = subpass_to_node;
7770        render_pass->subpass_to_dependency_index = subpass_to_dep_index;
7771
7772        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7773            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
7774            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
7775                MarkAttachmentFirstUse(render_pass.get(), subpass.pColorAttachments[j].attachment, false);
7776
7777                // resolve attachments are considered to be written
7778                if (subpass.pResolveAttachments) {
7779                    MarkAttachmentFirstUse(render_pass.get(), subpass.pResolveAttachments[j].attachment, false);
7780                }
7781            }
7782            if (subpass.pDepthStencilAttachment) {
7783                MarkAttachmentFirstUse(render_pass.get(), subpass.pDepthStencilAttachment->attachment, false);
7784            }
7785            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7786                MarkAttachmentFirstUse(render_pass.get(), subpass.pInputAttachments[j].attachment, true);
7787            }
7788        }
7789
7790        dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
7791    }
7792    return result;
7793}
7794
7795static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, char const *cmd_name,
7796                                         UNIQUE_VALIDATION_ERROR_CODE error_code) {
7797    bool skip = false;
7798    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
7799        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7800                        HandleToUint64(pCB->commandBuffer), __LINE__, error_code, "DS",
7801                        "Cannot execute command %s on a secondary command buffer. %s", cmd_name, validation_error_map[error_code]);
7802    }
7803    return skip;
7804}
7805
7806static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
7807    bool skip = false;
7808    const safe_VkFramebufferCreateInfo *pFramebufferInfo =
7809        &GetFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
7810    if (pRenderPassBegin->renderArea.offset.x < 0 ||
7811        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
7812        pRenderPassBegin->renderArea.offset.y < 0 ||
7813        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
7814        skip |= static_cast<bool>(log_msg(
7815            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
7816            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
7817            "Cannot execute a render pass with renderArea not within the bound of the "
7818            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
7819            "height %d.",
7820            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
7821            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
7822    }
7823    return skip;
7824}
7825
7826// If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the
7827// [load|store]Op flag must be checked
7828// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
7829template <typename T>
7830static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
7831    if (color_depth_op != op && stencil_op != op) {
7832        return false;
7833    }
7834    bool check_color_depth_load_op = !FormatIsStencilOnly(format);
7835    bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;
7836
7837    return ((check_color_depth_load_op && (color_depth_op == op)) ||
7838            (check_stencil_load_op && (stencil_op == op)));
7839}
7840
7841VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
7842                                              VkSubpassContents contents) {
7843    bool skip = false;
7844    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7845    unique_lock_t lock(global_lock);
7846    GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
7847    auto render_pass_state = pRenderPassBegin ? GetRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
7848    auto framebuffer = pRenderPassBegin ? GetFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
7849    if (cb_node) {
7850        if (render_pass_state) {
7851            uint32_t clear_op_size = 0;  // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
7852            cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
7853            for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
7854                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
7855                auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
7856                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
7857                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
7858                    clear_op_size = static_cast<uint32_t>(i) + 1;
7859                    std::function<bool()> function = [=]() {
7860                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
7861                        return false;
7862                    };
7863                    cb_node->queue_submit_functions.push_back(function);
7864                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
7865                                                                pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
7866                    std::function<bool()> function = [=]() {
7867                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
7868                        return false;
7869                    };
7870                    cb_node->queue_submit_functions.push_back(function);
7871                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
7872                                                                pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_LOAD)) {
7873                    std::function<bool()> function = [=]() {
7874                        return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
7875                                                          "vkCmdBeginRenderPass()");
7876                    };
7877                    cb_node->queue_submit_functions.push_back(function);
7878                }
7879                if (render_pass_state->attachment_first_read[i]) {
7880                    std::function<bool()> function = [=]() {
7881                        return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
7882                                                          "vkCmdBeginRenderPass()");
7883                    };
7884                    cb_node->queue_submit_functions.push_back(function);
7885                }
7886            }
7887            if (clear_op_size > pRenderPassBegin->clearValueCount) {
7888                skip |= log_msg(
7889                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7890                    HandleToUint64(render_pass_state->renderPass), __LINE__, VALIDATION_ERROR_1200070c, "DS",
7891                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there must "
7892                    "be at least %u entries in pClearValues array to account for the highest index attachment in renderPass "
7893                    "0x%" PRIx64
7894                    " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array "
7895                    "is indexed by attachment number so even if some pClearValues entries between 0 and %u correspond to "
7896                    "attachments that aren't cleared they will be ignored. %s",
7897                    pRenderPassBegin->clearValueCount, clear_op_size, HandleToUint64(render_pass_state->renderPass), clear_op_size,
7898                    clear_op_size - 1, validation_error_map[VALIDATION_ERROR_1200070c]);
7899            }
7900            skip |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
7901            skip |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin,
7902                                                          GetFramebufferState(dev_data, pRenderPassBegin->framebuffer));
7903            skip |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00017);
7904            skip |= ValidateDependencies(dev_data, framebuffer, render_pass_state);
7905            skip |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00019);
7906            skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBeginRenderPass()", VK_QUEUE_GRAPHICS_BIT,
7907                                          VALIDATION_ERROR_17a02415);
7908            skip |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
7909            cb_node->activeRenderPass = render_pass_state;
7910            // This is a shallow copy as that is all that is needed for now
7911            cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
7912            cb_node->activeSubpass = 0;
7913            cb_node->activeSubpassContents = contents;
7914            cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
7915            // Connect this framebuffer and its children to this cmdBuffer
7916            AddFramebufferBinding(dev_data, cb_node, framebuffer);
7917            // transition attachments to the correct layouts for beginning of renderPass and first subpass
7918            TransitionBeginRenderPassLayouts(dev_data, cb_node, render_pass_state, framebuffer);
7919        }
7920    }
7921    lock.unlock();
7922    if (!skip) {
7923        dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
7924    }
7925}
7926
7927VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
7928    bool skip = false;
7929    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7930    unique_lock_t lock(global_lock);
7931    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
7932    if (pCB) {
7933        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600019);
7934        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdNextSubpass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b602415);
7935        skip |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
7936        skip |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600017);
7937
7938        auto subpassCount = pCB->activeRenderPass->createInfo.subpassCount;
7939        if (pCB->activeSubpass == subpassCount - 1) {
7940            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7941                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1b60071a, "DS",
7942                            "vkCmdNextSubpass(): Attempted to advance beyond final subpass. %s",
7943                            validation_error_map[VALIDATION_ERROR_1b60071a]);
7944        }
7945    }
7946    lock.unlock();
7947
7948    if (skip) return;
7949
7950    dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
7951
7952    if (pCB) {
7953        lock.lock();
7954        pCB->activeSubpass++;
7955        pCB->activeSubpassContents = contents;
7956        TransitionSubpassLayouts(dev_data, pCB, pCB->activeRenderPass, pCB->activeSubpass,
7957                                 GetFramebufferState(dev_data, pCB->activeRenderPassBeginInfo.framebuffer));
7958    }
7959}
7960
7961VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
7962    bool skip = false;
7963    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7964    unique_lock_t lock(global_lock);
7965    auto pCB = GetCBNode(dev_data, commandBuffer);
7966    FRAMEBUFFER_STATE *framebuffer = NULL;
7967    if (pCB) {
7968        RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
7969        framebuffer = GetFramebufferState(dev_data, pCB->activeFramebuffer);
7970        if (rp_state) {
7971            if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
7972                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7973                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
7974                                VALIDATION_ERROR_1b00071c, "DS", "vkCmdEndRenderPass(): Called before reaching final subpass. %s",
7975                                validation_error_map[VALIDATION_ERROR_1b00071c]);
7976            }
7977
7978            for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
7979                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
7980                auto pAttachment = &rp_state->createInfo.pAttachments[i];
7981                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp, pAttachment->stencilStoreOp,
7982                                                         VK_ATTACHMENT_STORE_OP_STORE)) {
7983                    std::function<bool()> function = [=]() {
7984                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
7985                        return false;
7986                    };
7987                    pCB->queue_submit_functions.push_back(function);
7988                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
7989                                                                pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
7990                    std::function<bool()> function = [=]() {
7991                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
7992                        return false;
7993                    };
7994                    pCB->queue_submit_functions.push_back(function);
7995                }
7996            }
7997        }
7998        skip |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderpass()", VALIDATION_ERROR_1b000017);
7999        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_1b000019);
8000        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdEndRenderPass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b002415);
8001        skip |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
8002    }
8003    lock.unlock();
8004
8005    if (skip) return;
8006
8007    dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
8008
8009    if (pCB) {
8010        lock.lock();
8011        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, framebuffer);
8012        pCB->activeRenderPass = nullptr;
8013        pCB->activeSubpass = 0;
8014        pCB->activeFramebuffer = VK_NULL_HANDLE;
8015    }
8016}
8017
8018static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
8019                                        uint32_t secondaryAttach, const char *msg) {
8020    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8021                   HandleToUint64(secondaryBuffer), __LINE__, VALIDATION_ERROR_1b2000c4, "DS",
8022                   "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64
8023                   " which has a render pass "
8024                   "that is not compatible with the Primary Cmd Buffer current render pass. "
8025                   "Attachment %u is not compatible with %u: %s. %s",
8026                   HandleToUint64(secondaryBuffer), primaryAttach, secondaryAttach, msg,
8027                   validation_error_map[VALIDATION_ERROR_1b2000c4]);
8028}
8029
8030static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
8031                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
8032                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
8033                                            uint32_t secondaryAttach, bool is_multi) {
8034    bool skip = false;
8035    if (primaryPassCI->attachmentCount <= primaryAttach) {
8036        primaryAttach = VK_ATTACHMENT_UNUSED;
8037    }
8038    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
8039        secondaryAttach = VK_ATTACHMENT_UNUSED;
8040    }
8041    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
8042        return skip;
8043    }
8044    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
8045        skip |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
8046                                            "The first is unused while the second is not.");
8047        return skip;
8048    }
8049    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
8050        skip |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
8051                                            "The second is unused while the first is not.");
8052        return skip;
8053    }
8054    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
8055        skip |=
8056            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
8057    }
8058    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
8059        skip |=
8060            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
8061    }
8062    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
8063        skip |=
8064            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
8065    }
8066    return skip;
8067}
8068
8069static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
8070                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
8071                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
8072    bool skip = false;
8073    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
8074    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
8075    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
8076    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
8077        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
8078        if (i < primary_desc.inputAttachmentCount) {
8079            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
8080        }
8081        if (i < secondary_desc.inputAttachmentCount) {
8082            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
8083        }
8084        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
8085                                                secondaryPassCI, secondary_input_attach, is_multi);
8086    }
8087    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
8088    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
8089        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
8090        if (i < primary_desc.colorAttachmentCount) {
8091            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
8092        }
8093        if (i < secondary_desc.colorAttachmentCount) {
8094            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
8095        }
8096        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
8097                                                secondaryPassCI, secondary_color_attach, is_multi);
8098        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
8099        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
8100            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
8101        }
8102        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
8103            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
8104        }
8105        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach, secondaryBuffer,
8106                                                secondaryPassCI, secondary_resolve_attach, is_multi);
8107    }
8108    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
8109    if (primary_desc.pDepthStencilAttachment) {
8110        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
8111    }
8112    if (secondary_desc.pDepthStencilAttachment) {
8113        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
8114    }
8115    skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach, secondaryBuffer,
8116                                            secondaryPassCI, secondary_depthstencil_attach, is_multi);
8117    return skip;
8118}
8119
8120// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
8121//  This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
8122//  will then feed into this function
8123static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
8124                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
8125                                            VkRenderPassCreateInfo const *secondaryPassCI) {
8126    bool skip = false;
8127
8128    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
8129        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8130                        HandleToUint64(primaryBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
8131                        "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
8132                        " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
8133                        " that has a subpassCount of %u.",
8134                        HandleToUint64(secondaryBuffer), secondaryPassCI->subpassCount, HandleToUint64(primaryBuffer),
8135                        primaryPassCI->subpassCount);
8136    } else {
8137        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
8138            skip |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
8139                                                 primaryPassCI->subpassCount > 1);
8140        }
8141    }
8142    return skip;
8143}
8144
8145static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
8146                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
8147    bool skip = false;
8148    if (!pSubCB->beginInfo.pInheritanceInfo) {
8149        return skip;
8150    }
8151    VkFramebuffer primary_fb = pCB->activeFramebuffer;
8152    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
8153    if (secondary_fb != VK_NULL_HANDLE) {
8154        if (primary_fb != secondary_fb) {
8155            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8156                            HandleToUint64(primaryBuffer), __LINE__, VALIDATION_ERROR_1b2000c6, "DS",
8157                            "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64
8158                            " which has a framebuffer 0x%" PRIx64
8159                            " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ". %s",
8160                            HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb), HandleToUint64(primary_fb),
8161                            validation_error_map[VALIDATION_ERROR_1b2000c6]);
8162        }
8163        auto fb = GetFramebufferState(dev_data, secondary_fb);
8164        if (!fb) {
8165            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8166                            HandleToUint64(primaryBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
8167                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
8168                            "which has invalid framebuffer 0x%" PRIx64 ".",
8169                            (void *)secondaryBuffer, HandleToUint64(secondary_fb));
8170            return skip;
8171        }
8172        auto cb_renderpass = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
8173        if (cb_renderpass->renderPass != fb->createInfo.renderPass) {
8174            skip |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
8175                                                    cb_renderpass->createInfo.ptr());
8176        }
8177    }
8178    return skip;
8179}
8180
8181static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
8182    bool skip = false;
8183    unordered_set<int> activeTypes;
8184    for (auto queryObject : pCB->activeQueries) {
8185        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
8186        if (queryPoolData != dev_data->queryPoolMap.end()) {
8187            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
8188                pSubCB->beginInfo.pInheritanceInfo) {
8189                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
8190                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
8191                    skip |= log_msg(
8192                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8193                        HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_1b2000d0, "DS",
8194                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
8195                        "which has invalid active query pool 0x%" PRIx64
8196                        ". Pipeline statistics is being queried so the command "
8197                        "buffer must have all bits set on the queryPool. %s",
8198                        pCB->commandBuffer, HandleToUint64(queryPoolData->first), validation_error_map[VALIDATION_ERROR_1b2000d0]);
8199                }
8200            }
8201            activeTypes.insert(queryPoolData->second.createInfo.queryType);
8202        }
8203    }
8204    for (auto queryObject : pSubCB->startedQueries) {
8205        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
8206        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
8207            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8208                            HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
8209                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
8210                            "which has invalid active query pool 0x%" PRIx64
8211                            "of type %d but a query of that type has been started on "
8212                            "secondary Cmd Buffer 0x%p.",
8213                            pCB->commandBuffer, HandleToUint64(queryPoolData->first), queryPoolData->second.createInfo.queryType,
8214                            pSubCB->commandBuffer);
8215        }
8216    }
8217
8218    auto primary_pool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
8219    auto secondary_pool = GetCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
8220    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
8221        skip |=
8222            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8223                    HandleToUint64(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
8224                    "vkCmdExecuteCommands(): Primary command buffer 0x%p"
8225                    " created in queue family %d has secondary command buffer 0x%p created in queue family %d.",
8226                    pCB->commandBuffer, primary_pool->queueFamilyIndex, pSubCB->commandBuffer, secondary_pool->queueFamilyIndex);
8227    }
8228
8229    return skip;
8230}
8231
8232VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
8233                                              const VkCommandBuffer *pCommandBuffers) {
8234    bool skip = false;
8235    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8236    unique_lock_t lock(global_lock);
8237    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
8238    if (pCB) {
8239        GLOBAL_CB_NODE *pSubCB = NULL;
8240        for (uint32_t i = 0; i < commandBuffersCount; i++) {
8241            pSubCB = GetCBNode(dev_data, pCommandBuffers[i]);
8242            assert(pSubCB);
8243            if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
8244                skip |=
8245                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8246                            HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000b0, "DS",
8247                            "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
8248                            "array. All cmd buffers in pCommandBuffers array must be secondary. %s",
8249                            pCommandBuffers[i], i, validation_error_map[VALIDATION_ERROR_1b2000b0]);
8250            } else if (pCB->activeRenderPass) {  // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
8251                if (pSubCB->beginInfo.pInheritanceInfo != nullptr) {
8252                    auto secondary_rp_state = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
8253                    if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
8254                        skip |= log_msg(
8255                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8256                            HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000c0, "DS",
8257                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
8258                            ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT "
8259                            "set. %s",
8260                            pCommandBuffers[i], HandleToUint64(pCB->activeRenderPass->renderPass),
8261                            validation_error_map[VALIDATION_ERROR_1b2000c0]);
8262                    } else {
8263                        // Make sure render pass is compatible with parent command buffer pass if has continue
8264                        if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
8265                            skip |=
8266                                validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
8267                                                                pCommandBuffers[i], secondary_rp_state->createInfo.ptr());
8268                        }
8269                        //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
8270                        skip |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
8271                        if (VK_NULL_HANDLE == pSubCB->activeFramebuffer) {
8272                            //  Inherit primary's activeFramebuffer and while running validate functions
8273                            for (auto &function : pSubCB->cmd_execute_commands_functions) {
8274                                skip |= function(pCB->activeFramebuffer);
8275                            }
8276                        }
8277                    }
8278                    string errorString = "";
8279                    // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
8280                    if ((pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) &&
8281                        !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
8282                                                         secondary_rp_state->createInfo.ptr(), errorString)) {
8283                        skip |= log_msg(
8284                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8285                            HandleToUint64(pCommandBuffers[i]), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
8286                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
8287                            ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
8288                            pCommandBuffers[i], HandleToUint64(pSubCB->beginInfo.pInheritanceInfo->renderPass), commandBuffer,
8289                            HandleToUint64(pCB->activeRenderPass->renderPass), errorString.c_str());
8290                    }
8291                }
8292            }
8293            // TODO(mlentine): Move more logic into this method
8294            skip |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
8295            skip |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()", 0, VALIDATION_ERROR_1b2000b2);
8296            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
8297                if (pSubCB->in_use.load() || pCB->linkedCommandBuffers.count(pSubCB)) {
8298                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8299                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), __LINE__,
8300                                    VALIDATION_ERROR_1b2000b4, "DS",
8301                                    "Attempt to simultaneously execute command buffer 0x%p"
8302                                    " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set! %s",
8303                                    pCB->commandBuffer, validation_error_map[VALIDATION_ERROR_1b2000b4]);
8304                }
8305                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
8306                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
8307                    skip |= log_msg(
8308                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8309                        HandleToUint64(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
8310                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) "
8311                        "does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
8312                        "(0x%p) to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
8313                        "set, even though it does.",
8314                        pCommandBuffers[i], pCB->commandBuffer);
8315                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
8316                }
8317            }
8318            if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
8319                skip |=
8320                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8321                            HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000ca, "DS",
8322                            "vkCmdExecuteCommands(): Secondary Command Buffer "
8323                            "(0x%p) cannot be submitted with a query in "
8324                            "flight and inherited queries not "
8325                            "supported on this device. %s",
8326                            pCommandBuffers[i], validation_error_map[VALIDATION_ERROR_1b2000ca]);
8327            }
8328            // TODO: separate validate from update! This is very tangled.
8329            // Propagate layout transitions to the primary cmd buffer
8330            for (auto ilm_entry : pSubCB->imageLayoutMap) {
8331                SetLayout(dev_data, pCB, ilm_entry.first, ilm_entry.second);
8332            }
8333            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
8334            pCB->linkedCommandBuffers.insert(pSubCB);
8335            pSubCB->linkedCommandBuffers.insert(pCB);
8336            for (auto &function : pSubCB->queryUpdates) {
8337                pCB->queryUpdates.push_back(function);
8338            }
8339        }
8340        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands()", VALIDATION_ERROR_1b200019);
8341        skip |=
8342            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdExecuteCommands()",
8343                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b202415);
8344        skip |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
8345    }
8346    lock.unlock();
8347    if (!skip) dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
8348}
8349
8350VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
8351                                         void **ppData) {
8352    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8353
8354    bool skip = false;
8355    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8356    unique_lock_t lock(global_lock);
8357    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
8358    if (mem_info) {
8359        // TODO : This could me more fine-grained to track just region that is valid
8360        mem_info->global_valid = true;
8361        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
8362        skip |= ValidateMapImageLayouts(dev_data, device, mem_info, offset, end_offset);
8363        // TODO : Do we need to create new "bound_range" for the mapped range?
8364        SetMemRangesValid(dev_data, mem_info, offset, end_offset);
8365        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
8366             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
8367            skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
8368                           HandleToUint64(mem), __LINE__, VALIDATION_ERROR_31200554, "MEM",
8369                           "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64 ". %s",
8370                           HandleToUint64(mem), validation_error_map[VALIDATION_ERROR_31200554]);
8371        }
8372    }
8373    skip |= ValidateMapMemRange(dev_data, mem, offset, size);
8374    lock.unlock();
8375
8376    if (!skip) {
8377        result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
8378        if (VK_SUCCESS == result) {
8379            lock.lock();
8380            // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
8381            storeMemRanges(dev_data, mem, offset, size);
8382            initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
8383            lock.unlock();
8384        }
8385    }
8386    return result;
8387}
8388
8389VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
8390    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8391    bool skip = false;
8392
8393    unique_lock_t lock(global_lock);
8394    skip |= deleteMemRanges(dev_data, mem);
8395    lock.unlock();
8396    if (!skip) {
8397        dev_data->dispatch_table.UnmapMemory(device, mem);
8398    }
8399}
8400
8401static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
8402                                   const VkMappedMemoryRange *pMemRanges) {
8403    bool skip = false;
8404    for (uint32_t i = 0; i < memRangeCount; ++i) {
8405        auto mem_info = GetMemObjInfo(dev_data, pMemRanges[i].memory);
8406        if (mem_info) {
8407            if (pMemRanges[i].size == VK_WHOLE_SIZE) {
8408                if (mem_info->mem_range.offset > pMemRanges[i].offset) {
8409                    skip |=
8410                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
8411                                HandleToUint64(pMemRanges[i].memory), __LINE__, VALIDATION_ERROR_0c20055c, "MEM",
8412                                "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
8413                                ") is less than Memory Object's offset "
8414                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
8415                                funcName, static_cast<size_t>(pMemRanges[i].offset),
8416                                static_cast<size_t>(mem_info->mem_range.offset), validation_error_map[VALIDATION_ERROR_0c20055c]);
8417                }
8418            } else {
8419                const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
8420                                              ? mem_info->alloc_info.allocationSize
8421                                              : (mem_info->mem_range.offset + mem_info->mem_range.size);
8422                if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
8423                    (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
8424                    skip |=
8425                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
8426                                HandleToUint64(pMemRanges[i].memory), __LINE__, VALIDATION_ERROR_0c20055a, "MEM",
8427                                "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
8428                                ") exceed the Memory Object's upper-bound "
8429                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
8430                                funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
8431                                static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end),
8432                                validation_error_map[VALIDATION_ERROR_0c20055a]);
8433                }
8434            }
8435        }
8436    }
8437    return skip;
8438}
8439
8440static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
8441                                                     const VkMappedMemoryRange *mem_ranges) {
8442    bool skip = false;
8443    for (uint32_t i = 0; i < mem_range_count; ++i) {
8444        auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
8445        if (mem_info) {
8446            if (mem_info->shadow_copy) {
8447                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
8448                                        ? mem_info->mem_range.size
8449                                        : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
8450                char *data = static_cast<char *>(mem_info->shadow_copy);
8451                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
8452                    if (data[j] != NoncoherentMemoryFillValue) {
8453                        skip |= log_msg(
8454                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
8455                            HandleToUint64(mem_ranges[i].memory), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
8456                            "Memory underflow was detected on mem obj 0x%" PRIxLEAST64, HandleToUint64(mem_ranges[i].memory));
8457                    }
8458                }
8459                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
8460                    if (data[j] != NoncoherentMemoryFillValue) {
8461                        skip |= log_msg(
8462                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
8463                            HandleToUint64(mem_ranges[i].memory), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
8464                            "Memory overflow was detected on mem obj 0x%" PRIxLEAST64, HandleToUint64(mem_ranges[i].memory));
8465                    }
8466                }
8467                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
8468            }
8469        }
8470    }
8471    return skip;
8472}
8473
8474static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
8475    for (uint32_t i = 0; i < mem_range_count; ++i) {
8476        auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
8477        if (mem_info && mem_info->shadow_copy) {
8478            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
8479                                    ? mem_info->mem_range.size
8480                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
8481            char *data = static_cast<char *>(mem_info->shadow_copy);
8482            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
8483        }
8484    }
8485}
8486
8487static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
8488                                                  const VkMappedMemoryRange *mem_ranges) {
8489    bool skip = false;
8490    for (uint32_t i = 0; i < mem_range_count; ++i) {
8491        uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
8492        if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
8493            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
8494                            HandleToUint64(mem_ranges->memory), __LINE__, VALIDATION_ERROR_0c20055e, "MEM",
8495                            "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
8496                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
8497                            func_name, i, mem_ranges[i].offset, atom_size, validation_error_map[VALIDATION_ERROR_0c20055e]);
8498        }
8499        if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
8500            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
8501                            HandleToUint64(mem_ranges->memory), __LINE__, VALIDATION_ERROR_0c200560, "MEM",
8502                            "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
8503                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
8504                            func_name, i, mem_ranges[i].size, atom_size, validation_error_map[VALIDATION_ERROR_0c200560]);
8505        }
8506    }
8507    return skip;
8508}
8509
8510static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
8511                                                   const VkMappedMemoryRange *mem_ranges) {
8512    bool skip = false;
8513    lock_guard_t lock(global_lock);
8514    skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
8515    skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
8516    return skip;
8517}
8518
8519VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
8520                                                       const VkMappedMemoryRange *pMemRanges) {
8521    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8522    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8523
8524    if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
8525        result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
8526    }
8527    return result;
8528}
8529
8530static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
8531                                                        const VkMappedMemoryRange *mem_ranges) {
8532    bool skip = false;
8533    lock_guard_t lock(global_lock);
8534    skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
8535    return skip;
8536}
8537
8538static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
8539                                                       const VkMappedMemoryRange *mem_ranges) {
8540    lock_guard_t lock(global_lock);
8541    // Update our shadow copy with modified driver data
8542    CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
8543}
8544
8545VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
8546                                                            const VkMappedMemoryRange *pMemRanges) {
8547    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8548    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8549
8550    if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
8551        result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
8552        if (result == VK_SUCCESS) {
8553            PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
8554        }
8555    }
8556    return result;
8557}
8558
8559static bool PreCallValidateBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
8560                                           VkDeviceSize memoryOffset) {
8561    bool skip = false;
8562    if (image_state) {
8563        unique_lock_t lock(global_lock);
8564        // Track objects tied to memory
8565        uint64_t image_handle = HandleToUint64(image);
8566        skip = ValidateSetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, "vkBindImageMemory()");
8567        if (!image_state->memory_requirements_checked) {
8568            // There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
8569            // BindImageMemory but it's implied in that memory being bound must conform with VkMemoryRequirements from
8570            // vkGetImageMemoryRequirements()
8571            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8572                            image_handle, __LINE__, DRAWSTATE_INVALID_IMAGE, "DS",
8573                            "vkBindImageMemory(): Binding memory to image 0x%" PRIxLEAST64
8574                            " but vkGetImageMemoryRequirements() has not been called on that image.",
8575                            image_handle);
8576            // Make the call for them so we can verify the state
8577            lock.unlock();
8578            dev_data->dispatch_table.GetImageMemoryRequirements(dev_data->device, image, &image_state->requirements);
8579            lock.lock();
8580        }
8581
8582        // Validate bound memory range information
8583        auto mem_info = GetMemObjInfo(dev_data, mem);
8584        if (mem_info) {
8585            skip |= ValidateInsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
8586                                                   image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, "vkBindImageMemory()");
8587            skip |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, "vkBindImageMemory()",
8588                                        VALIDATION_ERROR_1740082e);
8589        }
8590
8591        // Validate memory requirements alignment
8592        if (SafeModulo(memoryOffset, image_state->requirements.alignment) != 0) {
8593            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8594                            image_handle, __LINE__, VALIDATION_ERROR_17400830, "DS",
8595                            "vkBindImageMemory(): memoryOffset is 0x%" PRIxLEAST64
8596                            " but must be an integer multiple of the "
8597                            "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
8598                            ", returned from a call to vkGetImageMemoryRequirements with image. %s",
8599                            memoryOffset, image_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_17400830]);
8600        }
8601
8602        // Validate memory requirements size
8603        if (image_state->requirements.size > mem_info->alloc_info.allocationSize - memoryOffset) {
8604            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8605                            image_handle, __LINE__, VALIDATION_ERROR_17400832, "DS",
8606                            "vkBindImageMemory(): memory size minus memoryOffset is 0x%" PRIxLEAST64
8607                            " but must be at least as large as "
8608                            "VkMemoryRequirements::size value 0x%" PRIxLEAST64
8609                            ", returned from a call to vkGetImageMemoryRequirements with image. %s",
8610                            mem_info->alloc_info.allocationSize - memoryOffset, image_state->requirements.size,
8611                            validation_error_map[VALIDATION_ERROR_17400832]);
8612        }
8613    }
8614    return skip;
8615}
8616
8617static void PostCallRecordBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
8618                                          VkDeviceSize memoryOffset) {
8619    if (image_state) {
8620        unique_lock_t lock(global_lock);
8621        // Track bound memory range information
8622        auto mem_info = GetMemObjInfo(dev_data, mem);
8623        if (mem_info) {
8624            InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
8625                                   image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
8626        }
8627
8628        // Track objects tied to memory
8629        uint64_t image_handle = HandleToUint64(image);
8630        SetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, "vkBindImageMemory()");
8631
8632        image_state->binding.mem = mem;
8633        image_state->binding.offset = memoryOffset;
8634        image_state->binding.size = image_state->requirements.size;
8635    }
8636}
8637
8638VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
8639    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8640    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8641    auto image_state = GetImageState(dev_data, image);
8642    bool skip = PreCallValidateBindImageMemory(dev_data, image, image_state, mem, memoryOffset);
8643    if (!skip) {
8644        result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
8645        if (result == VK_SUCCESS) {
8646            PostCallRecordBindImageMemory(dev_data, image, image_state, mem, memoryOffset);
8647        }
8648    }
8649    return result;
8650}
8651
8652VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
8653    bool skip = false;
8654    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8655    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8656    unique_lock_t lock(global_lock);
8657    auto event_state = GetEventNode(dev_data, event);
8658    if (event_state) {
8659        event_state->needsSignaled = false;
8660        event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
8661        if (event_state->write_in_use) {
8662            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8663                            HandleToUint64(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
8664                            "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
8665                            HandleToUint64(event));
8666        }
8667    }
8668    lock.unlock();
8669    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
8670    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
8671    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
8672    for (auto queue_data : dev_data->queueMap) {
8673        auto event_entry = queue_data.second.eventToStageMap.find(event);
8674        if (event_entry != queue_data.second.eventToStageMap.end()) {
8675            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
8676        }
8677    }
8678    if (!skip) result = dev_data->dispatch_table.SetEvent(device, event);
8679    return result;
8680}
8681
8682VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
8683                                               VkFence fence) {
8684    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
8685    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8686    bool skip = false;
8687    unique_lock_t lock(global_lock);
8688    auto pFence = GetFenceNode(dev_data, fence);
8689    auto pQueue = GetQueueState(dev_data, queue);
8690
8691    // First verify that fence is not in use
8692    skip |= ValidateFenceForSubmit(dev_data, pFence);
8693
8694    if (pFence) {
8695        SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount));
8696    }
8697
8698    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
8699        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
8700        // Track objects tied to memory
8701        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
8702            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
8703                auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
8704                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
8705                                        HandleToUint64(bindInfo.pBufferBinds[j].buffer), kVulkanObjectTypeBuffer))
8706                    skip = true;
8707            }
8708        }
8709        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
8710            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
8711                auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
8712                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
8713                                        HandleToUint64(bindInfo.pImageOpaqueBinds[j].image), kVulkanObjectTypeImage))
8714                    skip = true;
8715            }
8716        }
8717        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
8718            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
8719                auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
8720                // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
8721                VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
8722                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
8723                                        HandleToUint64(bindInfo.pImageBinds[j].image), kVulkanObjectTypeImage))
8724                    skip = true;
8725            }
8726        }
8727
8728        std::vector<SEMAPHORE_WAIT> semaphore_waits;
8729        std::vector<VkSemaphore> semaphore_signals;
8730        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
8731            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
8732            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
8733            if (pSemaphore) {
8734                if (pSemaphore->signaled) {
8735                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
8736                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
8737                        pSemaphore->in_use.fetch_add(1);
8738                    }
8739                    pSemaphore->signaler.first = VK_NULL_HANDLE;
8740                    pSemaphore->signaled = false;
8741                } else {
8742                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
8743                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
8744                                    "vkQueueBindSparse: Queue 0x%p is waiting on semaphore 0x%" PRIx64
8745                                    " that has no way to be signaled.",
8746                                    queue, HandleToUint64(semaphore));
8747                }
8748            }
8749        }
8750        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
8751            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
8752            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
8753            if (pSemaphore) {
8754                if (pSemaphore->signaled) {
8755                    skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
8756                                   HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
8757                                   "vkQueueBindSparse: Queue 0x%p is signaling semaphore 0x%" PRIx64
8758                                   ", but that semaphore is already signaled.",
8759                                   queue, HandleToUint64(semaphore));
8760                } else {
8761                    pSemaphore->signaler.first = queue;
8762                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
8763                    pSemaphore->signaled = true;
8764                    pSemaphore->in_use.fetch_add(1);
8765                    semaphore_signals.push_back(semaphore);
8766                }
8767            }
8768        }
8769
8770        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals,
8771                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
8772    }
8773
8774    if (pFence && !bindInfoCount) {
8775        // No work to do, just dropping a fence in the queue by itself.
8776        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(),
8777                                         fence);
8778    }
8779
8780    lock.unlock();
8781
8782    if (!skip) return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
8783
8784    return result;
8785}
8786
8787VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
8788                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
8789    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8790    VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
8791    if (result == VK_SUCCESS) {
8792        lock_guard_t lock(global_lock);
8793        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
8794        sNode->signaler.first = VK_NULL_HANDLE;
8795        sNode->signaler.second = 0;
8796        sNode->signaled = false;
8797    }
8798    return result;
8799}
8800
8801VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
8802                                           const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
8803    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8804    VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
8805    if (result == VK_SUCCESS) {
8806        lock_guard_t lock(global_lock);
8807        dev_data->eventMap[*pEvent].needsSignaled = false;
8808        dev_data->eventMap[*pEvent].write_in_use = 0;
8809        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
8810    }
8811    return result;
8812}
8813
8814static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
8815                                              VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
8816                                              SWAPCHAIN_NODE *old_swapchain_state) {
8817    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
8818
8819    // TODO: revisit this. some of these rules are being relaxed.
8820
8821    // All physical devices and queue families are required to be able
8822    // to present to any native window on Android; require the
8823    // application to have established support on any other platform.
8824    if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
8825        auto support_predicate = [dev_data](decltype(surface_state->gpu_queue_support)::const_reference qs) -> bool {
8826            // TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
8827            return (qs.first.gpu == dev_data->physical_device) && qs.second;
8828        };
8829        const auto& support = surface_state->gpu_queue_support;
8830        bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);
8831
8832        if (!is_supported) {
8833            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8834                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009ec, "DS",
8835                        "%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. "
8836                        "The vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support "
8837                        "with this surface for at least one queue family of this device. %s",
8838                        func_name, validation_error_map[VALIDATION_ERROR_146009ec]))
8839                return true;
8840        }
8841    }
8842
8843    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
8844        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8845                    HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
8846                    "%s: surface has an existing swapchain other than oldSwapchain", func_name))
8847            return true;
8848    }
8849    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
8850        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
8851                    HandleToUint64(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE, "DS",
8852                    "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
8853            return true;
8854    }
8855    auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
8856    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
8857        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
8858                    HandleToUint64(dev_data->physical_device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
8859                    "%s: surface capabilities not retrieved for this physical device", func_name))
8860            return true;
8861    } else {  // have valid capabilities
8862        auto &capabilities = physical_device_state->surfaceCapabilities;
8863        // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
8864        if (pCreateInfo->minImageCount < capabilities.minImageCount) {
8865            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8866                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009ee, "DS",
8867                        "%s called with minImageCount = %d, which is outside the bounds returned "
8868                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
8869                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
8870                        validation_error_map[VALIDATION_ERROR_146009ee]))
8871                return true;
8872        }
8873
8874        if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
8875            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8876                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f0, "DS",
8877                        "%s called with minImageCount = %d, which is outside the bounds returned "
8878                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
8879                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
8880                        validation_error_map[VALIDATION_ERROR_146009f0]))
8881                return true;
8882        }
8883
8884        // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
8885        if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
8886            (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
8887            (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
8888            (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) {
8889            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8890                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f4, "DS",
8891                        "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
8892                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
8893                        "maxImageExtent = (%d,%d). %s",
8894                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
8895                        capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
8896                        capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height,
8897                        validation_error_map[VALIDATION_ERROR_146009f4]))
8898                return true;
8899        }
8900        // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
8901        // VkSurfaceCapabilitiesKHR::supportedTransforms.
8902        if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
8903            !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
8904            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
8905            // it up a little at a time, and then log it:
8906            std::string errorString = "";
8907            char str[1024];
8908            // Here's the first part of the message:
8909            sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s).  Supported values are:\n", func_name,
8910                    string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
8911            errorString += str;
8912            for (int i = 0; i < 32; i++) {
8913                // Build up the rest of the message:
8914                if ((1 << i) & capabilities.supportedTransforms) {
8915                    const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i));
8916                    sprintf(str, "    %s\n", newStr);
8917                    errorString += str;
8918                }
8919            }
8920            // Log the message that we've built up:
8921            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8922                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009fe, "DS", "%s. %s", errorString.c_str(),
8923                        validation_error_map[VALIDATION_ERROR_146009fe]))
8924                return true;
8925        }
8926
8927        // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
8928        // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
8929        if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
8930            !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
8931            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
8932            // it up a little at a time, and then log it:
8933            std::string errorString = "";
8934            char str[1024];
8935            // Here's the first part of the message:
8936            sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s).  Supported values are:\n",
8937                    func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
8938            errorString += str;
8939            for (int i = 0; i < 32; i++) {
8940                // Build up the rest of the message:
8941                if ((1 << i) & capabilities.supportedCompositeAlpha) {
8942                    const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i));
8943                    sprintf(str, "    %s\n", newStr);
8944                    errorString += str;
8945                }
8946            }
8947            // Log the message that we've built up:
8948            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8949                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600a00, "DS", "%s. %s", errorString.c_str(),
8950                        validation_error_map[VALIDATION_ERROR_14600a00]))
8951                return true;
8952        }
8953        // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
8954        if ((pCreateInfo->imageArrayLayers < 1) || (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers)) {
8955            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8956                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f6, "DS",
8957                        "%s called with a non-supported imageArrayLayers (i.e. %d).  Minimum value is 1, maximum value is %d. %s",
8958                        func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers,
8959                        validation_error_map[VALIDATION_ERROR_146009f6]))
8960                return true;
8961        }
8962        // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
8963        if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
8964            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8965                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f8, "DS",
8966                        "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x).  Supported flag bits are 0x%08x. %s",
8967                        func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags,
8968                        validation_error_map[VALIDATION_ERROR_146009f8]))
8969                return true;
8970        }
8971    }
8972
8973    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
8974    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
8975        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8976                    HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
8977                    "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
8978            return true;
8979    } else {
8980        // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
8981        bool foundFormat = false;
8982        bool foundColorSpace = false;
8983        bool foundMatch = false;
8984        for (auto const &format : physical_device_state->surface_formats) {
8985            if (pCreateInfo->imageFormat == format.format) {
8986                // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
8987                foundFormat = true;
8988                if (pCreateInfo->imageColorSpace == format.colorSpace) {
8989                    foundMatch = true;
8990                    break;
8991                }
8992            } else {
8993                if (pCreateInfo->imageColorSpace == format.colorSpace) {
8994                    foundColorSpace = true;
8995                }
8996            }
8997        }
8998        if (!foundMatch) {
8999            if (!foundFormat) {
9000                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9001                            HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f2, "DS",
9002                            "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d). %s", func_name,
9003                            pCreateInfo->imageFormat, validation_error_map[VALIDATION_ERROR_146009f2]))
9004                    return true;
9005            }
9006            if (!foundColorSpace) {
9007                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9008                            HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f2, "DS",
9009                            "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d). %s", func_name,
9010                            pCreateInfo->imageColorSpace, validation_error_map[VALIDATION_ERROR_146009f2]))
9011                    return true;
9012            }
9013        }
9014    }
9015
9016    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
9017    if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
9018        // FIFO is required to always be supported
9019        if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
9020            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9021                        HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
9022                        "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
9023                return true;
9024        }
9025    } else {
9026        // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
9027        bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
9028                                    pCreateInfo->presentMode) != physical_device_state->present_modes.end();
9029        if (!foundMatch) {
9030            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9031                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600a02, "DS",
9032                        "%s called with a non-supported presentMode (i.e. %s). %s", func_name,
9033                        string_VkPresentModeKHR(pCreateInfo->presentMode), validation_error_map[VALIDATION_ERROR_14600a02]))
9034                return true;
9035        }
9036    }
9037    // Validate state for shared presentable case
9038    if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
9039        VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
9040        if (!dev_data->extensions.vk_khr_shared_presentable_image) {
9041            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9042                        HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_EXTENSION_NOT_ENABLED, "DS",
9043                        "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
9044                        "been enabled.",
9045                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
9046                return true;
9047        } else if (pCreateInfo->minImageCount != 1) {
9048            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9049                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_14600ace, "DS",
9050                        "%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
9051                        "must be 1. %s",
9052                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount,
9053                        validation_error_map[VALIDATION_ERROR_14600ace]))
9054                return true;
9055        }
9056    }
9057
9058    return false;
9059}
9060
9061static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
9062                                             VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
9063                                             SWAPCHAIN_NODE *old_swapchain_state) {
9064    if (VK_SUCCESS == result) {
9065        lock_guard_t lock(global_lock);
9066        auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
9067        if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
9068            VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
9069            swapchain_state->shared_presentable = true;
9070        }
9071        surface_state->swapchain = swapchain_state.get();
9072        dev_data->swapchainMap[*pSwapchain] = std::move(swapchain_state);
9073    } else {
9074        surface_state->swapchain = nullptr;
9075    }
9076    // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
9077    if (old_swapchain_state) {
9078        old_swapchain_state->replaced = true;
9079    }
9080    surface_state->old_swapchain = old_swapchain_state;
9081    return;
9082}
9083
9084VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
9085                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
9086    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9087    auto surface_state = GetSurfaceState(dev_data->instance_data, pCreateInfo->surface);
9088    auto old_swapchain_state = GetSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
9089
9090    if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapChainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
9091        return VK_ERROR_VALIDATION_FAILED_EXT;
9092    }
9093
9094    VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
9095
9096    PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
9097
9098    return result;
9099}
9100
9101VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
9102    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9103    bool skip = false;
9104
9105    unique_lock_t lock(global_lock);
9106    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
9107    if (swapchain_data) {
9108        if (swapchain_data->images.size() > 0) {
9109            for (auto swapchain_image : swapchain_data->images) {
9110                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
9111                if (image_sub != dev_data->imageSubresourceMap.end()) {
9112                    for (auto imgsubpair : image_sub->second) {
9113                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
9114                        if (image_item != dev_data->imageLayoutMap.end()) {
9115                            dev_data->imageLayoutMap.erase(image_item);
9116                        }
9117                    }
9118                    dev_data->imageSubresourceMap.erase(image_sub);
9119                }
9120                skip = ClearMemoryObjectBindings(dev_data, HandleToUint64(swapchain_image), kVulkanObjectTypeSwapchainKHR);
9121                dev_data->imageMap.erase(swapchain_image);
9122            }
9123        }
9124
9125        auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
9126        if (surface_state) {
9127            if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
9128            if (surface_state->old_swapchain == swapchain_data) surface_state->old_swapchain = nullptr;
9129        }
9130
9131        dev_data->swapchainMap.erase(swapchain);
9132    }
9133    lock.unlock();
9134    if (!skip) dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
9135}
9136
9137static bool PreCallValidateGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
9138                                                 uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
9139    bool skip = false;
9140    if (swapchain_state && pSwapchainImages) {
9141        lock_guard_t lock(global_lock);
9142        // Compare the preliminary value of *pSwapchainImageCount with the value this time:
9143        if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) {
9144            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9145                            HandleToUint64(device), __LINE__, SWAPCHAIN_PRIOR_COUNT, "DS",
9146                            "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount; but no prior positive "
9147                            "value has been seen for pSwapchainImages.");
9148        } else if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
9149            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9150                            HandleToUint64(device), __LINE__, SWAPCHAIN_INVALID_COUNT, "DS",
9151                            "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount, and with "
9152                            "pSwapchainImages set to a value (%d) that is greater than the value (%d) that was returned when "
9153                            "pSwapchainImageCount was NULL.",
9154                            *pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
9155        }
9156    }
9157    return skip;
9158}
9159
9160static void PostCallRecordGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
9161                                                uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
9162    lock_guard_t lock(global_lock);
9163
9164    if (*pSwapchainImageCount > swapchain_state->images.size()) swapchain_state->images.resize(*pSwapchainImageCount);
9165
9166    if (pSwapchainImages) {
9167        if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_DETAILS) {
9168            swapchain_state->vkGetSwapchainImagesKHRState = QUERY_DETAILS;
9169        }
9170        for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
9171            if (swapchain_state->images[i] != VK_NULL_HANDLE) continue;  // Already retrieved this.
9172
9173            IMAGE_LAYOUT_NODE image_layout_node;
9174            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
9175            image_layout_node.format = swapchain_state->createInfo.imageFormat;
9176            // Add imageMap entries for each swapchain image
9177            VkImageCreateInfo image_ci = {};
9178            image_ci.flags = 0;
9179            image_ci.imageType = VK_IMAGE_TYPE_2D;
9180            image_ci.format = swapchain_state->createInfo.imageFormat;
9181            image_ci.extent.width = swapchain_state->createInfo.imageExtent.width;
9182            image_ci.extent.height = swapchain_state->createInfo.imageExtent.height;
9183            image_ci.extent.depth = 1;
9184            image_ci.mipLevels = 1;
9185            image_ci.arrayLayers = swapchain_state->createInfo.imageArrayLayers;
9186            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
9187            image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
9188            image_ci.usage = swapchain_state->createInfo.imageUsage;
9189            image_ci.sharingMode = swapchain_state->createInfo.imageSharingMode;
9190            device_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
9191            auto &image_state = device_data->imageMap[pSwapchainImages[i]];
9192            image_state->valid = false;
9193            image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
9194            swapchain_state->images[i] = pSwapchainImages[i];
9195            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
9196            device_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
9197            device_data->imageLayoutMap[subpair] = image_layout_node;
9198        }
9199    }
9200
9201    if (*pSwapchainImageCount) {
9202        if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_COUNT) {
9203            swapchain_state->vkGetSwapchainImagesKHRState = QUERY_COUNT;
9204        }
9205        swapchain_state->get_swapchain_image_count = *pSwapchainImageCount;
9206    }
9207}
9208
9209VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
9210                                                     VkImage *pSwapchainImages) {
9211    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9212    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9213
9214    auto swapchain_state = GetSwapchainNode(device_data, swapchain);
9215    bool skip = PreCallValidateGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
9216
9217    if (!skip) {
9218        result = device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
9219    }
9220
9221    if ((result == VK_SUCCESS || result == VK_INCOMPLETE)) {
9222        PostCallRecordGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
9223    }
9224    return result;
9225}
9226
9227VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
9228    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
9229    bool skip = false;
9230
9231    lock_guard_t lock(global_lock);
9232    auto queue_state = GetQueueState(dev_data, queue);
9233
9234    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
9235        auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
9236        if (pSemaphore && !pSemaphore->signaled) {
9237            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
9238                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9239                            "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
9240                            HandleToUint64(pPresentInfo->pWaitSemaphores[i]));
9241        }
9242    }
9243
9244    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
9245        auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
9246        if (swapchain_data) {
9247            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
9248                skip |=
9249                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9250                            HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
9251                            "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
9252                            pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
9253            } else {
9254                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
9255                auto image_state = GetImageState(dev_data, image);
9256
9257                if (image_state->shared_presentable) {
9258                    image_state->layout_locked = true;
9259                }
9260
9261                skip |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");
9262
9263                if (!image_state->acquired) {
9264                    skip |= log_msg(
9265                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9266                        HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED, "DS",
9267                        "vkQueuePresentKHR: Swapchain image index %u has not been acquired.", pPresentInfo->pImageIndices[i]);
9268                }
9269
9270                vector<VkImageLayout> layouts;
9271                if (FindLayouts(dev_data, image, layouts)) {
9272                    for (auto layout : layouts) {
9273                        if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) &&
9274                            (!dev_data->extensions.vk_khr_shared_presentable_image ||
9275                             (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
9276                            skip |=
9277                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
9278                                        HandleToUint64(queue), __LINE__, VALIDATION_ERROR_11200a20, "DS",
9279                                        "Images passed to present must be in layout "
9280                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s. %s",
9281                                        string_VkImageLayout(layout), validation_error_map[VALIDATION_ERROR_11200a20]);
9282                        }
9283                    }
9284                }
9285            }
9286
9287            // All physical devices and queue families are required to be able
9288            // to present to any native window on Android; require the
9289            // application to have established support on any other platform.
9290            if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
9291                auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
9292                auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});
9293
9294                if (support_it == surface_state->gpu_queue_support.end()) {
9295                    skip |=
9296                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9297                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE, "DS",
9298                                "vkQueuePresentKHR: Presenting image without calling "
9299                                "vkGetPhysicalDeviceSurfaceSupportKHR");
9300                } else if (!support_it->second) {
9301                    skip |=
9302                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9303                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, VALIDATION_ERROR_31800a18, "DS",
9304                                "vkQueuePresentKHR: Presenting image on queue that cannot "
9305                                "present to this surface. %s",
9306                                validation_error_map[VALIDATION_ERROR_31800a18]);
9307                }
9308            }
9309        }
9310    }
9311    if (pPresentInfo && pPresentInfo->pNext) {
9312        // Verify ext struct
9313        struct std_header {
9314            VkStructureType sType;
9315            const void *pNext;
9316        };
9317        std_header *pnext = (std_header *)pPresentInfo->pNext;
9318        while (pnext) {
9319            if (VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR == pnext->sType) {
9320                VkPresentRegionsKHR *present_regions = (VkPresentRegionsKHR *)pnext;
9321                for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
9322                    auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
9323                    assert(swapchain_data);
9324                    VkPresentRegionKHR region = present_regions->pRegions[i];
9325                    for (uint32_t j = 0; j < region.rectangleCount; ++j) {
9326                        VkRectLayerKHR rect = region.pRectangles[j];
9327                        // TODO: Need to update these errors to their unique error ids when available
9328                        if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
9329                            skip |= log_msg(
9330                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9331                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
9332                                "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext "
9333                                "chain, pRegion[%i].pRectangles[%i], the sum of offset.x "
9334                                "(%i) and extent.width (%i) is greater than the "
9335                                "corresponding swapchain's imageExtent.width (%i).",
9336                                i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
9337                        }
9338                        if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
9339                            skip |= log_msg(
9340                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9341                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
9342                                "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext "
9343                                "chain, pRegion[%i].pRectangles[%i], the sum of offset.y "
9344                                "(%i) and extent.height (%i) is greater than the "
9345                                "corresponding swapchain's imageExtent.height (%i).",
9346                                i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
9347                        }
9348                        if (rect.layer > swapchain_data->createInfo.imageArrayLayers) {
9349                            skip |= log_msg(
9350                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9351                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
9352                                "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the "
9353                                "layer (%i) is greater than the corresponding swapchain's imageArrayLayers (%i).",
9354                                i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
9355                        }
9356                    }
9357                }
9358            } else if (VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE == pnext->sType) {
9359                VkPresentTimesInfoGOOGLE *present_times_info = (VkPresentTimesInfoGOOGLE *)pnext;
9360                if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
9361                    skip |=
9362                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9363                                HandleToUint64(pPresentInfo->pSwapchains[0]), __LINE__,
9364
9365                                VALIDATION_ERROR_118009be, "DS",
9366                                "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but "
9367                                "pPresentInfo->swapchainCount is %i. For VkPresentTimesInfoGOOGLE down pNext "
9368                                "chain of VkPresentInfoKHR, VkPresentTimesInfoGOOGLE.swapchainCount "
9369                                "must equal VkPresentInfoKHR.swapchainCount.",
9370                                present_times_info->swapchainCount, pPresentInfo->swapchainCount);
9371                }
9372            }
9373            pnext = (std_header *)pnext->pNext;
9374        }
9375    }
9376
9377    if (skip) {
9378        return VK_ERROR_VALIDATION_FAILED_EXT;
9379    }
9380
9381    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
9382
9383    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
9384        // Semaphore waits occur before error generation, if the call reached
9385        // the ICD. (Confirm?)
9386        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
9387            auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
9388            if (pSemaphore) {
9389                pSemaphore->signaler.first = VK_NULL_HANDLE;
9390                pSemaphore->signaled = false;
9391            }
9392        }
9393
9394        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
9395            // Note: this is imperfect, in that we can get confused about what
9396            // did or didn't succeed-- but if the app does that, it's confused
9397            // itself just as much.
9398            auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
9399
9400            if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue;  // this present didn't actually happen.
9401
9402            // Mark the image as having been released to the WSI
9403            auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
9404            auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
9405            auto image_state = GetImageState(dev_data, image);
9406            image_state->acquired = false;
9407        }
9408
9409        // Note: even though presentation is directed to a queue, there is no
9410        // direct ordering between QP and subsequent work, so QP (and its
9411        // semaphore waits) /never/ participate in any completion proof.
9412    }
9413
9414    return result;
9415}
9416
9417static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
9418                                                     const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
9419                                                     std::vector<SURFACE_STATE *> &surface_state,
9420                                                     std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
9421    if (pCreateInfos) {
9422        lock_guard_t lock(global_lock);
9423        for (uint32_t i = 0; i < swapchainCount; i++) {
9424            surface_state.push_back(GetSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
9425            old_swapchain_state.push_back(GetSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
9426            std::stringstream func_name;
9427            func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]";
9428            if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i],
9429                                                  old_swapchain_state[i])) {
9430                return true;
9431            }
9432        }
9433    }
9434    return false;
9435}
9436
9437static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
9438                                                    const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
9439                                                    std::vector<SURFACE_STATE *> &surface_state,
9440                                                    std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
9441    if (VK_SUCCESS == result) {
9442        for (uint32_t i = 0; i < swapchainCount; i++) {
9443            auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
9444            if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfos[i].presentMode ||
9445                VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfos[i].presentMode) {
9446                swapchain_state->shared_presentable = true;
9447            }
9448            surface_state[i]->swapchain = swapchain_state.get();
9449            dev_data->swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
9450        }
9451    } else {
9452        for (uint32_t i = 0; i < swapchainCount; i++) {
9453            surface_state[i]->swapchain = nullptr;
9454        }
9455    }
9456    // Spec requires that even if CreateSharedSwapchainKHR fails, oldSwapchain behaves as replaced.
9457    for (uint32_t i = 0; i < swapchainCount; i++) {
9458        if (old_swapchain_state[i]) {
9459            old_swapchain_state[i]->replaced = true;
9460        }
9461        surface_state[i]->old_swapchain = old_swapchain_state[i];
9462    }
9463    return;
9464}
9465
9466VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
9467                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
9468                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
9469    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9470    std::vector<SURFACE_STATE *> surface_state;
9471    std::vector<SWAPCHAIN_NODE *> old_swapchain_state;
9472
9473    if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
9474                                                 old_swapchain_state)) {
9475        return VK_ERROR_VALIDATION_FAILED_EXT;
9476    }
9477
9478    VkResult result =
9479        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
9480
9481    PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
9482                                            old_swapchain_state);
9483
9484    return result;
9485}
9486
9487VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
9488                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
9489    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9490    bool skip = false;
9491
9492    unique_lock_t lock(global_lock);
9493
9494    if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
9495        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9496                        HandleToUint64(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
9497                        "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
9498                        "to determine the completion of this operation.");
9499    }
9500
9501    auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
9502    if (pSemaphore && pSemaphore->signaled) {
9503        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9504                        HandleToUint64(semaphore), __LINE__, VALIDATION_ERROR_16400a0c, "DS",
9505                        "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state. %s",
9506                        validation_error_map[VALIDATION_ERROR_16400a0c]);
9507    }
9508
9509    auto pFence = GetFenceNode(dev_data, fence);
9510    if (pFence) {
9511        skip |= ValidateFenceForSubmit(dev_data, pFence);
9512    }
9513
9514    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
9515
9516    if (swapchain_data->replaced) {
9517        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9518                        HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_REPLACED, "DS",
9519                        "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still "
9520                        "present any images it has acquired, but cannot acquire any more.");
9521    }
9522
9523    auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
9524    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
9525        uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
9526                                                 [=](VkImage image) { return GetImageState(dev_data, image)->acquired; });
9527        if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
9528            skip |=
9529                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9530                        HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES, "DS",
9531                        "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
9532                        acquired_images);
9533        }
9534    }
9535
9536    if (swapchain_data->images.size() == 0) {
9537        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9538                        HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGES_NOT_FOUND, "DS",
9539                        "vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
9540                        "vkGetSwapchainImagesKHR after swapchain creation.");
9541    }
9542
9543    lock.unlock();
9544
9545    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
9546
9547    VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
9548
9549    lock.lock();
9550    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
9551        if (pFence) {
9552            pFence->state = FENCE_INFLIGHT;
9553            pFence->signaler.first = VK_NULL_HANDLE;  // ANI isn't on a queue, so this can't participate in a completion proof.
9554        }
9555
9556        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
9557        if (pSemaphore) {
9558            pSemaphore->signaled = true;
9559            pSemaphore->signaler.first = VK_NULL_HANDLE;
9560        }
9561
9562        // Mark the image as acquired.
9563        auto image = swapchain_data->images[*pImageIndex];
9564        auto image_state = GetImageState(dev_data, image);
9565        image_state->acquired = true;
9566        image_state->shared_presentable = swapchain_data->shared_presentable;
9567    }
9568    lock.unlock();
9569
9570    return result;
9571}
9572
9573VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
9574                                                        VkPhysicalDevice *pPhysicalDevices) {
9575    bool skip = false;
9576    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9577    assert(instance_data);
9578
9579    // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
9580    if (NULL == pPhysicalDevices) {
9581        instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
9582    } else {
9583        if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
9584            // Flag warning here. You can call this without having queried the count, but it may not be
9585            // robust on platforms with multiple physical devices.
9586            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
9587                            0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
9588                            "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
9589                            "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
9590        }  // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
9591        else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
9592            // Having actual count match count from app is not a requirement, so this can be a warning
9593            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9594                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
9595                            "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
9596                            "supported by this instance is %u.",
9597                            *pPhysicalDeviceCount, instance_data->physical_devices_count);
9598        }
9599        instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
9600    }
9601    if (skip) {
9602        return VK_ERROR_VALIDATION_FAILED_EXT;
9603    }
9604    VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
9605    if (NULL == pPhysicalDevices) {
9606        instance_data->physical_devices_count = *pPhysicalDeviceCount;
9607    } else if (result == VK_SUCCESS) {  // Save physical devices
9608        for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
9609            auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
9610            phys_device_state.phys_device = pPhysicalDevices[i];
9611            // Init actual features for each physical device
9612            instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
9613        }
9614    }
9615    return result;
9616}
9617
9618// Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
9619static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
9620                                                                 PHYSICAL_DEVICE_STATE *pd_state,
9621                                                                 uint32_t requested_queue_family_property_count, bool qfp_null,
9622                                                                 const char *caller_name) {
9623    bool skip = false;
9624    if (!qfp_null) {
9625        // Verify that for each physical device, this command is called first with NULL pQueueFamilyProperties in order to get count
9626        if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
9627            skip |= log_msg(
9628                instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
9629                HandleToUint64(pd_state->phys_device), __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
9630                "%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended "
9631                "to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.",
9632                caller_name, caller_name);
9633            // Then verify that pCount that is passed in on second call matches what was returned
9634        } else if (pd_state->queue_family_count != requested_queue_family_property_count) {
9635            skip |= log_msg(
9636                instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
9637                HandleToUint64(pd_state->phys_device), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
9638                "%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32
9639                ", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32
9640                ". It is recommended to instead receive all the properties by calling %s with pQueueFamilyPropertyCount that was "
9641                "previously obtained by calling %s with NULL pQueueFamilyProperties.",
9642                caller_name, requested_queue_family_property_count, pd_state->queue_family_count, caller_name, caller_name);
9643        }
9644        pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
9645    }
9646
9647    return skip;
9648}
9649
9650static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
9651                                                                  PHYSICAL_DEVICE_STATE *pd_state,
9652                                                                  uint32_t *pQueueFamilyPropertyCount,
9653                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
9654    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
9655                                                                (nullptr == pQueueFamilyProperties),
9656                                                                "vkGetPhysicalDeviceQueueFamilyProperties()");
9657}
9658
9659static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_layer_data *instance_data,
9660                                                                      PHYSICAL_DEVICE_STATE *pd_state,
9661                                                                      uint32_t *pQueueFamilyPropertyCount,
9662                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
9663    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
9664                                                                (nullptr == pQueueFamilyProperties),
9665                                                                "vkGetPhysicalDeviceQueueFamilyProperties2KHR()");
9666}
9667
9668// Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
9669static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
9670                                                                    VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
9671    if (!pQueueFamilyProperties) {
9672        if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
9673            pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
9674        pd_state->queue_family_count = count;
9675    } else {  // Save queue family properties
9676        pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
9677        pd_state->queue_family_count = std::max(pd_state->queue_family_count, count);
9678
9679        pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
9680        for (uint32_t i = 0; i < count; ++i) {
9681            pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
9682        }
9683    }
9684}
9685
9686static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
9687                                                                 VkQueueFamilyProperties *pQueueFamilyProperties) {
9688    VkQueueFamilyProperties2KHR *pqfp = nullptr;
9689    std::vector<VkQueueFamilyProperties2KHR> qfp;
9690    qfp.resize(count);
9691    if (pQueueFamilyProperties) {
9692        for (uint32_t i = 0; i < count; ++i) {
9693            qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
9694            qfp[i].pNext = nullptr;
9695            qfp[i].queueFamilyProperties = pQueueFamilyProperties[i];
9696        }
9697        pqfp = qfp.data();
9698    }
9699    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pqfp);
9700}
9701
9702static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
9703                                                                     VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
9704    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pQueueFamilyProperties);
9705}
9706
9707VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
9708                                                                  uint32_t *pQueueFamilyPropertyCount,
9709                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
9710    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9711    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9712    assert(physical_device_state);
9713    unique_lock_t lock(global_lock);
9714
9715    bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state,
9716                                                                      pQueueFamilyPropertyCount, pQueueFamilyProperties);
9717
9718    lock.unlock();
9719
9720    if (skip) return;
9721
9722    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
9723                                                                         pQueueFamilyProperties);
9724
9725    lock.lock();
9726    PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties);
9727}
9728
9729VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
9730                                                                      uint32_t *pQueueFamilyPropertyCount,
9731                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
9732    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9733    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9734    assert(physical_device_state);
9735    unique_lock_t lock(global_lock);
9736
9737    bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_data, physical_device_state,
9738                                                                          pQueueFamilyPropertyCount, pQueueFamilyProperties);
9739
9740    lock.unlock();
9741
9742    if (skip) return;
9743
9744    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
9745                                                                             pQueueFamilyProperties);
9746
9747    lock.lock();
9748    PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(physical_device_state, *pQueueFamilyPropertyCount,
9749                                                             pQueueFamilyProperties);
9750}
9751
9752template <typename TCreateInfo, typename FPtr>
9753static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
9754                              VkSurfaceKHR *pSurface, FPtr fptr) {
9755    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9756
9757    // Call down the call chain:
9758    VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
9759
9760    if (result == VK_SUCCESS) {
9761        unique_lock_t lock(global_lock);
9762        instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
9763        lock.unlock();
9764    }
9765
9766    return result;
9767}
9768
9769VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
9770    bool skip = false;
9771    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9772    unique_lock_t lock(global_lock);
9773    auto surface_state = GetSurfaceState(instance_data, surface);
9774
9775    if ((surface_state) && (surface_state->swapchain)) {
9776        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
9777            HandleToUint64(instance), __LINE__, VALIDATION_ERROR_26c009e4, "DS",
9778            "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed. %s",
9779            validation_error_map[VALIDATION_ERROR_26c009e4]);
9780    }
9781    instance_data->surface_map.erase(surface);
9782    lock.unlock();
9783    if (!skip) {
9784        instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
9785    }
9786}
9787
9788VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
9789                                                            const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9790    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
9791}
9792
9793#ifdef VK_USE_PLATFORM_ANDROID_KHR
9794VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
9795                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9796    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
9797}
9798#endif  // VK_USE_PLATFORM_ANDROID_KHR
9799
9800#ifdef VK_USE_PLATFORM_MIR_KHR
9801VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
9802                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9803    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
9804}
9805
9806VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceMirPresentationSupportKHR(VkPhysicalDevice physicalDevice,
9807                                                                          uint32_t queueFamilyIndex, MirConnection *connection) {
9808    bool skip = false;
9809    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9810
9811    unique_lock_t lock(global_lock);
9812    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9813
9814    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2d2009e2,
9815                                              "vkGetPhysicalDeviceMirPresentationSupportKHR", "queueFamilyIndex");
9816
9817    lock.unlock();
9818
9819    if (skip) return VK_FALSE;
9820
9821    // Call down the call chain:
9822    VkBool32 result =
9823        instance_data->dispatch_table.GetPhysicalDeviceMirPresentationSupportKHR(physicalDevice, queueFamilyIndex, connection);
9824
9825    return result;
9826}
9827#endif  // VK_USE_PLATFORM_MIR_KHR
9828
9829#ifdef VK_USE_PLATFORM_WAYLAND_KHR
9830VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
9831                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9832    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
9833}
9834
9835VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
9836                                                                              uint32_t queueFamilyIndex,
9837                                                                              struct wl_display *display) {
9838    bool skip = false;
9839    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9840
9841    unique_lock_t lock(global_lock);
9842    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9843
9844    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f000a34,
9845                                              "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
9846
9847    lock.unlock();
9848
9849    if (skip) return VK_FALSE;
9850
9851    // Call down the call chain:
9852    VkBool32 result =
9853        instance_data->dispatch_table.GetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex, display);
9854
9855    return result;
9856}
9857#endif  // VK_USE_PLATFORM_WAYLAND_KHR
9858
9859#ifdef VK_USE_PLATFORM_WIN32_KHR
9860VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
9861                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9862    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
9863}
9864
9865VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
9866                                                                            uint32_t queueFamilyIndex) {
9867    bool skip = false;
9868    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9869
9870    unique_lock_t lock(global_lock);
9871    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9872
9873    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f200a3a,
9874                                              "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
9875
9876    lock.unlock();
9877
9878    if (skip) return VK_FALSE;
9879
9880    // Call down the call chain:
9881    VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex);
9882
9883    return result;
9884}
9885#endif  // VK_USE_PLATFORM_WIN32_KHR
9886
9887#ifdef VK_USE_PLATFORM_XCB_KHR
9888VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
9889                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9890    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
9891}
9892
9893VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
9894                                                                          uint32_t queueFamilyIndex, xcb_connection_t *connection,
9895                                                                          xcb_visualid_t visual_id) {
9896    bool skip = false;
9897    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9898
9899    unique_lock_t lock(global_lock);
9900    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9901
9902    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f400a40,
9903                                              "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
9904
9905    lock.unlock();
9906
9907    if (skip) return VK_FALSE;
9908
9909    // Call down the call chain:
9910    VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex,
9911                                                                                               connection, visual_id);
9912
9913    return result;
9914}
9915#endif  // VK_USE_PLATFORM_XCB_KHR
9916
9917#ifdef VK_USE_PLATFORM_XLIB_KHR
9918VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
9919                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9920    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
9921}
9922
9923VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
9924                                                                           uint32_t queueFamilyIndex, Display *dpy,
9925                                                                           VisualID visualID) {
9926    bool skip = false;
9927    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9928
9929    unique_lock_t lock(global_lock);
9930    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9931
9932    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f600a46,
9933                                              "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
9934
9935    lock.unlock();
9936
9937    if (skip) return VK_FALSE;
9938
9939    // Call down the call chain:
9940    VkBool32 result =
9941        instance_data->dispatch_table.GetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex, dpy, visualID);
9942
9943    return result;
9944}
9945#endif  // VK_USE_PLATFORM_XLIB_KHR
9946
9947VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
9948                                                                       VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
9949    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9950
9951    unique_lock_t lock(global_lock);
9952    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9953    lock.unlock();
9954
9955    auto result =
9956        instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
9957
9958    if (result == VK_SUCCESS) {
9959        physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
9960        physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
9961    }
9962
9963    return result;
9964}
9965
9966static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instance_layer_data *instanceData,
9967                                                                   VkPhysicalDevice physicalDevice,
9968                                                                   VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
9969    unique_lock_t lock(global_lock);
9970    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
9971    physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
9972    physicalDeviceState->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities;
9973}
9974
9975VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
9976                                                                        const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
9977                                                                        VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
9978    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9979
9980    auto result =
9981        instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, pSurfaceInfo, pSurfaceCapabilities);
9982
9983    if (result == VK_SUCCESS) {
9984        PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instanceData, physicalDevice, pSurfaceCapabilities);
9985    }
9986
9987    return result;
9988}
9989
9990static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instance_layer_data *instanceData,
9991                                                                   VkPhysicalDevice physicalDevice,
9992                                                                   VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
9993    unique_lock_t lock(global_lock);
9994    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
9995    physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
9996    physicalDeviceState->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount;
9997    physicalDeviceState->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount;
9998    physicalDeviceState->surfaceCapabilities.currentExtent = pSurfaceCapabilities->currentExtent;
9999    physicalDeviceState->surfaceCapabilities.minImageExtent = pSurfaceCapabilities->minImageExtent;
10000    physicalDeviceState->surfaceCapabilities.maxImageExtent = pSurfaceCapabilities->maxImageExtent;
10001    physicalDeviceState->surfaceCapabilities.maxImageArrayLayers = pSurfaceCapabilities->maxImageArrayLayers;
10002    physicalDeviceState->surfaceCapabilities.supportedTransforms = pSurfaceCapabilities->supportedTransforms;
10003    physicalDeviceState->surfaceCapabilities.currentTransform = pSurfaceCapabilities->currentTransform;
10004    physicalDeviceState->surfaceCapabilities.supportedCompositeAlpha = pSurfaceCapabilities->supportedCompositeAlpha;
10005    physicalDeviceState->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags;
10006}
10007
10008VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
10009                                                                        VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
10010    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10011
10012    auto result =
10013        instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities);
10014
10015    if (result == VK_SUCCESS) {
10016        PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instanceData, physicalDevice, pSurfaceCapabilities);
10017    }
10018
10019    return result;
10020}
10021
10022VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
10023                                                                  VkSurfaceKHR surface, VkBool32 *pSupported) {
10024    bool skip = false;
10025    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10026
10027    unique_lock_t lock(global_lock);
10028    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
10029    auto surface_state = GetSurfaceState(instance_data, surface);
10030
10031    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2ee009ea,
10032                                              "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
10033
10034    lock.unlock();
10035
10036    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
10037
10038    auto result =
10039        instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
10040
10041    if (result == VK_SUCCESS) {
10042        surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE);
10043    }
10044
10045    return result;
10046}
10047
10048VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
10049                                                                       uint32_t *pPresentModeCount,
10050                                                                       VkPresentModeKHR *pPresentModes) {
10051    bool skip = false;
10052    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10053    unique_lock_t lock(global_lock);
10054    // TODO: this isn't quite right. available modes may differ by surface AND physical device.
10055    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
10056    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
10057
10058    if (pPresentModes) {
10059        // Compare the preliminary value of *pPresentModeCount with the value this time:
10060        auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
10061        switch (call_state) {
10062            case UNCALLED:
10063                skip |= log_msg(
10064                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
10065                    HandleToUint64(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
10066                    "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModeCount; but no prior positive "
10067                    "value has been seen for pPresentModeCount.");
10068                break;
10069            default:
10070                // both query count and query details
10071                if (*pPresentModeCount != prev_mode_count) {
10072                    skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
10073                                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), __LINE__,
10074                                    DEVLIMITS_COUNT_MISMATCH, "DL",
10075                                    "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that "
10076                                    "differs from the value "
10077                                    "(%u) that was returned when pPresentModes was NULL.",
10078                                    *pPresentModeCount, prev_mode_count);
10079                }
10080                break;
10081        }
10082    }
10083    lock.unlock();
10084
10085    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
10086
10087    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount,
10088                                                                                        pPresentModes);
10089
10090    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
10091        lock.lock();
10092
10093        if (*pPresentModeCount) {
10094            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
10095            if (*pPresentModeCount > physical_device_state->present_modes.size())
10096                physical_device_state->present_modes.resize(*pPresentModeCount);
10097        }
10098        if (pPresentModes) {
10099            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
10100            for (uint32_t i = 0; i < *pPresentModeCount; i++) {
10101                physical_device_state->present_modes[i] = pPresentModes[i];
10102            }
10103        }
10104    }
10105
10106    return result;
10107}
10108
10109VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
10110                                                                  uint32_t *pSurfaceFormatCount,
10111                                                                  VkSurfaceFormatKHR *pSurfaceFormats) {
10112    bool skip = false;
10113    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10114    unique_lock_t lock(global_lock);
10115    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
10116    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
10117
10118    if (pSurfaceFormats) {
10119        auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();
10120
10121        switch (call_state) {
10122            case UNCALLED:
10123                // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means that the application
10124                // didn't
10125                // previously call this function with a NULL value of pSurfaceFormats:
10126                skip |= log_msg(
10127                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
10128                    HandleToUint64(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
10129                    "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount; but no prior positive "
10130                    "value has been seen for pSurfaceFormats.");
10131                break;
10132            default:
10133                if (prev_format_count != *pSurfaceFormatCount) {
10134                    skip |= log_msg(
10135                        instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
10136                        VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), __LINE__,
10137                        DEVLIMITS_COUNT_MISMATCH, "DL",
10138                        "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount, and with pSurfaceFormats "
10139                        "set "
10140                        "to "
10141                        "a value (%u) that is greater than the value (%u) that was returned when pSurfaceFormatCount was NULL.",
10142                        *pSurfaceFormatCount, prev_format_count);
10143                }
10144                break;
10145        }
10146    }
10147    lock.unlock();
10148
10149    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
10150
10151    // Call down the call chain:
10152    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
10153                                                                                   pSurfaceFormats);
10154
10155    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
10156        lock.lock();
10157
10158        if (*pSurfaceFormatCount) {
10159            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
10160            if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
10161                physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
10162        }
10163        if (pSurfaceFormats) {
10164            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
10165            for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
10166                physical_device_state->surface_formats[i] = pSurfaceFormats[i];
10167            }
10168        }
10169    }
10170    return result;
10171}
10172
10173static void PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instance_layer_data *instanceData, VkPhysicalDevice physicalDevice,
10174                                                              uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats) {
10175    unique_lock_t lock(global_lock);
10176    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
10177    if (*pSurfaceFormatCount) {
10178        if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) {
10179            physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT;
10180        }
10181        if (*pSurfaceFormatCount > physicalDeviceState->surface_formats.size())
10182            physicalDeviceState->surface_formats.resize(*pSurfaceFormatCount);
10183    }
10184    if (pSurfaceFormats) {
10185        if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) {
10186            physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS;
10187        }
10188        for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
10189            physicalDeviceState->surface_formats[i] = pSurfaceFormats[i].surfaceFormat;
10190        }
10191    }
10192}
10193
10194VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
10195                                                                   const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
10196                                                                   uint32_t *pSurfaceFormatCount,
10197                                                                   VkSurfaceFormat2KHR *pSurfaceFormats) {
10198    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10199    auto result = instanceData->dispatch_table.GetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, pSurfaceInfo,
10200                                                                                   pSurfaceFormatCount, pSurfaceFormats);
10201    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
10202        PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instanceData, physicalDevice, pSurfaceFormatCount, pSurfaceFormats);
10203    }
10204    return result;
10205}
10206
10207VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
10208                                                            const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
10209                                                            const VkAllocationCallbacks *pAllocator,
10210                                                            VkDebugReportCallbackEXT *pMsgCallback) {
10211    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
10212    VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
10213    if (VK_SUCCESS == res) {
10214        lock_guard_t lock(global_lock);
10215        res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
10216    }
10217    return res;
10218}
10219
10220VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
10221                                                         const VkAllocationCallbacks *pAllocator) {
10222    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
10223    instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
10224    lock_guard_t lock(global_lock);
10225    layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
10226}
10227
10228VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
10229                                                 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
10230                                                 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
10231    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
10232    instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
10233}
10234
10235VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
10236    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
10237}
10238
10239VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
10240                                                              VkLayerProperties *pProperties) {
10241    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
10242}
10243
10244VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
10245                                                                    VkExtensionProperties *pProperties) {
10246    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
10247        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
10248
10249    return VK_ERROR_LAYER_NOT_PRESENT;
10250}
10251
10252VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
10253                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
10254    if (pLayerName && !strcmp(pLayerName, global_layer.layerName)) return util_GetExtensionProperties(0, NULL, pCount, pProperties);
10255
10256    assert(physicalDevice);
10257
10258    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10259    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
10260}
10261
10262VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroupsKHX(
10263    VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHX *pPhysicalDeviceGroupProperties) {
10264    bool skip = false;
10265    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
10266
10267    if (instance_data) {
10268        // For this instance, flag when EnumeratePhysicalDeviceGroupsKHX goes to QUERY_COUNT and then QUERY_DETAILS.
10269        if (NULL == pPhysicalDeviceGroupProperties) {
10270            instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_COUNT;
10271        } else {
10272            if (UNCALLED == instance_data->vkEnumeratePhysicalDeviceGroupsState) {
10273                // Flag warning here. You can call this without having queried the count, but it may not be
10274                // robust on platforms with multiple physical devices.
10275                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
10276                                VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
10277                                "Call sequence has vkEnumeratePhysicalDeviceGroupsKHX() w/ non-NULL "
10278                                "pPhysicalDeviceGroupProperties. You should first "
10279                                "call vkEnumeratePhysicalDeviceGroupsKHX() w/ NULL pPhysicalDeviceGroupProperties to query "
10280                                "pPhysicalDeviceGroupCount.");
10281            } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
10282            else if (instance_data->physical_device_groups_count != *pPhysicalDeviceGroupCount) {
10283                // Having actual count match count from app is not a requirement, so this can be a warning
10284                skip |=
10285                    log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
10286                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
10287                            "Call to vkEnumeratePhysicalDeviceGroupsKHX() w/ pPhysicalDeviceGroupCount value %u, but actual count "
10288                            "supported by this instance is %u.",
10289                            *pPhysicalDeviceGroupCount, instance_data->physical_device_groups_count);
10290            }
10291            instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_DETAILS;
10292        }
10293        if (skip) {
10294            return VK_ERROR_VALIDATION_FAILED_EXT;
10295        }
10296        VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroupsKHX(instance, pPhysicalDeviceGroupCount,
10297            pPhysicalDeviceGroupProperties);
10298        if (NULL == pPhysicalDeviceGroupProperties) {
10299            instance_data->physical_device_groups_count = *pPhysicalDeviceGroupCount;
10300        } else if (result == VK_SUCCESS) { // Save physical devices
10301            for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) {
10302                for (uint32_t j = 0; j < pPhysicalDeviceGroupProperties[i].physicalDeviceCount; j++) {
10303                    VkPhysicalDevice cur_phys_dev = pPhysicalDeviceGroupProperties[i].physicalDevices[j];
10304                    auto &phys_device_state = instance_data->physical_device_map[cur_phys_dev];
10305                    phys_device_state.phys_device = cur_phys_dev;
10306                    // Init actual features for each physical device
10307                    instance_data->dispatch_table.GetPhysicalDeviceFeatures(cur_phys_dev, &phys_device_state.features);
10308                }
10309            }
10310        }
10311        return result;
10312    } else {
10313        log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__,
10314                DEVLIMITS_INVALID_INSTANCE, "DL",
10315                "Invalid instance (0x%" PRIxLEAST64 ") passed into vkEnumeratePhysicalDeviceGroupsKHX().",
10316                HandleToUint64(instance));
10317    }
10318    return VK_ERROR_VALIDATION_FAILED_EXT;
10319}
10320
10321VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
10322                                                                 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
10323                                                                 const VkAllocationCallbacks *pAllocator,
10324                                                                 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
10325    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10326    VkResult result =
10327        dev_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
10328    if (VK_SUCCESS == result) {
10329        lock_guard_t lock(global_lock);
10330        // Shadow template createInfo for later updates
10331        safe_VkDescriptorUpdateTemplateCreateInfoKHR *local_create_info =
10332            new safe_VkDescriptorUpdateTemplateCreateInfoKHR(pCreateInfo);
10333        std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
10334        dev_data->desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state);
10335    }
10336    return result;
10337}
10338
10339VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
10340                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
10341                                                              const VkAllocationCallbacks *pAllocator) {
10342    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10343    unique_lock_t lock(global_lock);
10344    dev_data->desc_template_map.erase(descriptorUpdateTemplate);
10345    lock.unlock();
10346    dev_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
10347}
10348
10349// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSetsWithTemplate()
10350static void PostCallRecordUpdateDescriptorSetWithTemplateKHR(layer_data *device_data, VkDescriptorSet descriptorSet,
10351                                                             VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
10352                                                             const void *pData) {
10353    auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
10354    if (template_map_entry == device_data->desc_template_map.end()) {
10355        assert(0);
10356    }
10357
10358    cvdescriptorset::PerformUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_map_entry->second, pData);
10359}
10360
10361VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
10362                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
10363                                                              const void *pData) {
10364    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10365    device_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData);
10366
10367    PostCallRecordUpdateDescriptorSetWithTemplateKHR(device_data, descriptorSet, descriptorUpdateTemplate, pData);
10368}
10369
10370VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
10371                                                               VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
10372                                                               VkPipelineLayout layout, uint32_t set, const void *pData) {
10373    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
10374    dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData);
10375}
10376
10377static void PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_layer_data *instanceData,
10378                                                                     VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
10379                                                                     VkDisplayPlanePropertiesKHR *pProperties) {
10380    unique_lock_t lock(global_lock);
10381    auto physical_device_state = GetPhysicalDeviceState(instanceData, physicalDevice);
10382
10383    if (*pPropertyCount) {
10384        if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) {
10385            physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT;
10386        }
10387        physical_device_state->display_plane_property_count = *pPropertyCount;
10388    }
10389    if (pProperties) {
10390        if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_DETAILS) {
10391            physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_DETAILS;
10392        }
10393    }
10394}
10395
10396VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
10397                                                                          VkDisplayPlanePropertiesKHR *pProperties) {
10398    VkResult result = VK_SUCCESS;
10399    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10400
10401    result = instance_data->dispatch_table.GetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties);
10402
10403    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
10404        PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_data, physicalDevice, pPropertyCount, pProperties);
10405    }
10406
10407    return result;
10408}
10409
10410static bool ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_layer_data *instance_data,
10411                                                                    VkPhysicalDevice physicalDevice, uint32_t planeIndex,
10412                                                                    const char *api_name) {
10413    bool skip = false;
10414    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
10415    if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) {
10416        skip |= log_msg(
10417            instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
10418            HandleToUint64(physicalDevice), __LINE__, SWAPCHAIN_GET_SUPPORTED_DISPLAYS_WITHOUT_QUERY, "DL",
10419            "Potential problem with calling %s() without first querying vkGetPhysicalDeviceDisplayPlanePropertiesKHR.", api_name);
10420    } else {
10421        if (planeIndex >= physical_device_state->display_plane_property_count) {
10422            skip |= log_msg(
10423                instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
10424                HandleToUint64(physicalDevice), __LINE__, VALIDATION_ERROR_29c009c2, "DL",
10425                "%s(): planeIndex must be in the range [0, %d] that was returned by vkGetPhysicalDeviceDisplayPlanePropertiesKHR. "
10426                "Do you have the plane index hardcoded? %s",
10427                api_name, physical_device_state->display_plane_property_count - 1, validation_error_map[VALIDATION_ERROR_29c009c2]);
10428        }
10429    }
10430    return skip;
10431}
10432
10433static bool PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
10434                                                               uint32_t planeIndex) {
10435    bool skip = false;
10436    lock_guard_t lock(global_lock);
10437    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
10438                                                                    "vkGetDisplayPlaneSupportedDisplaysKHR");
10439    return skip;
10440}
10441
10442VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
10443                                                                   uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
10444    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10445    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10446    bool skip = PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_data, physicalDevice, planeIndex);
10447    if (!skip) {
10448        result =
10449            instance_data->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
10450    }
10451    return result;
10452}
10453
10454static bool PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
10455                                                          uint32_t planeIndex) {
10456    bool skip = false;
10457    lock_guard_t lock(global_lock);
10458    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
10459                                                                    "vkGetDisplayPlaneCapabilitiesKHR");
10460    return skip;
10461}
10462
10463VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
10464                                                              uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
10465    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10466    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10467    bool skip = PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_data, physicalDevice, planeIndex);
10468
10469    if (!skip) {
10470        result = instance_data->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
10471    }
10472
10473    return result;
10474}
10475
10476VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
10477    std::unique_lock<std::mutex> lock(global_lock);
10478    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10479    if (pNameInfo->pObjectName) {
10480        device_data->report_data->debugObjectNameMap->insert(
10481            std::make_pair<uint64_t, std::string>((uint64_t &&)pNameInfo->object, pNameInfo->pObjectName));
10482    } else {
10483        device_data->report_data->debugObjectNameMap->erase(pNameInfo->object);
10484    }
10485    lock.unlock();
10486    VkResult result = device_data->dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo);
10487    return result;
10488}
10489
10490VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(VkDevice device, VkDebugMarkerObjectTagInfoEXT *pTagInfo) {
10491    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10492    VkResult result = device_data->dispatch_table.DebugMarkerSetObjectTagEXT(device, pTagInfo);
10493    return result;
10494}
10495
10496VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
10497    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
10498    device_data->dispatch_table.CmdDebugMarkerBeginEXT(commandBuffer, pMarkerInfo);
10499}
10500
10501VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) {
10502    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
10503    device_data->dispatch_table.CmdDebugMarkerEndEXT(commandBuffer);
10504}
10505
10506VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerInsertEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
10507    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
10508    device_data->dispatch_table.CmdDebugMarkerInsertEXT(commandBuffer, pMarkerInfo);
10509}
10510
10511VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName);
10512VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);
10513VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName);
10514
10515// Map of all APIs to be intercepted by this layer
10516static const std::unordered_map<std::string, void*> name_to_funcptr_map = {
10517    {"vkGetInstanceProcAddr", (void*)GetInstanceProcAddr},
10518    {"vk_layerGetPhysicalDeviceProcAddr", (void*)GetPhysicalDeviceProcAddr},
10519    {"vkGetDeviceProcAddr", (void*)GetDeviceProcAddr},
10520    {"vkCreateInstance", (void*)CreateInstance},
10521    {"vkCreateDevice", (void*)CreateDevice},
10522    {"vkEnumeratePhysicalDevices", (void*)EnumeratePhysicalDevices},
10523    {"vkGetPhysicalDeviceQueueFamilyProperties", (void*)GetPhysicalDeviceQueueFamilyProperties},
10524    {"vkDestroyInstance", (void*)DestroyInstance},
10525    {"vkEnumerateInstanceLayerProperties", (void*)EnumerateInstanceLayerProperties},
10526    {"vkEnumerateDeviceLayerProperties", (void*)EnumerateDeviceLayerProperties},
10527    {"vkEnumerateInstanceExtensionProperties", (void*)EnumerateInstanceExtensionProperties},
10528    {"vkEnumerateDeviceExtensionProperties", (void*)EnumerateDeviceExtensionProperties},
10529    {"vkCreateDescriptorUpdateTemplateKHR", (void*)CreateDescriptorUpdateTemplateKHR},
10530    {"vkDestroyDescriptorUpdateTemplateKHR", (void*)DestroyDescriptorUpdateTemplateKHR},
10531    {"vkUpdateDescriptorSetWithTemplateKHR", (void*)UpdateDescriptorSetWithTemplateKHR},
10532    {"vkCmdPushDescriptorSetWithTemplateKHR", (void*)CmdPushDescriptorSetWithTemplateKHR},
10533    {"vkCreateSwapchainKHR", (void*)CreateSwapchainKHR},
10534    {"vkDestroySwapchainKHR", (void*)DestroySwapchainKHR},
10535    {"vkGetSwapchainImagesKHR", (void*)GetSwapchainImagesKHR},
10536    {"vkAcquireNextImageKHR", (void*)AcquireNextImageKHR},
10537    {"vkQueuePresentKHR", (void*)QueuePresentKHR},
10538    {"vkQueueSubmit", (void*)QueueSubmit},
10539    {"vkWaitForFences", (void*)WaitForFences},
10540    {"vkGetFenceStatus", (void*)GetFenceStatus},
10541    {"vkQueueWaitIdle", (void*)QueueWaitIdle},
10542    {"vkDeviceWaitIdle", (void*)DeviceWaitIdle},
10543    {"vkGetDeviceQueue", (void*)GetDeviceQueue},
10544    {"vkDestroyDevice", (void*)DestroyDevice},
10545    {"vkDestroyFence", (void*)DestroyFence},
10546    {"vkResetFences", (void*)ResetFences},
10547    {"vkDestroySemaphore", (void*)DestroySemaphore},
10548    {"vkDestroyEvent", (void*)DestroyEvent},
10549    {"vkDestroyQueryPool", (void*)DestroyQueryPool},
10550    {"vkDestroyBuffer", (void*)DestroyBuffer},
10551    {"vkDestroyBufferView", (void*)DestroyBufferView},
10552    {"vkDestroyImage", (void*)DestroyImage},
10553    {"vkDestroyImageView", (void*)DestroyImageView},
10554    {"vkDestroyShaderModule", (void*)DestroyShaderModule},
10555    {"vkDestroyPipeline", (void*)DestroyPipeline},
10556    {"vkDestroyPipelineLayout", (void*)DestroyPipelineLayout},
10557    {"vkDestroySampler", (void*)DestroySampler},
10558    {"vkDestroyDescriptorSetLayout", (void*)DestroyDescriptorSetLayout},
10559    {"vkDestroyDescriptorPool", (void*)DestroyDescriptorPool},
10560    {"vkDestroyFramebuffer", (void*)DestroyFramebuffer},
10561    {"vkDestroyRenderPass", (void*)DestroyRenderPass},
10562    {"vkCreateBuffer", (void*)CreateBuffer},
10563    {"vkCreateBufferView", (void*)CreateBufferView},
10564    {"vkCreateImage", (void*)CreateImage},
10565    {"vkCreateImageView", (void*)CreateImageView},
10566    {"vkCreateFence", (void*)CreateFence},
10567    {"vkCreatePipelineCache", (void*)CreatePipelineCache},
10568    {"vkDestroyPipelineCache", (void*)DestroyPipelineCache},
10569    {"vkGetPipelineCacheData", (void*)GetPipelineCacheData},
10570    {"vkMergePipelineCaches", (void*)MergePipelineCaches},
10571    {"vkCreateGraphicsPipelines", (void*)CreateGraphicsPipelines},
10572    {"vkCreateComputePipelines", (void*)CreateComputePipelines},
10573    {"vkCreateSampler", (void*)CreateSampler},
10574    {"vkCreateDescriptorSetLayout", (void*)CreateDescriptorSetLayout},
10575    {"vkCreatePipelineLayout", (void*)CreatePipelineLayout},
10576    {"vkCreateDescriptorPool", (void*)CreateDescriptorPool},
10577    {"vkResetDescriptorPool", (void*)ResetDescriptorPool},
10578    {"vkAllocateDescriptorSets", (void*)AllocateDescriptorSets},
10579    {"vkFreeDescriptorSets", (void*)FreeDescriptorSets},
10580    {"vkUpdateDescriptorSets", (void*)UpdateDescriptorSets},
10581    {"vkCreateCommandPool", (void*)CreateCommandPool},
10582    {"vkDestroyCommandPool", (void*)DestroyCommandPool},
10583    {"vkResetCommandPool", (void*)ResetCommandPool},
10584    {"vkCreateQueryPool", (void*)CreateQueryPool},
10585    {"vkAllocateCommandBuffers", (void*)AllocateCommandBuffers},
10586    {"vkFreeCommandBuffers", (void*)FreeCommandBuffers},
10587    {"vkBeginCommandBuffer", (void*)BeginCommandBuffer},
10588    {"vkEndCommandBuffer", (void*)EndCommandBuffer},
10589    {"vkResetCommandBuffer", (void*)ResetCommandBuffer},
10590    {"vkCmdBindPipeline", (void*)CmdBindPipeline},
10591    {"vkCmdSetViewport", (void*)CmdSetViewport},
10592    {"vkCmdSetScissor", (void*)CmdSetScissor},
10593    {"vkCmdSetLineWidth", (void*)CmdSetLineWidth},
10594    {"vkCmdSetDepthBias", (void*)CmdSetDepthBias},
10595    {"vkCmdSetBlendConstants", (void*)CmdSetBlendConstants},
10596    {"vkCmdSetDepthBounds", (void*)CmdSetDepthBounds},
10597    {"vkCmdSetStencilCompareMask", (void*)CmdSetStencilCompareMask},
10598    {"vkCmdSetStencilWriteMask", (void*)CmdSetStencilWriteMask},
10599    {"vkCmdSetStencilReference", (void*)CmdSetStencilReference},
10600    {"vkCmdBindDescriptorSets", (void*)CmdBindDescriptorSets},
10601    {"vkCmdBindVertexBuffers", (void*)CmdBindVertexBuffers},
10602    {"vkCmdBindIndexBuffer", (void*)CmdBindIndexBuffer},
10603    {"vkCmdDraw", (void*)CmdDraw},
10604    {"vkCmdDrawIndexed", (void*)CmdDrawIndexed},
10605    {"vkCmdDrawIndirect", (void*)CmdDrawIndirect},
10606    {"vkCmdDrawIndexedIndirect", (void*)CmdDrawIndexedIndirect},
10607    {"vkCmdDispatch", (void*)CmdDispatch},
10608    {"vkCmdDispatchIndirect", (void*)CmdDispatchIndirect},
10609    {"vkCmdCopyBuffer", (void*)CmdCopyBuffer},
10610    {"vkCmdCopyImage", (void*)CmdCopyImage},
10611    {"vkCmdBlitImage", (void*)CmdBlitImage},
10612    {"vkCmdCopyBufferToImage", (void*)CmdCopyBufferToImage},
10613    {"vkCmdCopyImageToBuffer", (void*)CmdCopyImageToBuffer},
10614    {"vkCmdUpdateBuffer", (void*)CmdUpdateBuffer},
10615    {"vkCmdFillBuffer", (void*)CmdFillBuffer},
10616    {"vkCmdClearColorImage", (void*)CmdClearColorImage},
10617    {"vkCmdClearDepthStencilImage", (void*)CmdClearDepthStencilImage},
10618    {"vkCmdClearAttachments", (void*)CmdClearAttachments},
10619    {"vkCmdResolveImage", (void*)CmdResolveImage},
10620    {"vkGetImageSubresourceLayout", (void*)GetImageSubresourceLayout},
10621    {"vkCmdSetEvent", (void*)CmdSetEvent},
10622    {"vkCmdResetEvent", (void*)CmdResetEvent},
10623    {"vkCmdWaitEvents", (void*)CmdWaitEvents},
10624    {"vkCmdPipelineBarrier", (void*)CmdPipelineBarrier},
10625    {"vkCmdBeginQuery", (void*)CmdBeginQuery},
10626    {"vkCmdEndQuery", (void*)CmdEndQuery},
10627    {"vkCmdResetQueryPool", (void*)CmdResetQueryPool},
10628    {"vkCmdCopyQueryPoolResults", (void*)CmdCopyQueryPoolResults},
10629    {"vkCmdPushConstants", (void*)CmdPushConstants},
10630    {"vkCmdWriteTimestamp", (void*)CmdWriteTimestamp},
10631    {"vkCreateFramebuffer", (void*)CreateFramebuffer},
10632    {"vkCreateShaderModule", (void*)CreateShaderModule},
10633    {"vkCreateRenderPass", (void*)CreateRenderPass},
10634    {"vkCmdBeginRenderPass", (void*)CmdBeginRenderPass},
10635    {"vkCmdNextSubpass", (void*)CmdNextSubpass},
10636    {"vkCmdEndRenderPass", (void*)CmdEndRenderPass},
10637    {"vkCmdExecuteCommands", (void*)CmdExecuteCommands},
10638    {"vkCmdDebugMarkerBeginEXT", (void*)CmdDebugMarkerBeginEXT},
10639    {"vkCmdDebugMarkerEndEXT", (void*)CmdDebugMarkerEndEXT},
10640    {"vkCmdDebugMarkerInsertEXT", (void*)CmdDebugMarkerInsertEXT},
10641    {"vkDebugMarkerSetObjectNameEXT", (void*)DebugMarkerSetObjectNameEXT},
10642    {"vkDebugMarkerSetObjectTagEXT", (void*)DebugMarkerSetObjectTagEXT},
10643    {"vkSetEvent", (void*)SetEvent},
10644    {"vkMapMemory", (void*)MapMemory},
10645    {"vkUnmapMemory", (void*)UnmapMemory},
10646    {"vkFlushMappedMemoryRanges", (void*)FlushMappedMemoryRanges},
10647    {"vkInvalidateMappedMemoryRanges", (void*)InvalidateMappedMemoryRanges},
10648    {"vkAllocateMemory", (void*)AllocateMemory},
10649    {"vkFreeMemory", (void*)FreeMemory},
10650    {"vkBindBufferMemory", (void*)BindBufferMemory},
10651    {"vkGetBufferMemoryRequirements", (void*)GetBufferMemoryRequirements},
10652    {"vkGetImageMemoryRequirements", (void*)GetImageMemoryRequirements},
10653    {"vkGetQueryPoolResults", (void*)GetQueryPoolResults},
10654    {"vkBindImageMemory", (void*)BindImageMemory},
10655    {"vkQueueBindSparse", (void*)QueueBindSparse},
10656    {"vkCreateSemaphore", (void*)CreateSemaphore},
10657    {"vkCreateEvent", (void*)CreateEvent},
10658#ifdef VK_USE_PLATFORM_ANDROID_KHR
10659    {"vkCreateAndroidSurfaceKHR", (void*)CreateAndroidSurfaceKHR},
10660#endif
10661#ifdef VK_USE_PLATFORM_MIR_KHR
10662    {"vkCreateMirSurfaceKHR", (void*)CreateMirSurfaceKHR},
10663    {"vkGetPhysicalDeviceMirPresentationSupportKHR", (void*)GetPhysicalDeviceMirPresentationSupportKHR},
10664#endif
10665#ifdef VK_USE_PLATFORM_WAYLAND_KHR
10666    {"vkCreateWaylandSurfaceKHR", (void*)CreateWaylandSurfaceKHR},
10667    {"vkGetPhysicalDeviceWaylandPresentationSupportKHR", (void*)GetPhysicalDeviceWaylandPresentationSupportKHR},
10668#endif
10669#ifdef VK_USE_PLATFORM_WIN32_KHR
10670    {"vkCreateWin32SurfaceKHR", (void*)CreateWin32SurfaceKHR},
10671    {"vkGetPhysicalDeviceWin32PresentationSupportKHR", (void*)GetPhysicalDeviceWin32PresentationSupportKHR},
10672#endif
10673#ifdef VK_USE_PLATFORM_XCB_KHR
10674    {"vkCreateXcbSurfaceKHR", (void*)CreateXcbSurfaceKHR},
10675    {"vkGetPhysicalDeviceXcbPresentationSupportKHR", (void*)GetPhysicalDeviceXcbPresentationSupportKHR},
10676#endif
10677#ifdef VK_USE_PLATFORM_XLIB_KHR
10678    {"vkCreateXlibSurfaceKHR", (void*)CreateXlibSurfaceKHR},
10679    {"vkGetPhysicalDeviceXlibPresentationSupportKHR", (void*)GetPhysicalDeviceXlibPresentationSupportKHR},
10680#endif
10681    {"vkCreateDisplayPlaneSurfaceKHR", (void*)CreateDisplayPlaneSurfaceKHR},
10682    {"vkDestroySurfaceKHR", (void*)DestroySurfaceKHR},
10683    {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", (void*)GetPhysicalDeviceSurfaceCapabilitiesKHR},
10684    {"vkGetPhysicalDeviceSurfaceCapabilities2KHR", (void*)GetPhysicalDeviceSurfaceCapabilities2KHR},
10685    {"vkGetPhysicalDeviceSurfaceCapabilities2EXT", (void*)GetPhysicalDeviceSurfaceCapabilities2EXT},
10686    {"vkGetPhysicalDeviceSurfaceSupportKHR", (void*)GetPhysicalDeviceSurfaceSupportKHR},
10687    {"vkGetPhysicalDeviceSurfacePresentModesKHR", (void*)GetPhysicalDeviceSurfacePresentModesKHR},
10688    {"vkGetPhysicalDeviceSurfaceFormatsKHR", (void*)GetPhysicalDeviceSurfaceFormatsKHR},
10689    {"vkGetPhysicalDeviceSurfaceFormats2KHR", (void*)GetPhysicalDeviceSurfaceFormats2KHR},
10690    {"vkGetPhysicalDeviceQueueFamilyProperties2KHR", (void*)GetPhysicalDeviceQueueFamilyProperties2KHR},
10691    {"vkEnumeratePhysicalDeviceGroupsKHX", (void*)EnumeratePhysicalDeviceGroupsKHX},
10692    {"vkCreateDebugReportCallbackEXT", (void*)CreateDebugReportCallbackEXT},
10693    {"vkDestroyDebugReportCallbackEXT", (void*)DestroyDebugReportCallbackEXT},
10694    {"vkDebugReportMessageEXT", (void*)DebugReportMessageEXT},
10695    {"vkGetPhysicalDeviceDisplayPlanePropertiesKHR", (void*)GetPhysicalDeviceDisplayPlanePropertiesKHR},
10696    {"GetDisplayPlaneSupportedDisplaysKHR", (void*)GetDisplayPlaneSupportedDisplaysKHR},
10697    {"GetDisplayPlaneCapabilitiesKHR", (void*)GetDisplayPlaneCapabilitiesKHR},
10698};
10699
10700VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
10701    assert(device);
10702    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10703
10704    // Is API to be intercepted by this layer?
10705    const auto &item = name_to_funcptr_map.find(funcName);
10706    if (item != name_to_funcptr_map.end()) {
10707        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
10708    }
10709
10710    auto &table = device_data->dispatch_table;
10711    if (!table.GetDeviceProcAddr) return nullptr;
10712    return table.GetDeviceProcAddr(device, funcName);
10713}
10714
10715VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
10716    instance_layer_data *instance_data;
10717    // Is API to be intercepted by this layer?
10718    const auto &item = name_to_funcptr_map.find(funcName);
10719    if (item != name_to_funcptr_map.end()) {
10720        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
10721    }
10722
10723    instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
10724    auto &table = instance_data->dispatch_table;
10725    if (!table.GetInstanceProcAddr) return nullptr;
10726    return table.GetInstanceProcAddr(instance, funcName);
10727}
10728
10729VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
10730    assert(instance);
10731    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
10732
10733    auto &table = instance_data->dispatch_table;
10734    if (!table.GetPhysicalDeviceProcAddr) return nullptr;
10735    return table.GetPhysicalDeviceProcAddr(instance, funcName);
10736}
10737
10738}  // namespace core_validation
10739
10740// loader-layer interface v0, just wrappers since there is only a layer
10741
10742VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
10743                                                                                      VkExtensionProperties *pProperties) {
10744    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
10745}
10746
10747VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
10748                                                                                  VkLayerProperties *pProperties) {
10749    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
10750}
10751
10752VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
10753                                                                                VkLayerProperties *pProperties) {
10754    // the layer command handles VK_NULL_HANDLE just fine internally
10755    assert(physicalDevice == VK_NULL_HANDLE);
10756    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
10757}
10758
10759VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
10760                                                                                    const char *pLayerName, uint32_t *pCount,
10761                                                                                    VkExtensionProperties *pProperties) {
10762    // the layer command handles VK_NULL_HANDLE just fine internally
10763    assert(physicalDevice == VK_NULL_HANDLE);
10764    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
10765}
10766
10767VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
10768    return core_validation::GetDeviceProcAddr(dev, funcName);
10769}
10770
10771VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
10772    return core_validation::GetInstanceProcAddr(instance, funcName);
10773}
10774
10775VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
10776                                                                                           const char *funcName) {
10777    return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
10778}
10779
10780VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
10781    assert(pVersionStruct != NULL);
10782    assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);
10783
10784    // Fill in the function pointers if our version is at least capable of having the structure contain them.
10785    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
10786        pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
10787        pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
10788        pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
10789    }
10790
10791    if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
10792        core_validation::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
10793    } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
10794        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
10795    }
10796
10797    return VK_SUCCESS;
10798}
10799