core_validation.cpp revision 827e8708bfc431aa792cba005ebf9f1fe35cc7e3
/* Copyright (c) 2015-2017 The Khronos Group Inc.
 * Copyright (c) 2015-2017 Valve Corporation
 * Copyright (c) 2015-2017 LunarG, Inc.
 * Copyright (C) 2015-2017 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: Dustin Graves <dustin@lunarg.com>
 * Author: Jeremy Hayes <jeremy@lunarg.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Karl Schultz <karl@lunarg.com>
 * Author: Mark Young <marky@lunarg.com>
 * Author: Mike Schuchardt <mikes@lunarg.com>
 * Author: Mike Weiblen <mikew@lunarg.com>
 * Author: Tony Barbour <tony@LunarG.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

#include <algorithm>
#include <assert.h>
#include <inttypes.h>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "core_validation.h"
#include "buffer_validation.h"
#include "shader_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)      \
    {                        \
        printf(__VA_ARGS__); \
        printf("\n");        \
    }
#endif
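// Illustrative usage (not from the original source): LOGCONSOLE("Layer %s enabled", name)
// expands to an __android_log_print() call on Android, and to printf() plus a trailing newline elsewhere.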

// TODO: remove on NDK update (r15 will probably have proper STL impl)
#ifdef __ANDROID__
namespace std {

template <typename T>
std::string to_string(T var) {
    std::ostringstream ss;
    ss << var;
    return ss.str();
}
}  // namespace std
#endif
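// Illustrative behavior (assumption): with the shim above, std::to_string(42) still yields "42"
// on older NDK toolchains whose libstdc++ lacks the standard to_string() overloads.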

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

namespace core_validation {

using std::unordered_map;
using std::unordered_set;
using std::unique_ptr;
using std::vector;
using std::string;
using std::stringstream;
using std::max;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
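// Note (editorial assumption): both sentinels sit at the very top of the 64-bit handle range
// (~0 and ~0 - 1), values a conforming driver is not expected to hand out as real VkDeviceMemory handles.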

// A special value of (0xFFFFFFFF, 0xFFFFFFFF) indicates that the surface size will be determined
// by the extent of a swapchain targeting the surface.
static const uint32_t kSurfaceSizeFromSwapchain = 0xFFFFFFFFu;

struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
    uint32_t physical_devices_count = 0;
    CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
    uint32_t physical_device_groups_count = 0;
    CHECK_DISABLED disabled = {};

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;

    InstanceExtensions extensions;
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;

    DeviceExtensions extensions = {};
    unordered_set<VkQueue> queues;  // All queues under given device
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, std::unique_ptr<cvdescriptorset::DescriptorSetLayout>> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_STATE> queueMap;
    unordered_map<VkEvent, EVENT_STATE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_STATE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    unordered_map<VkDescriptorUpdateTemplateKHR, unique_ptr<TEMPLATE_STATE>> desc_template_map;
    unordered_map<VkSwapchainKHR, std::unique_ptr<SWAPCHAIN_NODE>> swapchainMap;

    VkDevice device = VK_NULL_HANDLE;
    VkPhysicalDevice physical_device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
    VkPhysicalDeviceProperties phys_dev_props = {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;

static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo>
void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
        }
    }
}
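// Illustrative correct ordering (not from the original source): list this layer before unique_objects,
//   const char *layers[] = {"VK_LAYER_LUNARG_core_validation", "VK_LAYER_GOOGLE_unique_objects"};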

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *GetImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_STATE *GetSamplerState(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
IMAGE_STATE *GetImageState(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer state ptr for specified buffer or else NULL
BUFFER_STATE *GetBufferState(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->swapchainMap.find(swapchain);
    if (swp_it == dev_data->swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return buffer view state ptr for specified bufferView or else NULL
BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
    auto bv_it = dev_data->bufferViewMap.find(buffer_view);
    if (bv_it == dev_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}
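// Usage pattern (illustrative, not from the original source): every accessor above returns nullptr
// on an unknown handle, so callers null-check before dereferencing:
//   auto view_state = GetImageViewState(dev_data, image_view);
//   if (view_state) { /* safe to use view_state->create_info, etc. */ }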

FENCE_NODE *GetFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_STATE *GetEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *GetQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_STATE *GetQueueState(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *GetSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

SURFACE_STATE *GetSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    auto it = instance_data->surface_map.find(surface);
    if (it == instance_data->surface_map.end()) {
        return nullptr;
    }
    return &it->second;
}

DeviceExtensions const *GetEnabledExtensions(layer_data const *dev_data) {
    return &dev_data->extensions;
}

// Return ptr to memory binding for given handle of specified type
static BINDABLE *GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
    switch (type) {
        case kVulkanObjectTypeImage:
            return GetImageState(dev_data, VkImage(handle));
        case kVulkanObjectTypeBuffer:
            return GetBufferState(dev_data, VkBuffer(handle));
        default:
            break;
    }
    return nullptr;
}
// prototype
GLOBAL_CB_NODE *GetCBNode(layer_data const *, const VkCommandBuffer);

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *dev_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    dev_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle, VulkanObjectType type,
                                  const char *functionName) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, HandleToUint64(mem), object_string[type], bound_object_handle);
        }
    }
    return false;
}
// For given image_state
//  If mem is special swapchain key, then verify that image_state valid member is true
//  Else verify that the image's bound memory range is valid
bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_state->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           HandleToUint64(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, HandleToUint64(image_state->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), kVulkanObjectTypeImage,
                                     functionName);
    }
    return false;
}
// For given buffer_state, verify that the range it's bound to is valid
bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer,
                                 functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_state to valid param value
//  Else set the image's bound memory range to valid param value
void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_state->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
    SetMemoryValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), valid);
}
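// Illustrative lifecycle (assumption, not from the original source): a transfer that fills a buffer
// marks its range valid, e.g. SetBufferMemoryValid(dev_data, dst_buffer_state, true), so a later
// ValidateBufferMemoryIsValid() call on the same buffer no longer warns about uninitialized reads.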

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    sampler_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(sampler_state->sampler), kVulkanObjectTypeSampler});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        for (auto mem_binding : image_state->GetBoundMemory()) {
            DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
            if (pMemInfo) {
                pMemInfo->cb_bindings.insert(cb_node);
                // Now update CBInfo's Mem reference list
                cb_node->memObjs.insert(mem_binding);
            }
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({HandleToUint64(image_state->image), kVulkanObjectTypeImage});
        image_state->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(view_state->image_view), kVulkanObjectTypeImageView});
    auto image_state = GetImageState(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_state) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
    // First update CB binding in MemObj mini CB list
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
        if (pMemInfo) {
            pMemInfo->cb_bindings.insert(cb_node);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(mem_binding);
        }
    }
    // Now update cb binding for buffer
    cb_node->object_bindings.insert({HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer});
    buffer_state->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(view_state->buffer_view), kVulkanObjectTypeBufferView});
    auto buffer_state = GetBufferState(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
    }
}
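// Net effect (illustrative, not from the original source): binding a view to a command buffer links
// the view, its underlying image/buffer, and that resource's memory back to cb_node, so destroying
// any one of them can be used to invalidate the recorded command buffer.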

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (cb_node) {
        if (cb_node->memObjs.size() > 0) {
            for (auto mem : cb_node->memObjs) {
                DEVICE_MEM_INFO *pInfo = GetMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->cb_bindings.erase(cb_node);
                }
            }
            cb_node->memObjs.clear();
        }
        cb_node->validate_functions.clear();
    }
}

// Clear a single object binding from given memory object; no error is reported if the binding is absent
static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
    if (mem_info) {
        mem_info->obj_bindings.erase({handle, type});
    }
    return false;
}

// ClearMemoryObjectBindings clears the binding of objects to memory
//  For the given object it pulls the memory bindings and makes sure that the bindings
//  no longer refer to the object being cleared. This occurs when objects are destroyed.
bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
    bool skip = false;
    BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
    if (mem_binding) {
        if (!mem_binding->sparse) {
            skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
        } else {  // Sparse, clear all bindings
            for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
                skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
            }
        }
    }
    return skip;
}

// For given mem object, verify that it is not null or UNBOUND; if it is, report an error. Return the skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
                                                      " used with no memory bound. Memory should be bound by calling "
                                                      "vkBind%sMemory(). %s",
                         api_name, type_name, handle, type_name, validation_error_map[error_code]);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
                                                      " used with no memory bound and previously bound memory was freed. "
                                                      "Memory must not be freed prior to this operation. %s",
                         api_name, type_name, handle, validation_error_map[error_code]);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
                                  UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), api_name, "Image",
                                          error_code);
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
                                   UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), api_name,
                                          "Buffer", error_code);
    }
    return result;
}
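// Note (illustrative): resources created with *_CREATE_SPARSE_BINDING_BIT are exempt above because
// sparse resources are legally usable without a single dense binding; their bindings are tracked in
// BINDABLE::sparse_bindings instead.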

// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object.
// Corresponding valid usage checks are in ValidateSetMemBinding().
static void SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type, const char *apiName) {
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // For image objects, make sure default memory state is correctly set
            // TODO : What's the best/correct way to handle this?
            if (kVulkanObjectTypeImage == type) {
                auto const image_state = GetImageState(dev_data, VkImage(handle));
                if (image_state) {
                    VkImageCreateInfo ici = image_state->createInfo;
                    if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                        // TODO::  More memory state transition stuff.
                    }
                }
            }
            mem_binding->binding.mem = mem;
        }
    }
}

// Valid usage checks for a call to SetMemBinding().
// For the NULL mem case there is nothing to check here; otherwise:
//  Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
static bool ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
                                  const char *apiName) {
    bool skip = false;
    // Binding validation only applies when a non-NULL memory object is being bound
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        if (mem_binding->sparse) {
            UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_1740082a;
            const char *handle_type = "IMAGE";
            if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
                error_code = VALIDATION_ERROR_1700080c;
                handle_type = "BUFFER";
            } else {
                assert(strcmp(apiName, "vkBindImageMemory()") == 0);
            }
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem), __LINE__, error_code, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which was created with sparse memory flags (VK_%s_CREATE_SPARSE_*_BIT). %s",
                            apiName, HandleToUint64(mem), handle, handle_type, validation_error_map[error_code]);
        }
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = GetMemObjInfo(dev_data, mem_binding->binding.mem);
            if (prev_binding) {
                UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_17400828;
                if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
                    error_code = VALIDATION_ERROR_1700080a;
                } else {
                    assert(strcmp(apiName, "vkBindImageMemory()") == 0);
                }
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(mem), __LINE__, error_code, "MEM",
                                "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                ") which has already been bound to mem object 0x%" PRIxLEAST64 ". %s",
                                apiName, HandleToUint64(mem), handle, HandleToUint64(prev_binding->mem),
                                validation_error_map[error_code]);
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                ") which was previously bound to memory that has since been freed. Memory bindings are immutable "
                                "in Vulkan so this attempt to bind to new memory is not allowed.",
                                apiName, HandleToUint64(mem), handle);
            }
        }
    }
    return skip;
}
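// Call pairing (assumption, based on the comments above): the vkBindImageMemory/vkBindBufferMemory
// intercepts run ValidateSetMemBinding() before dispatching to the driver and SetMemBinding() after
// a successful bind, so the tracked state is never half-updated.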

// For the NULL mem case, clear any previous binding. Otherwise:
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return skip value; no validation failures are currently reported here
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
    bool skip = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        assert(mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, binding.mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            mem_binding->sparse_bindings.insert(binding);
        }
    }
    return skip;
}

// Check object status for selected flag state
static bool validate_status(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
                            const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    if (!(pNode->status & status_mask)) {
        char const *const message = validation_error_map[msg_code];
        return log_msg(dev_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(pNode->commandBuffer), __LINE__, msg_code, "DS", "command buffer object 0x%p: %s. %s.",
                       pNode->commandBuffer, fail_msg, message);
    }
    return false;
}

// Retrieve pipeline node ptr for given pipeline object
static PIPELINE_STATE *getPipelineState(layer_data const *dev_data, VkPipeline pipeline) {
    auto it = dev_data->pipelineMap.find(pipeline);
    if (it == dev_data->pipelineMap.end()) {
        return nullptr;
    }
    return it->second;
}

RENDER_PASS_STATE *GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass) {
    auto it = dev_data->renderPassMap.find(renderpass);
    if (it == dev_data->renderPassMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

FRAMEBUFFER_STATE *GetFramebufferState(const layer_data *dev_data, VkFramebuffer framebuffer) {
    auto it = dev_data->frameBufferMap.find(framebuffer);
    if (it == dev_data->frameBufferMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

cvdescriptorset::DescriptorSetLayout const *GetDescriptorSetLayout(layer_data const *dev_data, VkDescriptorSetLayout dsLayout) {
    auto it = dev_data->descriptorSetLayoutMap.find(dsLayout);
    if (it == dev_data->descriptorSetLayoutMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
    auto it = dev_data->pipelineLayoutMap.find(pipeLayout);
    if (it == dev_data->pipelineLayoutMap.end()) {
        return nullptr;
    }
    return &it->second;
}

shader_module const *GetShaderModuleState(layer_data const *dev_data, VkShaderModule module) {
    auto it = dev_data->shaderModuleMap.find(module);
    if (it == dev_data->shaderModuleMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

// Return true if for a given PSO, the given state enum is dynamic, else return false
static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
        }
    }
    return false;
}

// Validate state stored as flags at time of draw call
static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
                                      UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    bool result = false;
    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic line width state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pRasterizationState &&
        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic depth bias state not set for this command buffer", msg_code);
    }
    if (pPipe->blendConstantsEnabled) {
        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic blend constants state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic depth bounds state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil read mask state not set for this command buffer", msg_code);
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil write mask state not set for this command buffer", msg_code);
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil reference state not set for this command buffer", msg_code);
    }
    if (indexed) {
        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
    }

    return result;
}

// Verify attachment reference compatibility according to spec
//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & other array must match this
//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
//   to make sure that format and samples counts match.
//  If not, they are not compatible.
static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
                                             const VkAttachmentDescription *pSecondaryAttachments) {
    // Check potential NULL cases first to avoid nullptr issues later
    if (pPrimary == nullptr) {
        if (pSecondary == nullptr) {
            return true;
        }
        return false;
    } else if (pSecondary == nullptr) {
        return false;
    }
    if (index >= primaryCount) {  // Check secondary as if primary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment) return true;
    } else if (index >= secondaryCount) {  // Check primary as if secondary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment) return true;
    } else {  // Format and sample count must match
        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return true;
        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return false;
        }
        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
             pSecondaryAttachments[pSecondary[index].attachment].format) &&
            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
             pSecondaryAttachments[pSecondary[index].attachment].samples))
            return true;
    }
    // Format and sample counts didn't match
    return false;
}
// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
// For given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
static bool verify_renderpass_compatibility(const layer_data *dev_data, const VkRenderPassCreateInfo *primaryRPCI,
                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
        stringstream errorStr;
        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
        errorMsg = errorStr.str();
        return false;
    }
    for (uint32_t spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         primaryColorCount, primaryRPCI->pAttachments,
                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }

        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, 1,
                                              primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, secondaryRPCI->pAttachments)) {
            stringstream errorStr;
            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
            errorMsg = errorStr.str();
            return false;
        }

        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
        for (uint32_t i = 0; i < inputMax; ++i) {
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
    }
    return true;
}

// Return Set node ptr for specified set or else NULL
cvdescriptorset::DescriptorSet *GetSetNode(const layer_data *dev_data, VkDescriptorSet set) {
    auto set_it = dev_data->setMap.find(set);
    if (set_it == dev_data->setMap.end()) {
        return NULL;
    }
    return set_it->second;
}

// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}

// Write the indices of the set bits in 'bits' to stream 's' as a comma-separated list
static void list_bits(std::ostream &s, uint32_t bits) {
    for (int i = 0; i < 32 && bits; i++) {
        if (bits & (1 << i)) {
            s << i;
            bits &= ~(1 << i);
            if (bits) {
                s << ",";
            }
        }
    }
}
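// Example (illustrative): for bits == 0x0B (bits 0, 1, and 3 set), list_bits() writes "0,1,3".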

// Validate draw-time state related to the PSO
static bool ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
                                          PIPELINE_STATE const *pPipeline) {
    bool skip = false;

    // Verify vertex binding
    if (pPipeline->vertexBindingDescriptions.size() > 0) {
        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                            "The Pipeline State Object (0x%" PRIxLEAST64
                            ") expects that this Command Buffer's vertex binding Index %u "
                            "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
                            "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
                            HandleToUint64(state.pipeline_state->pipeline), vertex_binding, i, vertex_binding);
            }
        }
    } else {
        if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), __LINE__,
                            DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                            "Vertex buffers are bound to command buffer (0x%p"
                            ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
                            pCB->commandBuffer, HandleToUint64(state.pipeline_state->pipeline));
        }
    }
    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
    // Skip check if rasterization is disabled or there is no viewport.
    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
        pPipeline->graphicsPipelineCI.pViewportState) {
        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);

        if (dynViewport) {
            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
            if (missingViewportMask) {
                std::stringstream ss;
                ss << "Dynamic viewport(s) ";
                list_bits(ss, missingViewportMask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
            }
        }

        if (dynScissor) {
            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
            if (missingScissorMask) {
                std::stringstream ss;
                ss << "Dynamic scissor(s) ";
                list_bits(ss, missingScissorMask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
            }
        }
    }

    // Verify that any MSAA request in PSO matches sample# in bound FB
    // Skip the check if rasterization is disabled.
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
        if (pCB->activeRenderPass) {
            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
            uint32_t i;
            unsigned subpass_num_samples = 0;

            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
                auto attachment = subpass_desc->pColorAttachments[i].attachment;
                if (attachment != VK_ATTACHMENT_UNUSED)
                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_desc->pDepthStencilAttachment &&
                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                                "Num samples mismatch! At draw-time in Pipeline (0x%" PRIxLEAST64
                                ") with %u samples while current RenderPass (0x%" PRIxLEAST64 ") w/ %u samples!",
                                HandleToUint64(pPipeline->pipeline), pso_num_samples,
                                HandleToUint64(pCB->activeRenderPass->renderPass), subpass_num_samples);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                            "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
                            HandleToUint64(pPipeline->pipeline));
        }
    }
    // Verify that PSO creation renderPass is compatible with active renderPass
    if (pCB->activeRenderPass) {
        std::string err_string;
        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
            !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
                                             err_string)) {
            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                            "At Draw time the active render pass (0x%" PRIxLEAST64
                            ") is incompatible w/ gfx pipeline "
                            "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                            HandleToUint64(pCB->activeRenderPass->renderPass), HandleToUint64(pPipeline->pipeline),
                            HandleToUint64(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
        }

        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                            "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
                            pCB->activeSubpass);
        }
    }
    // TODO : Add more checks here

    return skip;
}

// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool verify_set_layout_compatibility(const cvdescriptorset::DescriptorSet *descriptor_set,
                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
                                            string &errorMsg) {
    auto num_sets = pipeline_layout->set_layouts.size();
    if (layoutIndex >= num_sets) {
        stringstream errorStr;
        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
                 << layoutIndex;
        errorMsg = errorStr.str();
        return false;
    }
    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
    return descriptor_set->IsCompatible(layout_node, &errorMsg);
}
1066
1067// Validate overall state at the time of a draw call
1068static bool ValidateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const bool indexed,
1069                              const VkPipelineBindPoint bind_point, const char *function,
1070                              UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
1071    bool result = false;
1072    auto const &state = cb_node->lastBound[bind_point];
1073    PIPELINE_STATE *pPipe = state.pipeline_state;
1074    if (nullptr == pPipe) {
1075        result |= log_msg(
1076            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1077            HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
1078            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
1079        // Early return as any further checks below will be busted w/o a pipeline
1080        return result;
1081    }
1082    // First check flag states
1083    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
1084        result |= validate_draw_state_flags(dev_data, cb_node, pPipe, indexed, msg_code);
1085
1086    // Now complete other state checks
1087    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
1088        string errorString;
1089        auto pipeline_layout = pPipe->pipeline_layout;
1090
1091        for (const auto &set_binding_pair : pPipe->active_slots) {
1092            uint32_t setIndex = set_binding_pair.first;
1093            // If valid set is not bound throw an error
1094            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
1095                result |=
1096                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1097                            HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
1098                            "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.",
1099                            HandleToUint64(pPipe->pipeline), setIndex);
1100            } else if (!verify_set_layout_compatibility(state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
1101                                                        errorString)) {
1102                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
1103                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
1104                result |=
1105                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1106                            HandleToUint64(setHandle), __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
1107                            "VkDescriptorSet (0x%" PRIxLEAST64
1108                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
1109                            HandleToUint64(setHandle), setIndex, HandleToUint64(pipeline_layout.layout), errorString.c_str());
1110            } else {  // Valid set is bound and layout compatible, validate that it's updated
1111                // Pull the set node
1112                cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1113                // Validate the draw-time state for this descriptor set
1114                std::string err_str;
1115                if (!descriptor_set->ValidateDrawState(set_binding_pair.second, state.dynamicOffsets[setIndex], cb_node, function,
1116                                                       &err_str)) {
1117                    auto set = descriptor_set->GetSet();
1118                    result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1119                                      VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(set), __LINE__,
1120                                      DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
1121                                      "Descriptor set 0x%" PRIxLEAST64 " encountered the following validation error at %s time: %s",
1122                                      HandleToUint64(set), function, err_str.c_str());
1123                }
1124            }
1125        }
1126    }
1127
1128    // Check general pipeline state that needs to be validated at drawtime
1129    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) result |= ValidatePipelineDrawtimeState(dev_data, state, cb_node, pPipe);
1130
1131    return result;
1132}
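// Illustrative call site for ValidateDrawState() above (a sketch only; the error code passed in is
// draw-specific, and the one shown here is a placeholder):
//   skip |= ValidateDrawState(dev_data, cb_state, true /*indexed*/, VK_PIPELINE_BIND_POINT_GRAPHICS,
//                             "vkCmdDrawIndexed()", VALIDATION_ERROR_xxx);
// with UpdateDrawState() below invoked afterwards, once the draw is actually recorded.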
1133
1134static void UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
1135    auto const &state = cb_state->lastBound[bind_point];
1136    PIPELINE_STATE *pPipe = state.pipeline_state;
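    // Note: assumes ValidateDrawState() has already rejected draws with no bound pipeline; pPipe is
    // dereferenced below without a null check.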
1137    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
1138        for (const auto &set_binding_pair : pPipe->active_slots) {
1139            uint32_t setIndex = set_binding_pair.first;
1140            // Pull the set node
1141            cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1142            // Bind this set and its active descriptor resources to the command buffer
1143            descriptor_set->BindCommandBuffer(cb_state, set_binding_pair.second);
1144            // For given active slots record updated images & buffers
1145            descriptor_set->GetStorageUpdates(set_binding_pair.second, &cb_state->updateBuffers, &cb_state->updateImages);
1146        }
1147    }
1148    if (!pPipe->vertexBindingDescriptions.empty()) {
1149        cb_state->vertex_buffer_used = true;
1150    }
1151}
1152
1153// Validate HW line width capabilities prior to setting requested line width.
1154static bool verifyLineWidth(layer_data *dev_data, DRAW_STATE_ERROR dsError, VulkanObjectType object_type, const uint64_t &target,
1155                            float lineWidth) {
1156    bool skip = false;
1157
1158    // First check to see if the physical device supports wide lines.
1159    if ((VK_FALSE == dev_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
1160        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object_type], target, __LINE__,
1161                        dsError, "DS",
1162                        "Attempt to set lineWidth to %f but physical device wideLines feature "
1163                        "not supported/enabled so lineWidth must be 1.0f!",
1164                        lineWidth);
1165    } else {
1166        // Otherwise, make sure the width falls in the valid range.
1167        if ((dev_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
1168            (dev_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
1169            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object_type], target,
1170                            __LINE__, dsError, "DS",
1171                            "Attempt to set lineWidth to %f but physical device limits line width "
1172                            "to between [%f, %f]!",
1173                            lineWidth, dev_data->phys_dev_properties.properties.limits.lineWidthRange[0],
1174                            dev_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
1175        }
1176    }
1177
1178    return skip;
1179}
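// Example for verifyLineWidth() above (illustrative values): with wideLines disabled, any lineWidth
// other than 1.0f is flagged; with wideLines enabled and limits.lineWidthRange = [0.5, 10.0], a
// request of 16.0f is flagged as out of range.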
1180
1181// Verify that create state for a pipeline is valid
1182static bool verifyPipelineCreateState(layer_data *dev_data, std::vector<PIPELINE_STATE *> pPipelines, int pipelineIndex) {
1183    bool skip = false;
1184
1185    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];
1186
1187    // If create derivative bit is set, check that we've specified a base
1188    // pipeline correctly, and that the base pipeline was created to allow
1189    // derivatives.
1190    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
1191        PIPELINE_STATE *pBasePipeline = nullptr;
1192        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
1193              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
1194            // This check is a superset of VALIDATION_ERROR_096005a8 and VALIDATION_ERROR_096005aa
1195            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1196                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
1197                            "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
1198        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
1199            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
1200                skip |=
1201                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1202                            HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_208005a0, "DS",
1203                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline. %s",
1204                            validation_error_map[VALIDATION_ERROR_208005a0]);
1205            } else {
1206                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
1207            }
1208        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
1209            pBasePipeline = getPipelineState(dev_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
1210        }
1211
1212        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
1213            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1214                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
1215                            "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
1216        }
1217    }
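    // Illustrative well-formed derivative request (hypothetical values): flags includes
    // VK_PIPELINE_CREATE_DERIVATIVE_BIT, basePipelineIndex = 0 (an earlier element of the same
    // pCreateInfos array) with basePipelineHandle = VK_NULL_HANDLE (exactly one of the two must be
    // specified), and the base pipeline itself was created with
    // VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT.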
1218
1219    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
1220        const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
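        // Assumes the pipeline's renderPass handle is valid here; other layers are expected to report
        // invalid render pass handles (see the subpass-range check further below).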
1221        auto const render_pass_info = GetRenderPassState(dev_data, pPipeline->graphicsPipelineCI.renderPass)->createInfo.ptr();
1222        const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pPipeline->graphicsPipelineCI.subpass];
1223        if (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) {
1224            skip |= log_msg(
1225                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1226                HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005d4, "DS",
1227                "vkCreateGraphicsPipelines(): Render pass (0x%" PRIxLEAST64
1228                ") subpass %u has colorAttachmentCount of %u which doesn't match the pColorBlendState->attachmentCount of %u. %s",
1229                HandleToUint64(pPipeline->graphicsPipelineCI.renderPass), pPipeline->graphicsPipelineCI.subpass,
1230                subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount,
1231                validation_error_map[VALIDATION_ERROR_096005d4]);
1232        }
1233        if (!dev_data->enabled_features.independentBlend) {
1234            if (pPipeline->attachments.size() > 1) {
1235                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
1236                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
1237                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
1238                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
1239                    // only attachment state, so memcmp is best suited for the comparison
1240                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
1241                               sizeof(pAttachments[0]))) {
1242                        skip |=
1243                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1244                                    HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_0f4004ba, "DS",
1245                                    "Invalid Pipeline CreateInfo: If independent blend feature not "
1246                                    "enabled, all elements of pAttachments must be identical. %s",
1247                                    validation_error_map[VALIDATION_ERROR_0f4004ba]);
1248                        break;
1249                    }
1250                }
1251            }
1252        }
1253        if (!dev_data->enabled_features.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
1254            skip |=
1255                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1256                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_0f4004bc, "DS",
1257                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE. %s",
1258                        validation_error_map[VALIDATION_ERROR_0f4004bc]);
1259        }
1260    }
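    // Example for the blend checks above (illustrative): with independentBlend disabled,
    // pAttachments[0].blendEnable = VK_TRUE while pAttachments[1].blendEnable = VK_FALSE differs
    // byte-wise, so the memcmp flags it; with logicOp disabled, logicOpEnable = VK_TRUE is flagged
    // regardless of the logic op selected.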
1261
1262    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
1263    // produces nonsense errors that confuse users. Other layers should already
1264    // emit errors for renderpass being invalid.
1265    auto renderPass = GetRenderPassState(dev_data, pPipeline->graphicsPipelineCI.renderPass);
1266    if (renderPass && pPipeline->graphicsPipelineCI.subpass >= renderPass->createInfo.subpassCount) {
1267        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1268                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005ee, "DS",
1269                        "Invalid Pipeline CreateInfo State: Subpass index %u "
1270                        "is out of range for this renderpass (0..%u). %s",
1271                        pPipeline->graphicsPipelineCI.subpass, renderPass->createInfo.subpassCount - 1,
1272                        validation_error_map[VALIDATION_ERROR_096005ee]);
1273    }
1274
1275    if (validate_and_capture_pipeline_shader_state(dev_data, pPipeline)) {
1276        skip = true;
1277    }
1278    // Each shader's stage must be unique
1279    if (pPipeline->duplicate_shaders) {
1280        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
1281            if (pPipeline->duplicate_shaders & stage) {
1282                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1283                                HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
1284                                "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
1285                                string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
1286            }
1287        }
1288    }
1289    // VS is required
1290    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
1291        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1292                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005ae, "DS",
1293                        "Invalid Pipeline CreateInfo State: Vertex Shader required. %s",
1294                        validation_error_map[VALIDATION_ERROR_096005ae]);
1295    }
1296    // Either both or neither TC/TE shaders should be defined
1297    bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
1298    bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
1299    if (has_control && !has_eval) {
1300        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1301                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b2, "DS",
1302                        "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
1303                        validation_error_map[VALIDATION_ERROR_096005b2]);
1304    }
1305    if (!has_control && has_eval) {
1306        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1307                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b4, "DS",
1308                        "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
1309                        validation_error_map[VALIDATION_ERROR_096005b4]);
1310    }
1311    // Compute shaders should be specified independent of Gfx shaders
1312    if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
1313        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1314                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b0, "DS",
1315                        "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline. %s",
1316                        validation_error_map[VALIDATION_ERROR_096005b0]);
1317    }
1318    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
1319    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
1320    if (has_control && has_eval &&
1321        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
1322         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
1323        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1324                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005c0, "DS",
1325                        "Invalid Pipeline CreateInfo State: "
1326                        "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
1327                        "topology for tessellation pipelines. %s",
1328                        validation_error_map[VALIDATION_ERROR_096005c0]);
1329    }
1330    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
1331        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
1332        if (!has_control || !has_eval) {
1333            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1334                            HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005c2, "DS",
1335                            "Invalid Pipeline CreateInfo State: "
1336                            "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
1337                            "topology is only valid for tessellation pipelines. %s",
1338                            validation_error_map[VALIDATION_ERROR_096005c2]);
1339        }
1340    }
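    // Example for the two tessellation checks above (illustrative): a pipeline with both TC and TE
    // stages must use VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, and PATCH_LIST with either tessellation stage
    // missing is likewise flagged.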
1341
1342    // If a rasterization state is provided...
1343    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
1344        // Make sure that the line width conforms to the HW.
1345        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
1346            skip |=
1347                verifyLineWidth(dev_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, kVulkanObjectTypePipeline,
1348                                HandleToUint64(pPipeline->pipeline), pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
1349        }
1350
1351        if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
1352            (!dev_data->enabled_features.depthClamp)) {
1353            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1354                            HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_1020061c, "DS",
1355                            "vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable "
1356                            "member of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE. %s",
1357                            validation_error_map[VALIDATION_ERROR_1020061c]);
1358        }
1359
1360        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
1361            (pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0f) &&
1362            (!dev_data->enabled_features.depthBiasClamp)) {
1363            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1364                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
1365                            "vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp "
1366                            "member of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
1367                            "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled");
1368        }
1369
1370        // If rasterization is enabled...
1371        if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
1372            auto subpass_desc = renderPass ? &renderPass->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;
1373
1374            if (pPipeline->graphicsPipelineCI.pMultisampleState && (!dev_data->enabled_features.alphaToOne) &&
1375                (pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE)) {
1376                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1377                                HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_10000622, "DS",
1378                                "vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable "
1379                                "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE. %s",
1380                                validation_error_map[VALIDATION_ERROR_10000622]);
1381            }
1382
1383            // If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
1384            if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
1385                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1386                if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
1387                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1388                                    HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005e0, "DS",
1389                                    "Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is "
1390                                    "enabled and subpass uses a depth/stencil attachment. %s",
1391                                    validation_error_map[VALIDATION_ERROR_096005e0]);
1392
1393                } else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
1394                           (!dev_data->enabled_features.depthBounds)) {
1395                    skip |= log_msg(
1396                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1397                        HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
1398                        "vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the depthBoundsTestEnable "
1399                        "member of the VkPipelineDepthStencilStateCreateInfo structure must be set to VK_FALSE.");
1400                }
1401            }
1402
1403            // If subpass uses color attachments, pColorBlendState must be a valid pointer
1404            if (subpass_desc) {
1405                uint32_t color_attachment_count = 0;
1406                for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
1407                    if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1408                        ++color_attachment_count;
1409                    }
1410                }
1411                if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
1412                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1413                                    HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005e2, "DS",
1414                                    "Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is "
1415                                    "enabled and subpass uses color attachments. %s",
1416                                    validation_error_map[VALIDATION_ERROR_096005e2]);
1417                }
1418            }
1419        }
1420    }
1421
1422    return skip;
1423}
1424
1425// Free the Pipeline nodes
1426static void deletePipelines(layer_data *dev_data) {
1427    if (dev_data->pipelineMap.empty()) return;
1428    for (auto &pipe_map_pair : dev_data->pipelineMap) {
1429        delete pipe_map_pair.second;
1430    }
1431    dev_data->pipelineMap.clear();
1432}
1433
1434// The block of code below is specifically for managing/tracking DSs
1435
1436// Return Pool node ptr for specified pool or else NULL
1437DESCRIPTOR_POOL_STATE *GetDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
1438    auto pool_it = dev_data->descriptorPoolMap.find(pool);
1439    if (pool_it == dev_data->descriptorPoolMap.end()) {
1440        return NULL;
1441    }
1442    return pool_it->second;
1443}
1444
1445// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
1446// func_str is the name of the calling function
1447// Return false if no errors occur
1448// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
1449static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, const std::string &func_str) {
1450    if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
1451    bool skip = false;
1452    auto set_node = dev_data->setMap.find(set);
1453    if (set_node == dev_data->setMap.end()) {
1454        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1455                        HandleToUint64(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
1456                        "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
1457                        HandleToUint64(set));
1458    } else {
1459        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
1460        if (set_node->second->in_use.load()) {
1461            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1462                            HandleToUint64(set), __LINE__, VALIDATION_ERROR_2860026a, "DS",
1463                            "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
1464                            func_str.c_str(), HandleToUint64(set), validation_error_map[VALIDATION_ERROR_2860026a]);
1465        }
1466    }
1467    return skip;
1468}
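// Example for validateIdleDescriptorSet() above (illustrative): freeing a descriptor set that is still
// referenced by a submitted but not-yet-completed command buffer hits the in_use check
// (VALIDATION_ERROR_2860026a).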
1469
1470// Remove set from setMap and delete the set
1471static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
1472    dev_data->setMap.erase(descriptor_set->GetSet());
1473    delete descriptor_set;
1474}
1475// Free all DS Pools including their Sets & related sub-structs
1476// NOTE : Calls to this function should be wrapped in mutex
1477static void deletePools(layer_data *dev_data) {
1478    for (auto ii = dev_data->descriptorPoolMap.begin(); ii != dev_data->descriptorPoolMap.end();) {
1479        // Remove this pool's sets from setMap and delete them
1480        for (auto ds : ii->second->sets) {
1481            freeDescriptorSet(dev_data, ds);
1482        }
1483        ii->second->sets.clear();
1484        delete ii->second;
1485        ii = dev_data->descriptorPoolMap.erase(ii);
1486    }
1487}
1488
1489static void clearDescriptorPool(layer_data *dev_data, const VkDevice device, const VkDescriptorPool pool,
1490                                VkDescriptorPoolResetFlags flags) {
1491    DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, pool);
1492    // TODO: validate flags
1493    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
1494    for (auto ds : pPool->sets) {
1495        freeDescriptorSet(dev_data, ds);
1496    }
1497    pPool->sets.clear();
1498    // Reset available count for each type and available sets for this pool
1499    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
1500        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
1501    }
1502    pPool->availableSets = pPool->maxSets;
1503}
1504
1505// For given CB object, fetch associated CB Node from map
1506GLOBAL_CB_NODE *GetCBNode(layer_data const *dev_data, const VkCommandBuffer cb) {
1507    auto it = dev_data->commandBufferMap.find(cb);
1508    if (it == dev_data->commandBufferMap.end()) {
1509        return NULL;
1510    }
1511    return it->second;
1512}
1513
1514// If a renderpass is active, verify that the given command type is appropriate for current subpass state
1515bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
1516    if (!pCB->activeRenderPass) return false;
1517    bool skip = false;
1518    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
1519        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
1520        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1521                        HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
1522                        "Commands cannot be called in a subpass using secondary command buffers.");
1523    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
1524        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1525                        HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
1526                        "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
1527    }
1528    return skip;
1529}
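// Example for ValidateCmdSubpassState() above (illustrative): inside a subpass begun with
// VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS only vkCmdExecuteCommands(), vkCmdNextSubpass(), and
// vkCmdEndRenderPass() are accepted, so a vkCmdDraw() recorded there is flagged; conversely,
// vkCmdExecuteCommands() inside a VK_SUBPASS_CONTENTS_INLINE subpass is flagged.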
1530
1531bool ValidateCmdQueueFlags(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const char *caller_name, VkQueueFlags required_flags,
1532                           UNIQUE_VALIDATION_ERROR_CODE error_code) {
1533    auto pool = GetCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
1534    if (pool) {
1535        VkQueueFlags queue_flags = dev_data->phys_dev_properties.queue_family_properties[pool->queueFamilyIndex].queueFlags;
1536        if (!(required_flags & queue_flags)) {
1537            string required_flags_string;
1538            for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
1539                if (flag & required_flags) {
1540                    if (required_flags_string.size()) {
1541                        required_flags_string += " or ";
1542                    }
1543                    required_flags_string += string_VkQueueFlagBits(flag);
1544                }
1545            }
1546            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1547                           HandleToUint64(cb_node->commandBuffer), __LINE__, error_code, "DS",
1548                           "Cannot call %s on a command buffer allocated from a pool without %s capabilities. %s.", caller_name,
1549                           required_flags_string.c_str(), validation_error_map[error_code]);
1550        }
1551    }
1552    return false;
1553}
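// Example for ValidateCmdQueueFlags() above (illustrative): recording a compute dispatch into a command
// buffer whose pool was created on a transfer-only queue family fails the required_flags test with a
// message naming VK_QUEUE_COMPUTE_BIT.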
1554
1555static const char *GetCauseStr(VK_OBJECT obj) {
1556    if (obj.type == kVulkanObjectTypeDescriptorSet)
1557        return "destroyed or updated";
1558    if (obj.type == kVulkanObjectTypeCommandBuffer)
1559        return "destroyed or rerecorded";
1560    return "destroyed";
1561}
1562
1563static bool ReportInvalidCommandBuffer(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source) {
1564    bool skip = false;
1565    for (auto obj : cb_state->broken_bindings) {
1566        const char *type_str = object_string[obj.type];
1567        const char *cause_str = GetCauseStr(obj);
1568        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1569                        HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
1570                        "You are adding %s to command buffer 0x%p that is invalid because bound %s 0x%" PRIxLEAST64 " was %s.",
1571                        call_source, cb_state->commandBuffer, type_str, obj.handle, cause_str);
1572    }
1573    return skip;
1574}
1575
1576// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
1577// there's an issue with the Cmd ordering
1578bool ValidateCmd(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
1579    switch (cb_state->state) {
1580        case CB_RECORDING:
1581            return ValidateCmdSubpassState(dev_data, cb_state, cmd);
1582
1583        case CB_INVALID:
1584            return ReportInvalidCommandBuffer(dev_data, cb_state, caller_name);
1585
1586        default:
1587            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1588                           HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
1589                           "You must call vkBeginCommandBuffer() before this call to %s.", caller_name);
1590    }
1591}
1592
1593void UpdateCmdBufferLastCmd(GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd) {
1594    if (cb_state->state == CB_RECORDING) {
1595        cb_state->last_cmd = cmd;
1596    }
1597}
1598// For given object struct return a ptr of BASE_NODE type for its wrapping struct
1599BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
1600    BASE_NODE *base_ptr = nullptr;
1601    switch (object_struct.type) {
1602        case kVulkanObjectTypeDescriptorSet: {
1603            base_ptr = GetSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
1604            break;
1605        }
1606        case kVulkanObjectTypeSampler: {
1607            base_ptr = GetSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
1608            break;
1609        }
1610        case kVulkanObjectTypeQueryPool: {
1611            base_ptr = GetQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
1612            break;
1613        }
1614        case kVulkanObjectTypePipeline: {
1615            base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
1616            break;
1617        }
1618        case kVulkanObjectTypeBuffer: {
1619            base_ptr = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
1620            break;
1621        }
1622        case kVulkanObjectTypeBufferView: {
1623            base_ptr = GetBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
1624            break;
1625        }
1626        case kVulkanObjectTypeImage: {
1627            base_ptr = GetImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
1628            break;
1629        }
1630        case kVulkanObjectTypeImageView: {
1631            base_ptr = GetImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
1632            break;
1633        }
1634        case kVulkanObjectTypeEvent: {
1635            base_ptr = GetEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
1636            break;
1637        }
1638        case kVulkanObjectTypeDescriptorPool: {
1639            base_ptr = GetDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
1640            break;
1641        }
1642        case kVulkanObjectTypeCommandPool: {
1643            base_ptr = GetCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
1644            break;
1645        }
1646        case kVulkanObjectTypeFramebuffer: {
1647            base_ptr = GetFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
1648            break;
1649        }
1650        case kVulkanObjectTypeRenderPass: {
1651            base_ptr = GetRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
1652            break;
1653        }
1654        case kVulkanObjectTypeDeviceMemory: {
1655            base_ptr = GetMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
1656            break;
1657        }
1658        default:
1659            // TODO : Any other objects to be handled here?
1660            assert(0);
1661            break;
1662    }
1663    return base_ptr;
1664}
1665
1666// Tie the VK_OBJECT to the cmd buffer which includes:
1667//  Add object_binding to cmd buffer
1668//  Add cb_binding to object
1669static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
1670    cb_bindings->insert(cb_node);
1671    cb_node->object_bindings.insert(obj);
1672}
1673// For a given object, if cb_node is in that objects cb_bindings, remove cb_node
1674// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
1675    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
1676    if (base_obj) base_obj->cb_bindings.erase(cb_node);
1677}
1678// Reset the command buffer state
1679//  Maintain the createInfo and set state to CB_NEW, but clear all other state
1680static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
1681    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
1682    if (pCB) {
1683        pCB->in_use.store(0);
1684        pCB->last_cmd = CMD_NONE;
1685        // Reset CB state (note that createInfo is not cleared)
1686        pCB->commandBuffer = cb;
1687        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1688        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
1689        pCB->hasDrawCmd = false;
1690        pCB->state = CB_NEW;
1691        pCB->submitCount = 0;
1692        pCB->status = 0;
1693        pCB->viewportMask = 0;
1694        pCB->scissorMask = 0;
1695
1696        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
1697            pCB->lastBound[i].reset();
1698        }
1699
1700        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
1701        pCB->activeRenderPass = nullptr;
1702        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
1703        pCB->activeSubpass = 0;
1704        pCB->broken_bindings.clear();
1705        pCB->waitedEvents.clear();
1706        pCB->events.clear();
1707        pCB->writeEventsBeforeWait.clear();
1708        pCB->waitedEventsBeforeQueryReset.clear();
1709        pCB->queryToStateMap.clear();
1710        pCB->activeQueries.clear();
1711        pCB->startedQueries.clear();
1712        pCB->imageLayoutMap.clear();
1713        pCB->eventToStageMap.clear();
1714        pCB->drawData.clear();
1715        pCB->currentDrawData.buffers.clear();
1716        pCB->vertex_buffer_used = false;
1717        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
1718        // If secondary, invalidate any primary command buffer that may call us.
1719        if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
1720            invalidateCommandBuffers(dev_data,
1721                                     pCB->linkedCommandBuffers,
1722                                     {HandleToUint64(cb), kVulkanObjectTypeCommandBuffer});
1723        }
1724
1725        // Remove reverse command buffer links.
1726        for (auto pSubCB : pCB->linkedCommandBuffers) {
1727            pSubCB->linkedCommandBuffers.erase(pCB);
1728        }
1729        pCB->linkedCommandBuffers.clear();
1730        pCB->updateImages.clear();
1731        pCB->updateBuffers.clear();
1732        clear_cmd_buf_and_mem_references(dev_data, pCB);
1733        pCB->eventUpdates.clear();
1734        pCB->queryUpdates.clear();
1735
1736        // Remove object bindings
1737        for (auto obj : pCB->object_bindings) {
1738            removeCommandBufferBinding(dev_data, &obj, pCB);
1739        }
1740        pCB->object_bindings.clear();
1741        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
1742        for (auto framebuffer : pCB->framebuffers) {
1743            auto fb_state = GetFramebufferState(dev_data, framebuffer);
1744            if (fb_state) fb_state->cb_bindings.erase(pCB);
1745        }
1746        pCB->framebuffers.clear();
1747        pCB->activeFramebuffer = VK_NULL_HANDLE;
1748    }
1749}
1750
1751// Set PSO-related status bits for CB, including dynamic state set via PSO
1752static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe) {
1753    // Account for any dynamic state not set via this PSO
1754    if (!pPipe->graphicsPipelineCI.pDynamicState ||
1755        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) {  // All state is static
1756        pCB->status |= CBSTATUS_ALL_STATE_SET;
1757    } else {
1758        // First consider all state on
1759        // Then unset any state that's noted as dynamic in PSO
1760        // Finally OR that into CB statemask
1761        CBStatusFlags psoDynStateMask = CBSTATUS_ALL_STATE_SET;
1762        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
1763            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
1764                case VK_DYNAMIC_STATE_LINE_WIDTH:
1765                    psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
1766                    break;
1767                case VK_DYNAMIC_STATE_DEPTH_BIAS:
1768                    psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
1769                    break;
1770                case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
1771                    psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
1772                    break;
1773                case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
1774                    psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
1775                    break;
1776                case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
1777                    psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
1778                    break;
1779                case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
1780                    psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
1781                    break;
1782                case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
1783                    psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
1784                    break;
1785                default:
1786                    // TODO : Flag error here
1787                    break;
1788            }
1789        }
1790        pCB->status |= psoDynStateMask;
1791    }
1792}
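// Example for set_cb_pso_status() above (illustrative): a PSO whose only dynamic state is
// VK_DYNAMIC_STATE_LINE_WIDTH contributes CBSTATUS_ALL_STATE_SET & ~CBSTATUS_LINE_WIDTH_SET to
// pCB->status; the line-width bit is expected to be set separately when the corresponding
// vkCmdSetLineWidth() is recorded.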
1793
1794// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
1795// render pass.
1796bool insideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
1797    bool inside = false;
1798    if (pCB->activeRenderPass) {
1799        inside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1800                         HandleToUint64(pCB->commandBuffer), __LINE__, msgCode, "DS",
1801                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 "). %s", apiName,
1802                         HandleToUint64(pCB->activeRenderPass->renderPass), validation_error_map[msgCode]);
1803    }
1804    return inside;
1805}
1806
1807// Flags validation error if the associated call is made outside a render pass. The apiName
1808// routine should ONLY be called inside a render pass.
1809bool outsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
1810    bool outside = false;
1811    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
1812        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
1813         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
1814        outside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1815                          HandleToUint64(pCB->commandBuffer), __LINE__, msgCode, "DS",
1816                          "%s: This call must be issued inside an active render pass. %s", apiName, validation_error_map[msgCode]);
1817    }
1818    return outside;
1819}
1820
1821static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
1822    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
1823}
1824
1825// For the given ValidationCheck enum, set all relevant instance disabled flags to true
1826void SetDisabledFlags(instance_layer_data *instance_data, VkValidationFlagsEXT *val_flags_struct) {
1827    for (uint32_t i = 0; i < val_flags_struct->disabledValidationCheckCount; ++i) {
1828        switch (val_flags_struct->pDisabledValidationChecks[i]) {
1829            case VK_VALIDATION_CHECK_SHADERS_EXT:
1830                instance_data->disabled.shader_validation = true;
1831                break;
1832            case VK_VALIDATION_CHECK_ALL_EXT:
1833                // Set all disabled flags to true
1834                instance_data->disabled.SetAll(true);
1835                break;
1836            default:
1837                break;
1838        }
1839    }
1840}
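// Example for SetDisabledFlags() above (illustrative): an application can chain a VkValidationFlagsEXT
// with pDisabledValidationChecks = { VK_VALIDATION_CHECK_SHADERS_EXT } onto
// VkInstanceCreateInfo::pNext to disable shader validation only, while VK_VALIDATION_CHECK_ALL_EXT
// disables every check (see the pNext walk in CreateInstance below).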
1841
1842VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
1843                                              VkInstance *pInstance) {
1844    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
1845
1846    assert(chain_info->u.pLayerInfo);
1847    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
1848    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
1849    if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
1850
1851    // Advance the link info for the next element on the chain
1852    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
1853
1854    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
1855    if (result != VK_SUCCESS) return result;
1856
1857    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
1858    instance_data->instance = *pInstance;
1859    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
1860    instance_data->report_data = debug_report_create_instance(
1861        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
1862    instance_data->extensions.InitFromInstanceCreateInfo(pCreateInfo);
1863    init_core_validation(instance_data, pAllocator);
1864
1865    ValidateLayerOrdering(*pCreateInfo);
1866    // Parse any pNext chains
1867    if (pCreateInfo->pNext) {
1868        GENERIC_HEADER *struct_header = (GENERIC_HEADER *)pCreateInfo->pNext;
1869        while (struct_header) {
1870            // Check for VkValidationFlagsExt
1871            // Check for VkValidationFlagsEXT
1872                SetDisabledFlags(instance_data, (VkValidationFlagsEXT *)struct_header);
1873            }
1874            struct_header = (GENERIC_HEADER *)struct_header->pNext;
1875        }
1876    }
1877
1878    return result;
1879}
1880
1881// Hook DestroyInstance to remove tableInstanceMap entry
1882VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
1883    // TODOSC : Shouldn't need any customization here
1884    dispatch_key key = get_dispatch_key(instance);
1885    // TBD: Need any locking this early, in case this function is called at the
1886    // same time by more than one thread?
1887    instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
1888    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
1889
1890    std::lock_guard<std::mutex> lock(global_lock);
1891    // Clean up logging callback, if any
1892    while (instance_data->logging_callback.size() > 0) {
1893        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
1894        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
1895        instance_data->logging_callback.pop_back();
1896    }
1897
1898    layer_debug_report_destroy_instance(instance_data->report_data);
1899    FreeLayerDataPtr(key, instance_layer_data_map);
1900}
1901
1902static bool ValidatePhysicalDeviceQueueFamily(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
1903                                              uint32_t requested_queue_family, int32_t err_code, const char *cmd_name,
1904                                              const char *queue_family_var_name, const char *vu_note = nullptr) {
1905    bool skip = false;
1906
1907    if (!vu_note) vu_note = validation_error_map[err_code];
1908
1909    const char *conditional_ext_cmd =
1910        instance_data->extensions.vk_khr_get_physical_device_properties_2 ? " or vkGetPhysicalDeviceQueueFamilyProperties2KHR" : "";
1911
1912    std::string count_note = (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
1913                                 ? "the pQueueFamilyPropertyCount was never obtained"
1914                                 : "i.e. is not less than " + std::to_string(pd_state->queue_family_count);
1915
1916    if (requested_queue_family >= pd_state->queue_family_count) {
1917        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
1918                        HandleToUint64(pd_state->phys_device), __LINE__, err_code, "DL",
1919                        "%s: %s (= %" PRIu32
1920                        ") is not less than any previously obtained pQueueFamilyPropertyCount from "
1921                        "vkGetPhysicalDeviceQueueFamilyProperties%s (%s). %s",
1922                        cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, count_note.c_str(), vu_note);
1923    }
1924    return skip;
1925}
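// Example for ValidatePhysicalDeviceQueueFamily() above (illustrative): if
// vkGetPhysicalDeviceQueueFamilyProperties previously reported 2 families, any queueFamilyIndex >= 2
// is flagged; if the count was never queried at all, the message notes that pQueueFamilyPropertyCount
// was never obtained.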
1926
1927// Verify VkDeviceQueueCreateInfos
1928static bool ValidateDeviceQueueCreateInfos(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
1929                                           uint32_t info_count, const VkDeviceQueueCreateInfo *infos) {
1930    bool skip = false;
1931
1932    for (uint32_t i = 0; i < info_count; ++i) {
1933        const auto requested_queue_family = infos[i].queueFamilyIndex;
1934
1935        // Verify that requested queue family is known to be valid at this point in time
1936        std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
1937        skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, requested_queue_family, VALIDATION_ERROR_06c002fa,
1938                                                  "vkCreateDevice", queue_family_var_name.c_str());
1939
1940        // Verify that the requested queue count for the queue family is known to be valid at this point in time
1941        if (requested_queue_family < pd_state->queue_family_count) {
1942            const auto requested_queue_count = infos[i].queueCount;
1943            const auto queue_family_props_count = pd_state->queue_family_properties.size();
1944            const bool queue_family_has_props = requested_queue_family < queue_family_props_count;
1945            const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
1946                                                  ? " or vkGetPhysicalDeviceQueueFamilyProperties2KHR"
1947                                                  : "";
1948            std::string count_note =
1949                !queue_family_has_props
1950                    ? "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained"
1951                    : "i.e. is not less than or equal to " +
1952                          std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount);
1953
1954            if (!queue_family_has_props ||
1955                requested_queue_count > pd_state->queue_family_properties[requested_queue_family].queueCount) {
1956                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1957                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(pd_state->phys_device), __LINE__,
1958                                VALIDATION_ERROR_06c002fc, "DL",
1959                                "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
1960                                ") is not "
1961                                "less than or equal to available queue count for this "
1962                                "pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueFamilyIndex} (=%" PRIu32
1963                                "pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueFamilyIndex (=%" PRIu32
1964                                "from vkGetPhysicalDeviceQueueFamilyProperties%s (%s). %s",
1965                                i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str(),
1966                                validation_error_map[VALIDATION_ERROR_06c002fc]);
1967            }
1968        }
1969    }
1970
1971    return skip;
1972}
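// Example for ValidateDeviceQueueCreateInfos() above (illustrative): requesting queueCount = 4 from a
// family whose reported VkQueueFamilyProperties::queueCount is 1 triggers VALIDATION_ERROR_06c002fc.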
1973
1974// Verify that features have been queried and that they are available
1975static bool ValidateRequestedFeatures(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
1976                                      const VkPhysicalDeviceFeatures *requested_features) {
1977    bool skip = false;
1978
1979    const VkBool32 *actual = reinterpret_cast<const VkBool32 *>(&pd_state->features);
1980    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
1981    // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
1982    //  Need to provide the struct member name with the issue. To do that seems like we'll
1983    //  have to loop through each struct member which should be done w/ codegen to keep in synch.
1984    uint32_t errors = 0;
1985    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1986    for (uint32_t i = 0; i < total_bools; i++) {
1987        if (requested[i] > actual[i]) {
1988            // TODO: Add index to struct member name helper to be able to include a feature name
1989            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1990                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
1991                            "While calling vkCreateDevice(), requesting feature #%u in the VkPhysicalDeviceFeatures struct, "
1992                            "which is not available on this device.",
1993                            i);
1994            errors++;
1995        }
1996    }
1997    if (errors && (UNCALLED == pd_state->vkGetPhysicalDeviceFeaturesState)) {
1998        // If user didn't request features, notify them that they should
1999        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
2000        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2001                        0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
2002                        "You requested features that are unavailable on this device. You should first query feature "
2003                        "availability by calling vkGetPhysicalDeviceFeatures().");
2004    }
2005    return skip;
2006}
2007
2008VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
2009                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
2010    bool skip = false;
2011    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
2012
2013    std::unique_lock<std::mutex> lock(global_lock);
2014    auto pd_state = GetPhysicalDeviceState(instance_data, gpu);
2015
2016    // TODO: object_tracker should perhaps do this instead
2017    //       and it does not seem to currently work anyway -- the loader just crashes before this point
2018    if (!GetPhysicalDeviceState(instance_data, gpu)) {
2019        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2020                        0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
2021                        "Invalid call to vkCreateDevice() without first calling vkEnumeratePhysicalDevices().");
2022    }
2023
2024    // Check that any requested features are available
2025    if (pCreateInfo->pEnabledFeatures) {
2026        skip |= ValidateRequestedFeatures(instance_data, pd_state, pCreateInfo->pEnabledFeatures);
2027    }
2028    skip |=
2029        ValidateDeviceQueueCreateInfos(instance_data, pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);
2030
2031    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2032
2033    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
2034
2035    assert(chain_info->u.pLayerInfo);
2036    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
2037    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
2038    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_data->instance, "vkCreateDevice");
2039    if (fpCreateDevice == NULL) {
2040        return VK_ERROR_INITIALIZATION_FAILED;
2041    }
2042
2043    // Advance the link info for the next element on the chain
2044    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
2045
2046    lock.unlock();
2047
2048    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
2049    if (result != VK_SUCCESS) {
2050        return result;
2051    }
2052
2053    lock.lock();
2054    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
2055
2056    device_data->instance_data = instance_data;
2057    // Setup device dispatch table
2058    layer_init_device_dispatch_table(*pDevice, &device_data->dispatch_table, fpGetDeviceProcAddr);
2059    device_data->device = *pDevice;
2060    // Save PhysicalDevice handle
2061    device_data->physical_device = gpu;
2062
2063    device_data->report_data = layer_debug_report_create_device(instance_data->report_data, *pDevice);
2064    device_data->extensions.InitFromDeviceCreateInfo(&instance_data->extensions, pCreateInfo);
2065
2066    // Get physical device limits for this device
2067    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(device_data->phys_dev_properties.properties));
2068    uint32_t count;
2069    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
2070    device_data->phys_dev_properties.queue_family_properties.resize(count);
2071    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
2072        gpu, &count, device_data->phys_dev_properties.queue_family_properties.data());
2073    // TODO: device limits should make sure these are compatible
2074    if (pCreateInfo->pEnabledFeatures) {
2075        device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
2076    } else {
2077        memset(&device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
2078    }
2079    // Store physical device properties and physical device mem limits into device layer_data structs
2080    instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &device_data->phys_dev_mem_props);
2081    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &device_data->phys_dev_props);
2082    lock.unlock();
2083
2084    ValidateLayerOrdering(*pCreateInfo);
2085
2086    return result;
2087}
2088
2090VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
2091    // TODOSC : Shouldn't need any customization here
2092    dispatch_key key = get_dispatch_key(device);
2093    layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);
2094    // Free all the memory
2095    std::unique_lock<std::mutex> lock(global_lock);
2096    deletePipelines(dev_data);
2097    dev_data->renderPassMap.clear();
2098    for (auto &cb_entry : dev_data->commandBufferMap) {
2099        delete cb_entry.second;
2100    }
2101    dev_data->commandBufferMap.clear();
2102    // This will also delete all sets in the pool & remove them from setMap
2103    deletePools(dev_data);
2104    // All sets should be removed
2105    assert(dev_data->setMap.empty());
2106    dev_data->descriptorSetLayoutMap.clear();
2107    dev_data->imageViewMap.clear();
2108    dev_data->imageMap.clear();
2109    dev_data->imageSubresourceMap.clear();
2110    dev_data->imageLayoutMap.clear();
2111    dev_data->bufferViewMap.clear();
2112    dev_data->bufferMap.clear();
2113    // Queues persist until device is destroyed
2114    dev_data->queueMap.clear();
2115    // Report any memory leaks
2116    layer_debug_report_destroy_device(device);
2117    lock.unlock();
2118
2119#if DISPATCH_MAP_DEBUG
2120    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
2121#endif
2122
2123    dev_data->dispatch_table.DestroyDevice(device, pAllocator);
2124    FreeLayerDataPtr(key, layer_data_map);
2125}
2126
2127static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
2128
2129// For the given stage mask, report geo_error_id if the Geometry shader stage is set without the geometryShader
2130//   feature enabled, and report tess_error_id if a Tessellation shader stage is set without tessellationShader enabled
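// Example (illustrative only): a pWaitDstStageMask entry in vkQueueSubmit() that includes
//   VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, on a device created without the geometryShader
//   feature, would be reported here via geo_error_id.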
2131static bool ValidateStageMaskGsTsEnables(layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
2132                                         UNIQUE_VALIDATION_ERROR_CODE geo_error_id, UNIQUE_VALIDATION_ERROR_CODE tess_error_id) {
2133    bool skip = false;
2134    if (!dev_data->enabled_features.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
2135        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
2136                        geo_error_id, "DL",
2137                        "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when "
2138                        "device does not have geometryShader feature enabled. %s",
2139                        caller, validation_error_map[geo_error_id]);
2140    }
2141    if (!dev_data->enabled_features.tessellationShader &&
2142        (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
2143        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
2144                        tess_error_id, "DL",
2145                        "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT "
2146                        "and/or VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device "
2147                        "does not have tessellationShader feature enabled. %s",
2148                        caller, validation_error_map[tess_error_id]);
2149    }
2150    return skip;
2151}
2152
2153// Loop through bound objects and increment their in_use counts.
2154static void IncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2155    for (auto obj : cb_node->object_bindings) {
2156        auto base_obj = GetStateStructPtrFromObject(dev_data, obj);
2157        if (base_obj) {
2158            base_obj->in_use.fetch_add(1);
2159        }
2160    }
2161}
2162// Track which resources are in-flight by atomically incrementing their "in_use" count
2163static void incrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2164    cb_node->submitCount++;
2165    cb_node->in_use.fetch_add(1);
2166
2167    // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
2168    IncrementBoundObjects(dev_data, cb_node);
2169    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2170    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2171    //  should then be flagged prior to calling this function
2172    for (auto drawDataElement : cb_node->drawData) {
2173        for (auto buffer : drawDataElement.buffers) {
2174            auto buffer_state = GetBufferState(dev_data, buffer);
2175            if (buffer_state) {
2176                buffer_state->in_use.fetch_add(1);
2177            }
2178        }
2179    }
2180    for (auto event : cb_node->writeEventsBeforeWait) {
2181        auto event_state = GetEventNode(dev_data, event);
2182        if (event_state) event_state->write_in_use++;
2183    }
2184}
2185
2186// Note: This function assumes that the global lock is held by the calling thread.
2187// For the given queue, verify the queue state up to the given seq number.
2188// Currently the only check is that if there are events to be waited on prior to
2189//  a QueryReset, all such events have been signalled.
2190static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *initial_queue, uint64_t initial_seq) {
2191    bool skip = false;
2192
2193    // sequence number we want to validate up to, per queue
2194    std::unordered_map<QUEUE_STATE *, uint64_t> target_seqs { { initial_queue, initial_seq } };
2195    // sequence number we've completed validation for, per queue
2196    std::unordered_map<QUEUE_STATE *, uint64_t> done_seqs;
2197    std::vector<QUEUE_STATE *> worklist { initial_queue };
2198
2199    while (!worklist.empty()) {
2200        auto queue = worklist.back();
2201        worklist.pop_back();
2202
2203        auto target_seq = target_seqs[queue];
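        // done_seqs[queue] default-constructs to 0 on first visit, so take the max with queue->seq,
        // the sequence number of the oldest submission still tracked for this queue.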
2204        auto seq = std::max(done_seqs[queue], queue->seq);
2205        auto sub_it = queue->submissions.begin() + int(seq - queue->seq);  // seq >= queue->seq
2206
2207        for (; seq < target_seq; ++sub_it, ++seq) {
2208            for (auto &wait : sub_it->waitSemaphores) {
2209                auto other_queue = GetQueueState(dev_data, wait.queue);
2210
2211                if (other_queue == queue)
2212                    continue;   // intra-queue semaphore waits always point backwards, so there is nothing further to validate.
2213
2214                auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
2215                auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);
2216
2217                // if this wait is for another queue, and covers new sequence
2218                // numbers beyond what we've already validated, mark the new
2219                // target seq and (possibly-re)add the queue to the worklist.
2220                if (other_done_seq < other_target_seq) {
2221                    target_seqs[other_queue] = other_target_seq;
2222                    worklist.push_back(other_queue);
2223                }
2224            }
2225
2226            for (auto cb : sub_it->cbs) {
2227                auto cb_node = GetCBNode(dev_data, cb);
2228                if (cb_node) {
2229                    for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
2230                        for (auto event : queryEventsPair.second) {
2231                            if (dev_data->eventMap[event].needsSignaled) {
2232                                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2233                                                VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
2234                                                "Cannot get query results on queryPool 0x%" PRIx64
2235                                                " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
2236                                                HandleToUint64(queryEventsPair.first.pool), queryEventsPair.first.index,
2237                                                HandleToUint64(event));
2238                            }
2239                        }
2240                    }
2241                }
2242            }
2243        }
2244
2245        // finally mark the point we've now validated this queue to.
2246        done_seqs[queue] = seq;
2247    }
2248
2249    return skip;
2250}
2251
2252// When the given fence is retired, verify outstanding queue operations through the point of the fence
2253static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
2254    auto fence_state = GetFenceNode(dev_data, fence);
2255    if (VK_NULL_HANDLE != fence_state->signaler.first) {
2256        return VerifyQueueStateToSeq(dev_data, GetQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
2257    }
2258    return false;
2259}
2260
2261// Decrement in-use count for objects bound to command buffer
2262static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2263    BASE_NODE *base_obj = nullptr;
2264    for (auto obj : cb_node->object_bindings) {
2265        base_obj = GetStateStructPtrFromObject(dev_data, obj);
2266        if (base_obj) {
2267            base_obj->in_use.fetch_sub(1);
2268        }
2269    }
2270}
2271
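// Retire all submissions on pQueue with sequence numbers less than seq: release the in-use counts held
// by each retired submission (semaphores, command buffers and their bound objects), copy per-CB query
// and event state back to the device-level maps, mark fences retired, and recursively roll forward any
// other queue whose work was waited on via semaphores.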
2272static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
2273    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
2274
2275    // Roll this queue forward, one submission at a time.
2276    while (pQueue->seq < seq) {
2277        auto &submission = pQueue->submissions.front();
2278
2279        for (auto &wait : submission.waitSemaphores) {
2280            auto pSemaphore = GetSemaphoreNode(dev_data, wait.semaphore);
2281            if (pSemaphore) {
2282                pSemaphore->in_use.fetch_sub(1);
2283            }
2284            auto &lastSeq = otherQueueSeqs[wait.queue];
2285            lastSeq = std::max(lastSeq, wait.seq);
2286        }
2287
2288        for (auto &semaphore : submission.signalSemaphores) {
2289            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2290            if (pSemaphore) {
2291                pSemaphore->in_use.fetch_sub(1);
2292            }
2293        }
2294
2295        for (auto cb : submission.cbs) {
2296            auto cb_node = GetCBNode(dev_data, cb);
2297            if (!cb_node) {
2298                continue;
2299            }
2300            // First perform decrement on general case bound objects
2301            DecrementBoundResources(dev_data, cb_node);
2302            for (auto drawDataElement : cb_node->drawData) {
2303                for (auto buffer : drawDataElement.buffers) {
2304                    auto buffer_state = GetBufferState(dev_data, buffer);
2305                    if (buffer_state) {
2306                        buffer_state->in_use.fetch_sub(1);
2307                    }
2308                }
2309            }
2310            for (auto event : cb_node->writeEventsBeforeWait) {
2311                auto eventNode = dev_data->eventMap.find(event);
2312                if (eventNode != dev_data->eventMap.end()) {
2313                    eventNode->second.write_in_use--;
2314                }
2315            }
2316            for (auto queryStatePair : cb_node->queryToStateMap) {
2317                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
2318            }
2319            for (auto eventStagePair : cb_node->eventToStageMap) {
2320                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
2321            }
2322
2323            cb_node->in_use.fetch_sub(1);
2324        }
2325
2326        auto pFence = GetFenceNode(dev_data, submission.fence);
2327        if (pFence) {
2328            pFence->state = FENCE_RETIRED;
2329        }
2330
2331        pQueue->submissions.pop_front();
2332        pQueue->seq++;
2333    }
2334
2335    // Roll other queues forward to the highest seq we saw a wait for
2336    for (auto qs : otherQueueSeqs) {
2337        RetireWorkOnQueue(dev_data, GetQueueState(dev_data, qs.first), qs.second);
2338    }
2339}
2340
2341// Submit a fence to a queue, delimiting previous fences and previously
2342// untracked work with it.
2343static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
2344    pFence->state = FENCE_INFLIGHT;
2345    pFence->signaler.first = pQueue->queue;
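    // The fence will be signaled once this queue's sequence counter passes all currently pending
    // submissions plus the submitCount batches being enqueued along with it.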
2346    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
2347}
2348
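// Verify that a command buffer lacking VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT is neither
// already pending execution nor submitted more than once in the current batch.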
2349static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2350    bool skip = false;
2351    if ((pCB->in_use.load() || current_submit_count > 1) &&
2352        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2353        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2354                        __LINE__, VALIDATION_ERROR_31a0008e, "DS",
2355                        "Command Buffer 0x%p is already in use and is not marked for simultaneous use. %s", pCB->commandBuffer,
2356                        validation_error_map[VALIDATION_ERROR_31a0008e]);
2357    }
2358    return skip;
2359}
2360
2361static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source,
2362                                       int current_submit_count, UNIQUE_VALIDATION_ERROR_CODE vu_id) {
2363    bool skip = false;
2364    if (dev_data->instance_data->disabled.command_buffer_state) return skip;
2365    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
2366    if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
2367        (cb_state->submitCount + current_submit_count > 1)) {
2368        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2369                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
2370                        "Command buffer 0x%p was begun with VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
2371                        "set, but has been submitted %" PRIu64 " times.",
2372                        cb_state->commandBuffer, cb_state->submitCount + current_submit_count);
2373    }
2374
2375    // Validate that cmd buffers have been updated
2376    if (CB_RECORDED != cb_state->state) {
2377        if (CB_INVALID == cb_state->state) {
2378            skip |= ReportInvalidCommandBuffer(dev_data, cb_state, call_source);
2379        } else if (CB_NEW == cb_state->state) {
2380            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2381                            HandleToUint64(cb_state->commandBuffer), __LINE__, vu_id, "DS",
2382                            "Command buffer 0x%p used in the call to %s is unrecorded and contains no commands. %s",
2383                            cb_state->commandBuffer, call_source, validation_error_map[vu_id]);
2384        } else {  // Flag error for using a CB without vkEndCommandBuffer() having been called
2385            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2386                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
2387                            "You must call vkEndCommandBuffer() on command buffer 0x%p before this call to %s!",
2388                            cb_state->commandBuffer, call_source);
2389        }
2390    }
2391    return skip;
2392}
2393
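// Verify that all buffers referenced by the command buffer's draw data still exist (i.e., have not been destroyed).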
2394static bool validateResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2395    bool skip = false;
2396
2397    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2398    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2399    //  should then be flagged prior to calling this function
2400    for (auto drawDataElement : cb_node->drawData) {
2401        for (auto buffer : drawDataElement.buffers) {
2402            auto buffer_state = GetBufferState(dev_data, buffer);
2403            if (!buffer_state) {
2404                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
2405                                HandleToUint64(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
2406                                "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", HandleToUint64(buffer));
2407            }
2408        }
2409    }
2410    return skip;
2411}
2412
2413// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
2414bool ValidImageBufferQueue(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue, uint32_t count,
2415                           const uint32_t *indices) {
2416    bool found = false;
2417    bool skip = false;
2418    auto queue_state = GetQueueState(dev_data, queue);
2419    if (queue_state) {
2420        for (uint32_t i = 0; i < count; i++) {
2421            if (indices[i] == queue_state->queueFamilyIndex) {
2422                found = true;
2423                break;
2424            }
2425        }
2426
2427        if (!found) {
2428            skip = log_msg(
2429                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object->type], object->handle, __LINE__,
2430                DRAWSTATE_INVALID_QUEUE_FAMILY, "DS", "vkQueueSubmit: Command buffer 0x%" PRIxLEAST64 " contains %s 0x%" PRIxLEAST64
2431                                                      " which was not created with concurrent access to queue family %d allowed.",
2432                HandleToUint64(cb_node->commandBuffer), object_string[object->type], object->handle, queue_state->queueFamilyIndex);
2433        }
2434    }
2435    return skip;
2436}
2437
2438// Validate that queueFamilyIndices of primary command buffers match this queue
2439// Secondary command buffers were previously validated in vkCmdExecuteCommands().
2440static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
2441    bool skip = false;
2442    auto pPool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
2443    auto queue_state = GetQueueState(dev_data, queue);
2444
2445    if (pPool && queue_state) {
2446        if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
2447            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2448                            HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_31a00094, "DS",
2449                            "vkQueueSubmit: Primary command buffer 0x%p created in queue family %d is being submitted on queue "
2450                            "0x%p from queue family %d. %s",
2451                            pCB->commandBuffer, pPool->queueFamilyIndex, queue, queue_state->queueFamilyIndex,
2452                            validation_error_map[VALIDATION_ERROR_31a00094]);
2453        }
2454
2455        // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
2456        for (auto object : pCB->object_bindings) {
2457            if (object.type == kVulkanObjectTypeImage) {
2458                auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(object.handle));
2459                if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2460                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, image_state->createInfo.queueFamilyIndexCount,
2461                                                  image_state->createInfo.pQueueFamilyIndices);
2462                }
2463            } else if (object.type == kVulkanObjectTypeBuffer) {
2464                auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object.handle));
2465                if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2466                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, buffer_state->createInfo.queueFamilyIndexCount,
2467                                                  buffer_state->createInfo.pQueueFamilyIndices);
2468                }
2469            }
2470        }
2471    }
2472
2473    return skip;
2474}
2475
2476static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2477    // Track in-use for resources off of primary and any secondary CBs
2478    bool skip = false;
2479
2480    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
2481    // on device
2482    skip |= validateCommandBufferSimultaneousUse(dev_data, pCB, current_submit_count);
2483
2484    skip |= validateResources(dev_data, pCB);
2485
2486    for (auto pSubCB : pCB->linkedCommandBuffers) {
2487        skip |= validateResources(dev_data, pSubCB);
2488        // TODO: replace with invalidateCommandBuffers() at recording.
2489        if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
2490            !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2491            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2492                    __LINE__, VALIDATION_ERROR_31a00092, "DS",
2493                    "Command buffer 0x%p was submitted with secondary buffer 0x%p, but that buffer has subsequently been bound to "
2494                    "primary cmd buffer 0x%p and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set. %s",
2495                    pCB->commandBuffer, pSubCB->commandBuffer, pSubCB->primaryCommandBuffer,
2496                    validation_error_map[VALIDATION_ERROR_31a00092]);
2497        }
2498    }
2499
2500    skip |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()", current_submit_count, VALIDATION_ERROR_31a00090);
2501
2502    return skip;
2503}
2504
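// Verify that a fence passed to a queue submission is neither already in flight nor still in the
// signaled state (fences must be reset before reuse).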
2505static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
2506    bool skip = false;
2507
2508    if (pFence) {
2509        if (pFence->state == FENCE_INFLIGHT) {
2510            // TODO: opportunities for VALIDATION_ERROR_31a00080, VALIDATION_ERROR_316008b4, VALIDATION_ERROR_16400a0e
2511            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2512                            HandleToUint64(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
2513                            "Fence 0x%" PRIx64 " is already in use by another submission.", HandleToUint64(pFence->fence));
2514        }
2515
2516        else if (pFence->state == FENCE_RETIRED) {
2517            // TODO: opportunities for VALIDATION_ERROR_31a0007e, VALIDATION_ERROR_316008b2, VALIDATION_ERROR_16400a0e
2518            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2519                            HandleToUint64(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
2520                            "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted.",
2521                            HandleToUint64(pFence->fence));
2522        }
2523    }
2524
2525    return skip;
2526}
2527
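// Record the state side effects of a successful vkQueueSubmit: mark the fence and semaphores in-use,
// update tracked image layouts, bump in-use counts, and append one tracked submission per VkSubmitInfo.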
2528static void PostCallRecordQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2529                                      VkFence fence) {
2530    auto pQueue = GetQueueState(dev_data, queue);
2531    auto pFence = GetFenceNode(dev_data, fence);
2532
2533    // Mark the fence in-use.
2534    if (pFence) {
2535        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
2536    }
2537
2538    // Now process each individual submit
2539    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2540        std::vector<VkCommandBuffer> cbs;
2541        const VkSubmitInfo *submit = &pSubmits[submit_idx];
2542        vector<SEMAPHORE_WAIT> semaphore_waits;
2543        vector<VkSemaphore> semaphore_signals;
2544        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2545            VkSemaphore semaphore = submit->pWaitSemaphores[i];
2546            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2547            if (pSemaphore) {
2548                if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
2549                    semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
2550                    pSemaphore->in_use.fetch_add(1);
2551                }
2552                pSemaphore->signaler.first = VK_NULL_HANDLE;
2553                pSemaphore->signaled = false;
2554            }
2555        }
2556        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2557            VkSemaphore semaphore = submit->pSignalSemaphores[i];
2558            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2559            if (pSemaphore) {
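                // Record this submission (one past the currently pending submissions on this queue) as
                // the signaler that will eventually allow waits on this semaphore to complete.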
2560                pSemaphore->signaler.first = queue;
2561                pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
2562                pSemaphore->signaled = true;
2563                pSemaphore->in_use.fetch_add(1);
2564                semaphore_signals.push_back(semaphore);
2565            }
2566        }
2567        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2568            auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2569            if (cb_node) {
2570                cbs.push_back(submit->pCommandBuffers[i]);
2571                for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
2572                    cbs.push_back(secondaryCmdBuffer->commandBuffer);
2573                }
2574                UpdateCmdBufImageLayouts(dev_data, cb_node);
2575                incrementResources(dev_data, cb_node);
2576                for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
2577                    incrementResources(dev_data, secondaryCmdBuffer);
2578                }
2579            }
2580        }
2581        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
2582                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
2583    }
2584
2585    if (pFence && !submitCount) {
2586        // If no submissions, but just dropping a fence on the end of the queue,
2587        // record an empty submission with just the fence, so we can determine
2588        // its completion.
2589        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(),
2590                                         fence);
2591    }
2592}
2593
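// Validate a vkQueueSubmit call against tracked state: the fence must be usable for submission,
// semaphore waits/signals must be able to make forward progress, and each command buffer must be in a
// submittable state with consistent image layouts and a compatible queue family.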
2594static bool PreCallValidateQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2595                                       VkFence fence) {
2596    auto pFence = GetFenceNode(dev_data, fence);
2597    bool skip = ValidateFenceForSubmit(dev_data, pFence);
2598    if (skip) {
2599        return true;
2600    }
2601
2602    unordered_set<VkSemaphore> signaled_semaphores;
2603    unordered_set<VkSemaphore> unsignaled_semaphores;
2604    vector<VkCommandBuffer> current_cmds;
2605    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> localImageLayoutMap = dev_data->imageLayoutMap;
2606    // Now verify each individual submit
2607    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2608        const VkSubmitInfo *submit = &pSubmits[submit_idx];
2609        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2610            skip |= ValidateStageMaskGsTsEnables(dev_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()",
2611                                                 VALIDATION_ERROR_13c00098, VALIDATION_ERROR_13c0009a);
2612            VkSemaphore semaphore = submit->pWaitSemaphores[i];
2613            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2614            if (pSemaphore) {
2615                if (unsignaled_semaphores.count(semaphore) ||
2616                    (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
2617                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2618                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
2619                                    "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
2620                                    HandleToUint64(semaphore));
2621                } else {
2622                    signaled_semaphores.erase(semaphore);
2623                    unsignaled_semaphores.insert(semaphore);
2624                }
2625            }
2626        }
2627        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2628            VkSemaphore semaphore = submit->pSignalSemaphores[i];
2629            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2630            if (pSemaphore) {
2631                if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
2632                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2633                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
2634                                    "Queue 0x%p is signaling semaphore 0x%" PRIx64
2635                                    " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
2636                                    queue, HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
2637                } else {
2638                    unsignaled_semaphores.erase(semaphore);
2639                    signaled_semaphores.insert(semaphore);
2640                }
2641            }
2642        }
2643        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2644            auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2645            if (cb_node) {
2646                skip |= ValidateCmdBufImageLayouts(dev_data, cb_node, localImageLayoutMap);
2647                current_cmds.push_back(submit->pCommandBuffers[i]);
2648                skip |= validatePrimaryCommandBufferState(
2649                    dev_data, cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]));
2650                skip |= validateQueueFamilyIndices(dev_data, cb_node, queue);
2651
2652                // Potential early exit here as bad object state may crash in delayed function calls
2653                if (skip) {
2654                    return true;
2655                }
2656
2657                // Call submit-time functions to validate/update state
2658                for (auto &function : cb_node->validate_functions) {
2659                    skip |= function();
2660                }
2661                for (auto &function : cb_node->eventUpdates) {
2662                    skip |= function(queue);
2663                }
2664                for (auto &function : cb_node->queryUpdates) {
2665                    skip |= function(queue);
2666                }
2667            }
2668        }
2669    }
2670    return skip;
2671}
2672
2673VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
2674    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
2675    std::unique_lock<std::mutex> lock(global_lock);
2676
2677    bool skip = PreCallValidateQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
2678    lock.unlock();
2679
2680    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2681
2682    VkResult result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
2683
2684    lock.lock();
2685    PostCallRecordQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
2686    lock.unlock();
2687    return result;
2688}
2689
2690static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
2691    bool skip = false;
2692    if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
2693        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2694                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_16c004f8, "MEM",
2695                        "Number of currently valid memory objects is not less than the maximum allowed (%u). %s",
2696                        dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount,
2697                        validation_error_map[VALIDATION_ERROR_16c004f8]);
2698    }
2699    return skip;
2700}
2701
2702static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
2703    add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
2704    return;
2705}
2706
2707VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
2708                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
2709    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
2710    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2711    std::unique_lock<std::mutex> lock(global_lock);
2712    bool skip = PreCallValidateAllocateMemory(dev_data);
2713    if (!skip) {
2714        lock.unlock();
2715        result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
2716        lock.lock();
2717        if (VK_SUCCESS == result) {
2718            PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
2719        }
2720    }
2721    return result;
2722}
2723
2724// For the given object node, if it is in use, flag a validation error and return the callback result, else return false
2725bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
2726                            UNIQUE_VALIDATION_ERROR_CODE error_code) {
2727    if (dev_data->instance_data->disabled.object_in_use) return false;
2728    bool skip = false;
2729    if (obj_node->in_use.load()) {
2730        skip |=
2731            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle,
2732                    __LINE__, error_code, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer. %s",
2733                    object_string[obj_struct.type], obj_struct.handle, validation_error_map[error_code]);
2734    }
2735    return skip;
2736}
2737
2738static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
2739    *mem_info = GetMemObjInfo(dev_data, mem);
2740    *obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory};
2741    if (dev_data->instance_data->disabled.free_memory) return false;
2742    bool skip = false;
2743    if (*mem_info) {
2744        skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, VALIDATION_ERROR_2880054a);
2745    }
2746    return skip;
2747}
2748
2749static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
2750    // Clear mem binding for any bound objects
2751    for (auto obj : mem_info->obj_bindings) {
2752        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle, __LINE__,
2753                MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
2754                obj.handle, HandleToUint64(mem_info->mem));
2755        switch (obj.type) {
2756            case kVulkanObjectTypeImage: {
2757                auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
2758                assert(image_state);  // Any destroyed images should already be removed from bindings
2759                image_state->binding.mem = MEMORY_UNBOUND;
2760                break;
2761            }
2762            case kVulkanObjectTypeBuffer: {
2763                auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
2764                assert(buffer_state);  // Any destroyed buffers should already be removed from bindings
2765                buffer_state->binding.mem = MEMORY_UNBOUND;
2766                break;
2767            }
2768            default:
2769                // Should only have buffer or image objects bound to memory
2770                assert(0);
2771        }
2772    }
2773    // Any bound cmd buffers are now invalid
2774    invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
2775    dev_data->memObjMap.erase(mem);
2776}
2777
2778VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
2779    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2780    DEVICE_MEM_INFO *mem_info = nullptr;
2781    VK_OBJECT obj_struct;
2782    std::unique_lock<std::mutex> lock(global_lock);
2783    bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
2784    if (!skip) {
2785        lock.unlock();
2786        dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
2787        lock.lock();
2788        if (mem != VK_NULL_HANDLE) {
2789            PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
2790        }
2791    }
2792}
2793
2794// Validate the given map-memory range. The memory must not already be mapped,
2795//  and the size of the map range must be:
2796//  1. Not zero
2797//  2. Within the size of the memory allocation
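// Example (illustrative): mapping at offset 256 with size VK_WHOLE_SIZE on a 256-byte allocation is
// invalid, because the offset is not less than allocationSize.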
2798static bool ValidateMapMemRange(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
2799    bool skip = false;
2800
2801    if (size == 0) {
2802        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2803                       HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
2804                       "vkMapMemory: Attempting to map a memory range of size zero");
2805    }
2806
2807    auto mem_element = dev_data->memObjMap.find(mem);
2808    if (mem_element != dev_data->memObjMap.end()) {
2809        auto mem_info = mem_element->second.get();
2810        // It is an application error to call VkMapMemory on an object that is already mapped
2811        if (mem_info->mem_range.size != 0) {
2812            skip =
2813                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2814                        HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
2815                        "vkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, HandleToUint64(mem));
2816        }
2817
2818        // Validate that offset + size is within object's allocationSize
2819        if (size == VK_WHOLE_SIZE) {
2820            if (offset >= mem_info->alloc_info.allocationSize) {
2821                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2822                               HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
2823                               "Mapping memory from 0x%" PRIx64 " to 0x%" PRIx64
2824                               " with size of VK_WHOLE_SIZE oversteps total allocation size 0x%" PRIx64,
2825                               offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
2826            }
2827        } else {
2828            if ((offset + size) > mem_info->alloc_info.allocationSize) {
2829                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2830                               HandleToUint64(mem), __LINE__, VALIDATION_ERROR_31200552, "MEM",
2831                               "Mapping memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total allocation size 0x%" PRIx64 ". %s",
2832                               offset, size + offset, mem_info->alloc_info.allocationSize,
2833                               validation_error_map[VALIDATION_ERROR_31200552]);
2834            }
2835        }
2836    }
2837    return skip;
2838}
2839
2840static void storeMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
2841    auto mem_info = GetMemObjInfo(dev_data, mem);
2842    if (mem_info) {
2843        mem_info->mem_range.offset = offset;
2844        mem_info->mem_range.size = size;
2845    }
2846}
2847
2848static bool deleteMemRanges(layer_data *dev_data, VkDeviceMemory mem) {
2849    bool skip = false;
2850    auto mem_info = GetMemObjInfo(dev_data, mem);
2851    if (mem_info) {
2852        if (!mem_info->mem_range.size) {
2853            // Valid Usage: memory must currently be mapped
2854            skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2855                           HandleToUint64(mem), __LINE__, VALIDATION_ERROR_33600562, "MEM",
2856                           "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64 ". %s", HandleToUint64(mem),
2857                           validation_error_map[VALIDATION_ERROR_33600562]);
2858        }
2859        mem_info->mem_range.size = 0;
2860        if (mem_info->shadow_copy) {
2861            free(mem_info->shadow_copy_base);
2862            mem_info->shadow_copy_base = 0;
2863            mem_info->shadow_copy = 0;
2864        }
2865    }
2866    return skip;
2867}
2868
2869// Guard value for pad data
2870static char NoncoherentMemoryFillValue = 0xb;
2871
2872static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
2873                                     void **ppData) {
2874    auto mem_info = GetMemObjInfo(dev_data, mem);
2875    if (mem_info) {
2876        mem_info->p_driver_data = *ppData;
2877        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
2878        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
2879            mem_info->shadow_copy = 0;
2880        } else {
2881            if (size == VK_WHOLE_SIZE) {
2882                size = mem_info->alloc_info.allocationSize - offset;
2883            }
2884            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
2885            assert(SafeModulo(mem_info->shadow_pad_size,
2886                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
2887            // Ensure start of mapped region reflects hardware alignment constraints
2888            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
2889
2890            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
2891            uint64_t start_offset = offset % map_alignment;
2892            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
2893            mem_info->shadow_copy_base =
2894                malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
2895
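            // Align the returned pointer so that (shadow_copy - start_offset) is a multiple of
            // minMemoryMapAlignment; combined with the alignment of shadow_pad_size, this keeps
            // (ppData - offset) aligned as the spec requires.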
2896            mem_info->shadow_copy =
2897                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
2898                                         ~(map_alignment - 1)) +
2899                start_offset;
2900            assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
2901                                  map_alignment) == 0);
2902
2903            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
2904            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
2905        }
2906    }
2907}
2908
2909// Verify that the state of a fence being waited on is appropriate. That is,
2910//  the fence should have been submitted on a queue or during acquire-next-image;
2911//  waiting on a fence that was never submitted may never complete.
2912static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
2913    bool skip = false;
2914
2915    auto pFence = GetFenceNode(dev_data, fence);
2916    if (pFence) {
2917        if (pFence->state == FENCE_UNSIGNALED) {
2918            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2919                            HandleToUint64(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
2920                            "%s called for fence 0x%" PRIxLEAST64
2921                            " which has not been submitted on a Queue or during "
2922                            "acquire next image.",
2923                            apiCall, HandleToUint64(fence));
2924        }
2925    }
2926    return skip;
2927}
2928
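// Mark a fence retired. When the fence was signaled by a queue, use that as proof that all prior work
// on the queue has completed and retire it; WSI-signaled fences are simply marked retired.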
2929static void RetireFence(layer_data *dev_data, VkFence fence) {
2930    auto pFence = GetFenceNode(dev_data, fence);
2931    if (pFence->signaler.first != VK_NULL_HANDLE) {
2932        // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
2933        RetireWorkOnQueue(dev_data, GetQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
2934    } else {
2935        // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
2936        // the fence as retired.
2937        pFence->state = FENCE_RETIRED;
2938    }
2939}
2940
2941static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
2942    if (dev_data->instance_data->disabled.wait_for_fences) return false;
2943    bool skip = false;
2944    for (uint32_t i = 0; i < fence_count; i++) {
2945        skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
2946        skip |= VerifyQueueStateToFence(dev_data, fences[i]);
2947    }
2948    return skip;
2949}
2950
2951static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
2952    // When we know that all fences are complete we can clean/remove their CBs
2953    if ((VK_TRUE == wait_all) || (1 == fence_count)) {
2954        for (uint32_t i = 0; i < fence_count; i++) {
2955            RetireFence(dev_data, fences[i]);
2956        }
2957    }
2958    // NOTE : Alternate case not handled here is when some fences have completed. In
2959    //  this case for app to guarantee which fences completed it will have to call
2960    //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
2961}
2962
2963VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
2964                                             uint64_t timeout) {
2965    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2966    // Verify fence status of submitted fences
2967    std::unique_lock<std::mutex> lock(global_lock);
2968    bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
2969    lock.unlock();
2970    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2971
2972    VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
2973
2974    if (result == VK_SUCCESS) {
2975        lock.lock();
2976        PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
2977        lock.unlock();
2978    }
2979    return result;
2980}
2981
2982static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
2983    if (dev_data->instance_data->disabled.get_fence_state) return false;
2984    return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
2985}
2986
2987static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }
2988
2989VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
2990    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2991    std::unique_lock<std::mutex> lock(global_lock);
2992    bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
2993    lock.unlock();
2994    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2995
2996    VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
2997    if (result == VK_SUCCESS) {
2998        lock.lock();
2999        PostCallRecordGetFenceStatus(dev_data, fence);
3000        lock.unlock();
3001    }
3002    return result;
3003}
3004
3005static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
3006    // Add queue to tracking set only if it is new
3007    auto result = dev_data->queues.emplace(queue);
3008    if (result.second) {
3009        QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
3010        queue_state->queue = queue;
3011        queue_state->queueFamilyIndex = q_family_index;
3012        queue_state->seq = 0;
3013    }
3014}

VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
    std::lock_guard<std::mutex> lock(global_lock);

    PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
}

static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
    *queue_state = GetQueueState(dev_data, queue);
    if (dev_data->instance_data->disabled.queue_wait_idle) return false;
    return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
}

static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
    RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
}

VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    QUEUE_STATE *queue_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordQueueWaitIdle(dev_data, queue_state);
        lock.unlock();
    }
    return result;
}

static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
    if (dev_data->instance_data->disabled.device_wait_idle) return false;
    bool skip = false;
    for (auto &queue : dev_data->queueMap) {
        skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
    }
    return skip;
}

static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
    for (auto &queue : dev_data->queueMap) {
        RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
    }
}

VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDeviceWaitIdle(dev_data);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordDeviceWaitIdle(dev_data);
        lock.unlock();
    }
    return result;
}

static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
    *fence_node = GetFenceNode(dev_data, fence);
    *obj_struct = {HandleToUint64(fence), kVulkanObjectTypeFence};
    if (dev_data->instance_data->disabled.destroy_fence) return false;
    bool skip = false;
    if (*fence_node) {
        if ((*fence_node)->state == FENCE_INFLIGHT) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            HandleToUint64(fence), __LINE__, VALIDATION_ERROR_24e008c0, "DS", "Fence 0x%" PRIx64 " is in use. %s",
                            HandleToUint64(fence), validation_error_map[VALIDATION_ERROR_24e008c0]);
        }
    }
    return skip;
}

static void PostCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }

VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    FENCE_NODE *fence_node = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);

    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
        lock.lock();
        PostCallRecordDestroyFence(dev_data, fence);
    }
}

static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
                                            VK_OBJECT *obj_struct) {
    *sema_node = GetSemaphoreNode(dev_data, semaphore);
    *obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
    if (dev_data->instance_data->disabled.destroy_semaphore) return false;
    bool skip = false;
    if (*sema_node) {
        skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, VALIDATION_ERROR_268008e2);
    }
    return skip;
}

static void PostCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }

VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    SEMAPHORE_NODE *sema_node;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
        lock.lock();
        PostCallRecordDestroySemaphore(dev_data, semaphore);
    }
}

static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
    *event_state = GetEventNode(dev_data, event);
    *obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent};
    if (dev_data->instance_data->disabled.destroy_event) return false;
    bool skip = false;
    if (*event_state) {
        skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, VALIDATION_ERROR_24c008f2);
    }
    return skip;
}

static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
    dev_data->eventMap.erase(event);
}

VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    EVENT_STATE *event_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
        lock.lock();
        if (event != VK_NULL_HANDLE) {
            PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
        }
    }
}

static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
                                            VK_OBJECT *obj_struct) {
    *qp_state = GetQueryPoolNode(dev_data, query_pool);
    *obj_struct = {HandleToUint64(query_pool), kVulkanObjectTypeQueryPool};
    if (dev_data->instance_data->disabled.destroy_query_pool) return false;
    bool skip = false;
    if (*qp_state) {
        skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, VALIDATION_ERROR_26200632);
    }
    return skip;
}

static void PostCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
                                           VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
    dev_data->queryPoolMap.erase(query_pool);
}

VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    QUERY_POOL_NODE *qp_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
        lock.lock();
        if (queryPool != VK_NULL_HANDLE) {
            PostCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
        }
    }
}

static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
                                               uint32_t query_count, VkQueryResultFlags flags,
                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
    // TODO: clean this up, it's insanely wasteful.
    for (auto cmd_buffer : dev_data->commandBufferMap) {
        if (cmd_buffer.second->in_use.load()) {
            for (auto query_state_pair : cmd_buffer.second->queryToStateMap) {
                (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer.first);
            }
        }
    }
    if (dev_data->instance_data->disabled.get_query_pool_results) return false;
    bool skip = false;
    for (uint32_t i = 0; i < query_count; ++i) {
        QueryObject query = {query_pool, first_query + i};
        auto qif_pair = queries_in_flight->find(query);
        auto query_state_pair = dev_data->queryToStateMap.find(query);
        if (query_state_pair != dev_data->queryToStateMap.end()) {
            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
                // Available and in flight
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = GetCBNode(dev_data, cmd_buffer);
                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
                    if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                        "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
                                        HandleToUint64(query_pool), first_query + i);
                    }
                }
            } else if (qif_pair != queries_in_flight->end() && !query_state_pair->second) {
                // Unavailable and in flight
                // TODO : Can there be the same query in use by multiple command buffers in flight?
                bool make_available = false;
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = GetCBNode(dev_data, cmd_buffer);
                    make_available |= cb->queryToStateMap[query];
                }
                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                    "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                    HandleToUint64(query_pool), first_query + i);
                }
            } else if (!query_state_pair->second) {
                // Unavailable and not in flight
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                HandleToUint64(query_pool), first_query + i);
            }
        } else {
            // Uninitialized: no data has ever been collected for this index
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                            "Cannot get query results on queryPool 0x%" PRIx64
                            " with index %d as data has not been collected for this index.",
                            HandleToUint64(query_pool), first_query + i);
        }
    }
    return skip;
}
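
// Decision summary for PreCallValidateGetQueryPoolResults above, per (pool, index) query:
//  - known, available, in flight:   error unless the command buffer waited on an event before resetting the query
//  - known, unavailable, in flight: error unless WAIT or PARTIAL was requested and an in-flight CB
//                                   will make the result available
//  - known, unavailable, idle:      error; the result was never made available
//  - unknown:                       error; no data has ever been collected for this index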

static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
                                              uint32_t query_count,
                                              unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
    for (uint32_t i = 0; i < query_count; ++i) {
        QueryObject query = {query_pool, first_query + i};
        auto qif_pair = queries_in_flight->find(query);
        auto query_state_pair = dev_data->queryToStateMap.find(query);
        if (query_state_pair != dev_data->queryToStateMap.end()) {
            // Available and in flight
            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = GetCBNode(dev_data, cmd_buffer);
                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
                    if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
                        for (auto event : query_event_pair->second) {
                            dev_data->eventMap[event].needsSignaled = true;
                        }
                    }
                }
            }
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                                                   size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result =
        dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
    lock.lock();
    PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
    lock.unlock();
    return result;
}

// Return true if given ranges intersect, else false
// Prereq : For both ranges, range->end - range->start > 0. A violation of this would have already
//  generated an error, so it is not re-checked here.
// When comparing a linear range against a non-linear one, both ranges are padded out to the
//  bufferImageGranularity limit. In that padded case, if an alias is encountered a warning is logged
//  and *skip may be set by the callback, so callers should merge in the skip value whenever the
//  padding case is possible.
// The warning can be suppressed by passing skip_checks=true, for call sites outside the validation path.
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip,
                            bool skip_checks) {
    *skip = false;
    auto r1_start = range1->start;
    auto r1_end = range1->end;
    auto r2_start = range2->start;
    auto r2_end = range2->end;
    VkDeviceSize pad_align = 1;
    if (range1->linear != range2->linear) {
        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
    }
    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;

    if (!skip_checks && (range1->linear != range2->linear)) {
        // In linear vs. non-linear case, warn of aliasing
        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
        const char *r1_type_str = range1->image ? "image" : "buffer";
        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
        const char *r2_type_str = range2->image ? "image" : "buffer";
        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
        *skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, 0,
                         MEMTRACK_INVALID_ALIASING, "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
                                                           " which may indicate a bug. For further info refer to the "
                                                           "Buffer-Image Granularity section of the Vulkan specification. "
                                                           "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/"
                                                           "xhtml/vkspec.html#resources-bufferimagegranularity)",
                         r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
    }
    // Ranges intersect
    return true;
}
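
// Illustrative sketch of the masking above (hypothetical values): with bufferImageGranularity =
// 0x400, a linear range [0x000, 0x3FF] and a non-linear range [0x500, 0x8FF] are compared on
// 0x400-aligned "pages" by masking with ~(pad_align - 1):
//   r1_end & ~0x3FF = 0x000,  r2_start & ~0x3FF = 0x400  ->  0x000 < 0x400, so no overlap
// If the non-linear range instead started at 0x3FF, its masked start would be 0x000: the two
// ranges share a granularity page and are treated as intersecting (and the warning may fire).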
// Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
    // Create a local MEMORY_RANGE struct to wrap offset/size
    MEMORY_RANGE range_wrap;
    // Sync linear with range1 to avoid padding and the potential aliasing-warning case
    range_wrap.linear = range1->linear;
    range_wrap.start = offset;
    range_wrap.end = end;
    bool tmp_bool;
    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool, true);
}
// For given mem_info, set all ranges valid that intersect [offset-end] range
// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
    bool tmp_bool = false;
    MEMORY_RANGE map_range = {};
    map_range.linear = true;
    map_range.start = offset;
    map_range.end = end;
    for (auto &handle_range_pair : mem_info->bound_ranges) {
        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool, false)) {
            // TODO : WARN here if tmp_bool true?
            handle_range_pair.second.valid = true;
        }
    }
}

static bool ValidateInsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
                                      VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image,
                                      bool is_linear, const char *api_name) {
    bool skip = false;

    MEMORY_RANGE range;
    range.image = is_image;
    range.handle = handle;
    range.linear = is_linear;
    range.valid = mem_info->global_valid;
    range.memory = mem_info->mem;
    range.start = memoryOffset;
    range.size = memRequirements.size;
    range.end = memoryOffset + memRequirements.size - 1;
    range.aliases.clear();

    // Check for aliasing problems.
    for (auto &obj_range_pair : mem_info->bound_ranges) {
        auto check_range = &obj_range_pair.second;
        bool intersection_error = false;
        if (rangesIntersect(dev_data, &range, check_range, &intersection_error, false)) {
            skip |= intersection_error;
            range.aliases.insert(check_range);
        }
    }

    if (memoryOffset >= mem_info->alloc_info.allocationSize) {
        UNIQUE_VALIDATION_ERROR_CODE error_code = is_image ? VALIDATION_ERROR_1740082c : VALIDATION_ERROR_1700080e;
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        HandleToUint64(mem_info->mem), __LINE__, error_code, "MEM",
                        "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                        "), memoryOffset=0x%" PRIxLEAST64 " must be less than the memory allocation size 0x%" PRIxLEAST64 ". %s",
                        api_name, HandleToUint64(mem_info->mem), handle, memoryOffset, mem_info->alloc_info.allocationSize,
                        validation_error_map[error_code]);
    }

    return skip;
}

// Object with given handle is being bound to memory w/ given mem_info struct.
//  Track the newly bound memory range with given memoryOffset
//  Also scan any previous ranges, and track any that alias the new range. Overlap errors for
//  linear vs. non-linear ranges are flagged by ValidateInsertMemoryRange() above; this function
//  only records state.
// is_image indicates an image object, otherwise handle is for a buffer
// is_linear indicates a buffer or linear image
static void InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
    MEMORY_RANGE range;

    range.image = is_image;
    range.handle = handle;
    range.linear = is_linear;
    range.valid = mem_info->global_valid;
    range.memory = mem_info->mem;
    range.start = memoryOffset;
    range.size = memRequirements.size;
    range.end = memoryOffset + memRequirements.size - 1;
    range.aliases.clear();
    // Update memory aliasing
    // Save aliased ranges so we can copy into the final map entry below. This can't be done inside the
    // loop because we don't yet have the final pointer; if we inserted into the map before the loop to
    // get that pointer, we might enter the loop unnecessarily and check the range against itself.
    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
    for (auto &obj_range_pair : mem_info->bound_ranges) {
        auto check_range = &obj_range_pair.second;
        bool intersection_error = false;
        if (rangesIntersect(dev_data, &range, check_range, &intersection_error, true)) {
            range.aliases.insert(check_range);
            tmp_alias_ranges.insert(check_range);
        }
    }
    mem_info->bound_ranges[handle] = std::move(range);
    for (auto tmp_range : tmp_alias_ranges) {
        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
    }
    if (is_image)
        mem_info->bound_images.insert(handle);
    else
        mem_info->bound_buffers.insert(handle);
}
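
// Sketch of the aliasing bookkeeping above (hypothetical handles B and I): binding buffer B at an
// offset that overlaps already-bound image I leaves both records pointing at each other:
//   mem_info->bound_ranges[B].aliases == { &mem_info->bound_ranges[I] }
//   mem_info->bound_ranges[I].aliases == { &mem_info->bound_ranges[B] }
// The second link is patched in after the map insertion, since &bound_ranges[B] is only stable
// once the final map entry exists.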

static bool ValidateInsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
                                           VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
                                           const char *api_name) {
    return ValidateInsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear, api_name);
}
static void InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                   VkMemoryRequirements mem_reqs, bool is_linear) {
    InsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear);
}

static bool ValidateInsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
                                            VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) {
    return ValidateInsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true, api_name);
}
static void InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                    VkMemoryRequirements mem_reqs) {
    InsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true);
}

// Remove MEMORY_RANGE struct for given handle from bound_ranges of mem_info
//  is_image indicates if handle is for image or buffer
//  This function will also remove the handle from the appropriate bound_images/bound_buffers
//  set and clean up any aliases for the range being removed.
static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
    auto erase_range = &mem_info->bound_ranges[handle];
    for (auto alias_range : erase_range->aliases) {
        alias_range->aliases.erase(erase_range);
    }
    erase_range->aliases.clear();
    mem_info->bound_ranges.erase(handle);
    if (is_image) {
        mem_info->bound_images.erase(handle);
    } else {
        mem_info->bound_buffers.erase(handle);
    }
}

void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }

void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }

VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    BUFFER_STATE *buffer_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
        lock.lock();
        if (buffer != VK_NULL_HANDLE) {
            PostCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
        }
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    // Validate state before calling down chain, update common data if we'll be calling down chain
    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
        lock.lock();
        if (bufferView != VK_NULL_HANDLE) {
            PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
        }
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    IMAGE_STATE *image_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
        lock.lock();
        if (image != VK_NULL_HANDLE) {
            PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
        }
    }
}

static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
                                const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool skip = false;
    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                       HandleToUint64(mem_info->mem), __LINE__, msgCode, "MT",
                       "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
                       "type (0x%X) of this memory object 0x%" PRIx64 ". %s",
                       funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, HandleToUint64(mem_info->mem),
                       validation_error_map[msgCode]);
    }
    return skip;
}
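
// Worked example for the bit test above (hypothetical values): if the object's memoryTypeBits is
// 0x7 (types 0-2 allowed) and the allocation used memoryTypeIndex 3, then (1 << 3) & 0x7 == 0 and
// the error fires; an allocation from type 1 gives (1 << 1) & 0x7 == 0x2, which passes.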

static bool PreCallValidateBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
                                            VkDeviceSize memoryOffset) {
    bool skip = false;
    if (buffer_state) {
        std::unique_lock<std::mutex> lock(global_lock);
        // Track objects tied to memory
        uint64_t buffer_handle = HandleToUint64(buffer);
        skip = ValidateSetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, "vkBindBufferMemory()");
        if (!buffer_state->memory_requirements_checked) {
            // There's not an explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
            // BindBufferMemory, but it's implied in that the memory being bound must conform with the VkMemoryRequirements
            // from vkGetBufferMemoryRequirements()
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            buffer_handle, __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                            "vkBindBufferMemory(): Binding memory to buffer 0x%" PRIxLEAST64
                            " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
                            buffer_handle);
            // Make the call for them so we can verify the state
            lock.unlock();
            dev_data->dispatch_table.GetBufferMemoryRequirements(dev_data->device, buffer, &buffer_state->requirements);
            lock.lock();
        }

        // Validate bound memory range information
        auto mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            skip |= ValidateInsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements,
                                                    "vkBindBufferMemory()");
            skip |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, "vkBindBufferMemory()",
                                        VALIDATION_ERROR_17000816);
        }

        // Validate memory requirements alignment
        if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            buffer_handle, __LINE__, VALIDATION_ERROR_17000818, "DS",
                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64
                            " but must be an integer multiple of the "
                            "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
                            memoryOffset, buffer_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_17000818]);
        }

        // Validate memory requirements size; mem_info is null for an unknown memory handle, so guard on it
        if (mem_info && (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset))) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            buffer_handle, __LINE__, VALIDATION_ERROR_1700081a, "DS",
                            "vkBindBufferMemory(): memory size minus memoryOffset is 0x%" PRIxLEAST64
                            " but must be at least as large as "
                            "VkMemoryRequirements::size value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
                            mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size,
                            validation_error_map[VALIDATION_ERROR_1700081a]);
        }

        // Validate device limits alignments
        static const VkBufferUsageFlagBits usage_list[3] = {
            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
        static const char *memory_type[3] = {"texel", "uniform", "storage"};
        static const char *offset_name[3] = {"minTexelBufferOffsetAlignment", "minUniformBufferOffsetAlignment",
                                             "minStorageBufferOffsetAlignment"};

        // TODO:  vk_validation_stats.py cannot abide braces immediately preceding or following a validation error enum
        // clang-format off
        static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = { VALIDATION_ERROR_17000810, VALIDATION_ERROR_17000812,
            VALIDATION_ERROR_17000814 };
        // clang-format on

        // Keep this one fresh!
        const VkDeviceSize offset_requirement[3] = {
            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment};
        VkBufferUsageFlags usage = buffer_state->createInfo.usage;

        for (int i = 0; i < 3; i++) {
            if (usage & usage_list[i]) {
                if (SafeModulo(memoryOffset, offset_requirement[i]) != 0) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
                        __LINE__, msgCode[i], "DS", "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64
                                                    " but must be a multiple of "
                                                    "device limit %s 0x%" PRIxLEAST64 ". %s",
                        memory_type[i], memoryOffset, offset_name[i], offset_requirement[i], validation_error_map[msgCode[i]]);
                }
            }
        }
    }
    return skip;
}
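
// Worked example for the alignment checks above (hypothetical values): binding at memoryOffset
// 0x104 when VkMemoryRequirements::alignment is 0x100 gives SafeModulo(0x104, 0x100) == 0x4, so
// the alignment error fires; offsets 0x0, 0x100, 0x200, ... pass. The same modulo test is then
// applied against minTexelBufferOffsetAlignment and friends when the buffer's usage requires it.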

static void PostCallRecordBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
                                           VkDeviceSize memoryOffset) {
    if (buffer_state) {
        std::unique_lock<std::mutex> lock(global_lock);
        // Track bound memory range information
        auto mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
        }

        // Track objects tied to memory
        uint64_t buffer_handle = HandleToUint64(buffer);
        SetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, "vkBindBufferMemory()");

        buffer_state->binding.mem = mem;
        buffer_state->binding.offset = memoryOffset;
        buffer_state->binding.size = buffer_state->requirements.size;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    auto buffer_state = GetBufferState(dev_data, buffer);
    bool skip = PreCallValidateBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset);
    if (!skip) {
        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
        if (result == VK_SUCCESS) {
            PostCallRecordBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset);
        }
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
                                                       VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
    auto buffer_state = GetBufferState(dev_data, buffer);
    if (buffer_state) {
        buffer_state->requirements = *pMemoryRequirements;
        buffer_state->memory_requirements_checked = true;
    }
}

VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
    auto image_state = GetImageState(dev_data, image);
    if (image_state) {
        image_state->requirements = *pMemoryRequirements;
        image_state->memory_requirements_checked = true;
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    IMAGE_VIEW_STATE *image_view_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
        lock.lock();
        if (imageView != VK_NULL_HANDLE) {
            PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
        }
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
                                               const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    dev_data->shaderModuleMap.erase(shaderModule);
    lock.unlock();

    dev_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
}

static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
                                           VK_OBJECT *obj_struct) {
    *pipeline_state = getPipelineState(dev_data, pipeline);
    *obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline};
    if (dev_data->instance_data->disabled.destroy_pipeline) return false;
    bool skip = false;
    if (*pipeline_state) {
        skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, VALIDATION_ERROR_25c005fa);
    }
    return skip;
}

static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
                                          VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
    delete getPipelineState(dev_data, pipeline);
    dev_data->pipelineMap.erase(pipeline);
}

VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    PIPELINE_STATE *pipeline_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
        lock.lock();
        if (pipeline != VK_NULL_HANDLE) {
            PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
        }
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
                                                 const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    dev_data->pipelineLayoutMap.erase(pipelineLayout);
    lock.unlock();

    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
}

static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
                                          VK_OBJECT *obj_struct) {
    *sampler_state = GetSamplerState(dev_data, sampler);
    *obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler};
    if (dev_data->instance_data->disabled.destroy_sampler) return false;
    bool skip = false;
    if (*sampler_state) {
        skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, VALIDATION_ERROR_26600874);
    }
    return skip;
}

static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
                                         VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    if (sampler_state) invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
    dev_data->samplerMap.erase(sampler);
}

VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    SAMPLER_STATE *sampler_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
        lock.lock();
        if (sampler != VK_NULL_HANDLE) {
            PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
        }
    }
}

static void PostCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
    dev_data->descriptorSetLayoutMap.erase(ds_layout);
}

VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
                                                      const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
    std::unique_lock<std::mutex> lock(global_lock);
    PostCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
}

static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
                                                 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
    *desc_pool_state = GetDescriptorPoolState(dev_data, pool);
    *obj_struct = {HandleToUint64(pool), kVulkanObjectTypeDescriptorPool};
    if (dev_data->instance_data->disabled.destroy_descriptor_pool) return false;
    bool skip = false;
    if (*desc_pool_state) {
        skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, VALIDATION_ERROR_2440025e);
    }
    return skip;
}

static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
                                                DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
    // Free sets that were in this pool
    for (auto ds : desc_pool_state->sets) {
        freeDescriptorSet(dev_data, ds);
    }
    dev_data->descriptorPoolMap.erase(descriptorPool);
    delete desc_pool_state;
}

VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                 const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
        lock.lock();
        if (descriptorPool != VK_NULL_HANDLE) {
            PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
        }
    }
}
// Verify that the command buffer in the given cb_node is not in flight (in use), and return the skip result
// This function is only valid at a point when the command buffer is being reset or freed
static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
                                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    if (cb_node->in_use.load()) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_node->commandBuffer), __LINE__, error_code, "DS",
                        "Attempt to %s command buffer (0x%p) which is in use. %s", action, cb_node->commandBuffer,
                        validation_error_map[error_code]);
    }
    return skip;
}

// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    for (auto cmd_buffer : pPool->commandBuffers) {
        skip |= checkCommandBufferInFlight(dev_data, GetCBNode(dev_data, cmd_buffer), action, error_code);
    }
    return skip;
}

VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                              const VkCommandBuffer *pCommandBuffers) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);

    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
        // Verify that the command buffer is not in flight before it can be freed
        if (cb_node) {
            skip |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_2840005e);
        }
    }

    if (skip) return;

    auto pPool = GetCommandPoolNode(dev_data, commandPool);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
        // Delete CB information structure, and remove from commandBufferMap
        if (cb_node) {
            // reset prior to delete for data clean-up
            // TODO: fix this, it's insane.
            resetCB(dev_data, cb_node->commandBuffer);
            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
            delete cb_node;
        }

        // Remove commandBuffer reference from commandPoolMap
        pPool->commandBuffers.remove(pCommandBuffers[i]);
    }
    lock.unlock();

    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}
3934
3935VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
3936                                                 const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
3937    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3938
3939    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
3940
3941    if (VK_SUCCESS == result) {
3942        std::lock_guard<std::mutex> lock(global_lock);
3943        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
3944        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
3945    }
3946    return result;
3947}
3948
3949VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
3950                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
3951    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3952    bool skip = false;
3953    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
3954        if (!dev_data->enabled_features.pipelineStatisticsQuery) {
3955            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3956                            __LINE__, VALIDATION_ERROR_11c0062e, "DS",
3957                            "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device "
3958                            "with VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE. %s",
3959                            validation_error_map[VALIDATION_ERROR_11c0062e]);
3960        }
3961    }
3962
3963    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3964    if (!skip) {
3965        result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
3966    }
3967    if (result == VK_SUCCESS) {
3968        std::lock_guard<std::mutex> lock(global_lock);
3969        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
3970        qp_node->createInfo = *pCreateInfo;
3971    }
3972    return result;
3973}
3974
3975static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE **cp_state) {
3976    *cp_state = GetCommandPoolNode(dev_data, pool);
3977    if (dev_data->instance_data->disabled.destroy_command_pool) return false;
3978    bool skip = false;
3979    if (*cp_state) {
3980        // Verify that command buffers in pool are complete (not in-flight)
3981        skip |= checkCommandBuffersInFlight(dev_data, *cp_state, "destroy command pool with", VALIDATION_ERROR_24000052);
3982    }
3983    return skip;
3984}
3985
3986static void PostCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE *cp_state) {
3987    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
3988    for (auto cb : cp_state->commandBuffers) {
3989        auto cb_node = GetCBNode(dev_data, cb);
3990        clear_cmd_buf_and_mem_references(dev_data, cb_node);
3991        // Remove references to this cb_node prior to delete
3992        // TODO : Need better solution here, resetCB?
3993        for (auto obj : cb_node->object_bindings) {
3994            removeCommandBufferBinding(dev_data, &obj, cb_node);
3995        }
3996        for (auto framebuffer : cb_node->framebuffers) {
3997            auto fb_state = GetFramebufferState(dev_data, framebuffer);
3998            if (fb_state) fb_state->cb_bindings.erase(cb_node);
3999        }
4000        dev_data->commandBufferMap.erase(cb);  // Remove this command buffer
4001        delete cb_node;                        // delete CB info structure
4002    }
4003    dev_data->commandPoolMap.erase(pool);
4004}
4005
4006// Destroy commandPool along with all of the commandBuffers allocated from that pool
4007VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
4008    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4009    COMMAND_POOL_NODE *cp_state = nullptr;
4010    std::unique_lock<std::mutex> lock(global_lock);
4011    bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool, &cp_state);
4012    if (!skip) {
4013        lock.unlock();
4014        dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
4015        lock.lock();
4016        if (commandPool != VK_NULL_HANDLE) {
4017            PostCallRecordDestroyCommandPool(dev_data, commandPool, cp_state);
4018        }
4019    }
4020}
4021
4022VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
4023    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4024    bool skip = false;
4025
4026    std::unique_lock<std::mutex> lock(global_lock);
4027    auto pPool = GetCommandPoolNode(dev_data, commandPool);
4028    skip |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_32800050);
4029    lock.unlock();
4030
4031    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4032
4033    VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
4034
4035    // Reset all of the CBs allocated from this pool
    if ((VK_SUCCESS == result) && pPool) {
4037        lock.lock();
4038        for (auto cmdBuffer : pPool->commandBuffers) {
4039            resetCB(dev_data, cmdBuffer);
4040        }
4041        lock.unlock();
4042    }
4043    return result;
4044}
4045
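// A fence that is still in flight on a queue must not be reset; validate that first, then mark
// each fence UNSIGNALED once the reset has succeeded down the chain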
4046VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
4047    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4048    bool skip = false;
4049    std::unique_lock<std::mutex> lock(global_lock);
4050    for (uint32_t i = 0; i < fenceCount; ++i) {
4051        auto pFence = GetFenceNode(dev_data, pFences[i]);
4052        if (pFence && pFence->state == FENCE_INFLIGHT) {
4053            skip |=
4054                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4055                        HandleToUint64(pFences[i]), __LINE__, VALIDATION_ERROR_32e008c6, "DS", "Fence 0x%" PRIx64 " is in use. %s",
4056                        HandleToUint64(pFences[i]), validation_error_map[VALIDATION_ERROR_32e008c6]);
4057        }
4058    }
4059    lock.unlock();
4060
4061    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4062
4063    VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
4064
4065    if (result == VK_SUCCESS) {
4066        lock.lock();
4067        for (uint32_t i = 0; i < fenceCount; ++i) {
4068            auto pFence = GetFenceNode(dev_data, pFences[i]);
4069            if (pFence) {
4070                pFence->state = FENCE_UNSIGNALED;
4071            }
4072        }
4073        lock.unlock();
4074    }
4075
4076    return result;
4077}
4078
4079// For given cb_nodes, invalidate them and track object causing invalidation
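// The buffers are not destroyed here; they are marked CB_INVALID and the invalidating object is
// appended to broken_bindings so that later validation can report what caused the break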
4080void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
4081    for (auto cb_node : cb_nodes) {
4082        if (cb_node->state == CB_RECORDING) {
4083            log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4084                    HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4085                    "Invalidating a command buffer that's currently being recorded: 0x%p.", cb_node->commandBuffer);
4086        }
4087        cb_node->state = CB_INVALID;
4088        cb_node->broken_bindings.push_back(obj);
4089
        // If this is a secondary command buffer, propagate the invalidation to the primaries that link to it
4091        if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
4092            invalidateCommandBuffers(dev_data, cb_node->linkedCommandBuffers, obj);
4093        }
4094    }
4095}
4096
4097static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
4098                                              FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
4099    *framebuffer_state = GetFramebufferState(dev_data, framebuffer);
4100    *obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer};
4101    if (dev_data->instance_data->disabled.destroy_framebuffer) return false;
4102    bool skip = false;
4103    if (*framebuffer_state) {
4104        skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, VALIDATION_ERROR_250006f8);
4105    }
4106    return skip;
4107}
4108
4109static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
4110                                             VK_OBJECT obj_struct) {
4111    invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
4112    dev_data->frameBufferMap.erase(framebuffer);
4113}
4114
4115VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
4116    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4117    FRAMEBUFFER_STATE *framebuffer_state = nullptr;
4118    VK_OBJECT obj_struct;
4119    std::unique_lock<std::mutex> lock(global_lock);
4120    bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
4121    if (!skip) {
4122        lock.unlock();
4123        dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
4124        lock.lock();
4125        if (framebuffer != VK_NULL_HANDLE) {
4126            PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
4127        }
4128    }
4129}
4130
4131static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
4132                                             VK_OBJECT *obj_struct) {
4133    *rp_state = GetRenderPassState(dev_data, render_pass);
4134    *obj_struct = {HandleToUint64(render_pass), kVulkanObjectTypeRenderPass};
4135    if (dev_data->instance_data->disabled.destroy_renderpass) return false;
4136    bool skip = false;
4137    if (*rp_state) {
4138        skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, VALIDATION_ERROR_264006d2);
4139    }
4140    return skip;
4141}
4142
4143static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
4144                                            VK_OBJECT obj_struct) {
4145    invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
4146    dev_data->renderPassMap.erase(render_pass);
4147}
4148
4149VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
4150    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4151    RENDER_PASS_STATE *rp_state = nullptr;
4152    VK_OBJECT obj_struct;
4153    std::unique_lock<std::mutex> lock(global_lock);
4154    bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
4155    if (!skip) {
4156        lock.unlock();
4157        dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
4158        lock.lock();
4159        if (renderPass != VK_NULL_HANDLE) {
4160            PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
4161        }
4162    }
4163}
4164
4165VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
4166                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
4167    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4168    std::unique_lock<std::mutex> lock(global_lock);
4169    bool skip = PreCallValidateCreateBuffer(dev_data, pCreateInfo);
4170    lock.unlock();
4171
4172    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4173    VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
4174
4175    if (VK_SUCCESS == result) {
4176        lock.lock();
4177        PostCallRecordCreateBuffer(dev_data, pCreateInfo, pBuffer);
4178        lock.unlock();
4179    }
4180    return result;
4181}
4182
4183VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
4184                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
4185    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4186    std::unique_lock<std::mutex> lock(global_lock);
4187    bool skip = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
4188    lock.unlock();
4189    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4190    VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
4191    if (VK_SUCCESS == result) {
4192        lock.lock();
4193        PostCallRecordCreateBufferView(dev_data, pCreateInfo, pView);
4194        lock.unlock();
4195    }
4196    return result;
4197}
4198
4199// Access helper functions for external modules
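// NOTE: GetFormatProperties() and GetImageFormatProperties() below heap-allocate their result and
// hand ownership to the caller (nothing in this file frees the allocation), and the VkResult of
// GetPhysicalDeviceImageFormatProperties() is discarded, so the returned struct may be left
// uninitialized if that query fails. TODO: consider filling caller-provided structs instead.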
4200const VkFormatProperties *GetFormatProperties(core_validation::layer_data *device_data, VkFormat format) {
4201    VkFormatProperties *format_properties = new VkFormatProperties;
4202    instance_layer_data *instance_data =
4203        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
4204    instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, format_properties);
4205    return format_properties;
4206}
4207
4208const VkImageFormatProperties *GetImageFormatProperties(core_validation::layer_data *device_data, VkFormat format,
4209                                                        VkImageType image_type, VkImageTiling tiling, VkImageUsageFlags usage,
4210                                                        VkImageCreateFlags flags) {
4211    VkImageFormatProperties *image_format_properties = new VkImageFormatProperties;
4212    instance_layer_data *instance_data =
4213        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
4214    instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties(device_data->physical_device, format, image_type, tiling,
4215                                                                         usage, flags, image_format_properties);
4216    return image_format_properties;
4217}
4218
4219const debug_report_data *GetReportData(const core_validation::layer_data *device_data) { return device_data->report_data; }
4220
4221const VkPhysicalDeviceProperties *GetPhysicalDeviceProperties(core_validation::layer_data *device_data) {
4222    return &device_data->phys_dev_props;
4223}
4224
4225const CHECK_DISABLED *GetDisables(core_validation::layer_data *device_data) { return &device_data->instance_data->disabled; }
4226
4227std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *device_data) {
4228    return &device_data->imageMap;
4229}
4230
4231std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(core_validation::layer_data *device_data) {
4232    return &device_data->imageSubresourceMap;
4233}
4234
4235std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *device_data) {
4236    return &device_data->imageLayoutMap;
4237}
4238
4239std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *GetImageLayoutMap(layer_data const *device_data) {
4240    return &device_data->imageLayoutMap;
4241}
4242
4243std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *GetBufferMap(layer_data *device_data) {
4244    return &device_data->bufferMap;
4245}
4246
4247std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *GetBufferViewMap(layer_data *device_data) {
4248    return &device_data->bufferViewMap;
4249}
4250
4251std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *GetImageViewMap(layer_data *device_data) {
4252    return &device_data->imageViewMap;
4253}
4254
4255const PHYS_DEV_PROPERTIES_NODE *GetPhysDevProperties(const layer_data *device_data) {
4256    return &device_data->phys_dev_properties;
4257}
4258
4259const VkPhysicalDeviceFeatures *GetEnabledFeatures(const layer_data *device_data) {
4260    return &device_data->enabled_features;
4261}
4262
4263const DeviceExtensions *GetDeviceExtensions(const layer_data *device_data) { return &device_data->extensions; }
4264
4265VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
4266                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
4267    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4268    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
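    // NOTE: unlike most Create* entry points in this file, validation here is not wrapped in
    // global_lock; presumably PreCallValidateCreateImage() synchronizes internally where needed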
4269    bool skip = PreCallValidateCreateImage(dev_data, pCreateInfo, pAllocator, pImage);
4270    if (!skip) {
4271        result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
4272    }
4273    if (VK_SUCCESS == result) {
4274        std::lock_guard<std::mutex> lock(global_lock);
4275        PostCallRecordCreateImage(dev_data, pCreateInfo, pImage);
4276    }
4277    return result;
4278}
4279
4280VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
4281                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
4282    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4283    std::unique_lock<std::mutex> lock(global_lock);
4284    bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
4285    lock.unlock();
4286    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4287    VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
4288    if (VK_SUCCESS == result) {
4289        lock.lock();
4290        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
4291        lock.unlock();
4292    }
4293
4294    return result;
4295}
4296
4297VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
4298                                           const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
4299    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4300    VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
4301    if (VK_SUCCESS == result) {
4302        std::lock_guard<std::mutex> lock(global_lock);
4303        auto &fence_node = dev_data->fenceMap[*pFence];
4304        fence_node.fence = *pFence;
4305        fence_node.createInfo = *pCreateInfo;
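        // Fences created in the signaled state are tracked as RETIRED; all others start UNSIGNALED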
4306        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
4307    }
4308    return result;
4309}
4310
4311// TODO handle pipeline caches
4312VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
4313                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
4314    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4315    VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
4316    return result;
4317}
4318
4319VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
4320                                                const VkAllocationCallbacks *pAllocator) {
4321    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4322    dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
4323}
4324
4325VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize,
4326                                                    void *pData) {
4327    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4328    VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
4329    return result;
4330}
4331
4332VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
4333                                                   const VkPipelineCache *pSrcCaches) {
4334    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4335    VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
4336    return result;
4337}
4338
4339// utility function to set collective state for pipeline
4340void set_pipeline_state(PIPELINE_STATE *pPipe) {
    // If any enabled attachment uses a constant-color blend factor, set blendConstantsEnabled for the pipeline
4342    if (pPipe->graphicsPipelineCI.pColorBlendState) {
4343        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
4344            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
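                // The four constant-color blend factors are contiguous in the VkBlendFactor enum
                // (CONSTANT_COLOR, ONE_MINUS_CONSTANT_COLOR, CONSTANT_ALPHA, ONE_MINUS_CONSTANT_ALPHA),
                // so each factor can be tested with a single range check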
4345                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4346                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4347                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4348                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4349                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4350                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4351                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4352                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
4353                    pPipe->blendConstantsEnabled = true;
4354                }
4355            }
4356        }
4357    }
4358}
4359
4360bool validate_dual_src_blend_feature(layer_data *device_data, PIPELINE_STATE *pipe_state) {
4361    bool skip = false;
4362    if (pipe_state->graphicsPipelineCI.pColorBlendState) {
4363        for (size_t i = 0; i < pipe_state->attachments.size(); ++i) {
            if (!device_data->enabled_features.dualSrcBlend) {
                // Any of the four blend factors (src/dst x color/alpha) can select a dual-source factor
                auto is_dual_src = [](VkBlendFactor factor) {
                    return (factor == VK_BLEND_FACTOR_SRC1_COLOR) || (factor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                           (factor == VK_BLEND_FACTOR_SRC1_ALPHA) || (factor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA);
                };
                if (is_dual_src(pipe_state->attachments[i].srcColorBlendFactor) ||
                    is_dual_src(pipe_state->attachments[i].dstColorBlendFactor) ||
                    is_dual_src(pipe_state->attachments[i].srcAlphaBlendFactor) ||
                    is_dual_src(pipe_state->attachments[i].dstAlphaBlendFactor)) {
4373                    skip |=
4374                        log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
4375                                HandleToUint64(pipe_state->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
4376                                "CmdBindPipeline: vkPipeline (0x%" PRIxLEAST64 ") attachment[" PRINTF_SIZE_T_SPECIFIER
4377                                "] has a dual-source blend factor but this device feature is not enabled.",
4378                                HandleToUint64(pipe_state->pipeline), i);
4379                }
4380            }
4381        }
4382    }
4383    return skip;
4384}
4385
4386static bool PreCallCreateGraphicsPipelines(layer_data *device_data, uint32_t count,
4387                                           const VkGraphicsPipelineCreateInfo *create_infos, vector<PIPELINE_STATE *> &pipe_state) {
4388    bool skip = false;
4389    instance_layer_data *instance_data =
4390        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
4391
4392    for (uint32_t i = 0; i < count; i++) {
4393        skip |= verifyPipelineCreateState(device_data, pipe_state, i);
4394        if (create_infos[i].pVertexInputState != NULL) {
4395            for (uint32_t j = 0; j < create_infos[i].pVertexInputState->vertexAttributeDescriptionCount; j++) {
4396                VkFormat format = create_infos[i].pVertexInputState->pVertexAttributeDescriptions[j].format;
4397                // Internal call to get format info.  Still goes through layers, could potentially go directly to ICD.
4398                VkFormatProperties properties;
4399                instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &properties);
4400                if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
4401                    skip |= log_msg(
4402                        device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4403                        __LINE__, VALIDATION_ERROR_14a004de, "IMAGE",
4404                        "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
4405                        "(%s) is not a supported vertex buffer format. %s",
4406                        i, j, string_VkFormat(format), validation_error_map[VALIDATION_ERROR_14a004de]);
4407                }
4408            }
4409        }
4410    }
4411    return skip;
4412}
4413
4414VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
4415                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
4416                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
4417    // TODO What to do with pipelineCache?
4418    // The order of operations here is a little convoluted but gets the job done
4419    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
4420    //  2. Create state is then validated (which uses flags setup during shadowing)
4421    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
4422    bool skip = false;
4423    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
4424    vector<PIPELINE_STATE *> pipe_state(count);
4425    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4426
4427    uint32_t i = 0;
4428    std::unique_lock<std::mutex> lock(global_lock);
4429
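    // NOTE: the loop below assumes pCreateInfos[i].renderPass and .layout are valid handles;
    // invalid-handle detection is left to other layers (e.g. object_tracker), so the state
    // look-ups are not null-checked here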
4430    for (i = 0; i < count; i++) {
4431        pipe_state[i] = new PIPELINE_STATE;
4432        pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i]);
4433        pipe_state[i]->render_pass_ci.initialize(GetRenderPassState(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
4434        pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
4435    }
4436    skip |= PreCallCreateGraphicsPipelines(dev_data, count, pCreateInfos, pipe_state);
4437
4438    if (skip) {
4439        for (i = 0; i < count; i++) {
4440            delete pipe_state[i];
4441            pPipelines[i] = VK_NULL_HANDLE;
4442        }
4443        return VK_ERROR_VALIDATION_FAILED_EXT;
4444    }
4445
4446    lock.unlock();
4447    auto result =
4448        dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
4449    lock.lock();
4450    for (i = 0; i < count; i++) {
4451        if (pPipelines[i] == VK_NULL_HANDLE) {
4452            delete pipe_state[i];
4453        } else {
4454            pipe_state[i]->pipeline = pPipelines[i];
4455            dev_data->pipelineMap[pipe_state[i]->pipeline] = pipe_state[i];
4456        }
4457    }
4458
4459    return result;
4460}
4461
4462VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
4463                                                      const VkComputePipelineCreateInfo *pCreateInfos,
4464                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
4465    bool skip = false;
4466
4467    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
4468    vector<PIPELINE_STATE *> pPipeState(count);
4469    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4470
4471    uint32_t i = 0;
4472    std::unique_lock<std::mutex> lock(global_lock);
4473    for (i = 0; i < count; i++) {
4474        // TODO: Verify compute stage bits
4475
4476        // Create and initialize internal tracking data structure
4477        pPipeState[i] = new PIPELINE_STATE;
4478        pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
4479        pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
4480
4481        // TODO: Add Compute Pipeline Verification
4482        skip |= validate_compute_pipeline(dev_data, pPipeState[i]);
4483        // skip |= verifyPipelineCreateState(dev_data, pPipeState[i]);
4484    }
4485
4486    if (skip) {
4487        for (i = 0; i < count; i++) {
4488            // Clean up any locally allocated data structures
4489            delete pPipeState[i];
4490            pPipelines[i] = VK_NULL_HANDLE;
4491        }
4492        return VK_ERROR_VALIDATION_FAILED_EXT;
4493    }
4494
4495    lock.unlock();
4496    auto result =
4497        dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
4498    lock.lock();
4499    for (i = 0; i < count; i++) {
4500        if (pPipelines[i] == VK_NULL_HANDLE) {
4501            delete pPipeState[i];
4502        } else {
4503            pPipeState[i]->pipeline = pPipelines[i];
4504            dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
4505        }
4506    }
4507
4508    return result;
4509}
4510
4511VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
4512                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
4513    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4514    VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
4515    if (VK_SUCCESS == result) {
4516        std::lock_guard<std::mutex> lock(global_lock);
4517        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
4518    }
4519    return result;
4520}
4521
4522static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
4523    if (dev_data->instance_data->disabled.create_descriptor_set_layout) return false;
4524    return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info);
4525}
4526
4527static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
4528                                                    VkDescriptorSetLayout set_layout) {
4529    dev_data->descriptorSetLayoutMap[set_layout] = std::unique_ptr<cvdescriptorset::DescriptorSetLayout>(
4530        new cvdescriptorset::DescriptorSetLayout(create_info, set_layout));
4531}
4532
4533VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
4534                                                         const VkAllocationCallbacks *pAllocator,
4535                                                         VkDescriptorSetLayout *pSetLayout) {
4536    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4537    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4538    std::unique_lock<std::mutex> lock(global_lock);
4539    bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
4540    if (!skip) {
4541        lock.unlock();
4542        result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
4543        if (VK_SUCCESS == result) {
4544            lock.lock();
4545            PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
4546        }
4547    }
4548    return result;
4549}
4550
4551// Used by CreatePipelineLayout and CmdPushConstants.
4552// Note that the index argument is optional and only used by CreatePipelineLayout.
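// For example, with maxPushConstantsSize = 128: (offset=124, size=8) fails the range check,
// size=6 fails the multiple-of-4 size check, and offset=2 fails the offset alignment check.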
4553static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
4554                                      const char *caller_name, uint32_t index = 0) {
4555    if (dev_data->instance_data->disabled.push_constant_range) return false;
4556    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
4557    bool skip = false;
4558    // Check that offset + size don't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
4560    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
4561        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
4562        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
4563            if (offset >= maxPushConstantsSize) {
4564                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4565                                __LINE__, VALIDATION_ERROR_11a0024c, "DS",
4566                                "%s call has push constants index %u with offset %u that "
                                "exceeds this device's maxPushConstantsSize of %u. %s",
4568                                caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_11a0024c]);
4569            }
4570            if (size > maxPushConstantsSize - offset) {
4571                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4572                                __LINE__, VALIDATION_ERROR_11a00254, "DS",
                                "exceeds this device's maxPushConstantsSize of %u. %s",
4574                                "exceeds this device's maxPushConstantSize of %u. %s",
4575                                caller_name, index, offset, size, maxPushConstantsSize,
4576                                validation_error_map[VALIDATION_ERROR_11a00254]);
4577            }
4578        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
4579            if (offset >= maxPushConstantsSize) {
4580                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4581                                __LINE__, VALIDATION_ERROR_1bc002e4, "DS",
4582                                "%s call has push constants index %u with offset %u that "
                                "exceeds this device's maxPushConstantsSize of %u. %s",
4584                                caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_1bc002e4]);
4585            }
4586            if (size > maxPushConstantsSize - offset) {
4587                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4588                                __LINE__, VALIDATION_ERROR_1bc002e6, "DS",
                                "exceeds this device's maxPushConstantsSize of %u. %s",
4590                                "exceeds this device's maxPushConstantSize of %u. %s",
4591                                caller_name, index, offset, size, maxPushConstantsSize,
4592                                validation_error_map[VALIDATION_ERROR_1bc002e6]);
4593            }
4594        } else {
4595            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4596                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
4597        }
4598    }
4599    // size needs to be non-zero and a multiple of 4.
4600    if ((size == 0) || ((size & 0x3) != 0)) {
4601        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
4602            if (size == 0) {
4603                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4604                                __LINE__, VALIDATION_ERROR_11a00250, "DS",
4605                                "%s call has push constants index %u with "
4606                                "size %u. Size must be greater than zero. %s",
4607                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_11a00250]);
4608            }
4609            if (size & 0x3) {
4610                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4611                                __LINE__, VALIDATION_ERROR_11a00252, "DS",
4612                                "%s call has push constants index %u with "
4613                                "size %u. Size must be a multiple of 4. %s",
4614                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_11a00252]);
4615            }
4616        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
4617            if (size == 0) {
4618                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4619                                __LINE__, VALIDATION_ERROR_1bc2c21b, "DS",
4620                                "%s call has push constants index %u with "
4621                                "size %u. Size must be greater than zero. %s",
4622                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_1bc2c21b]);
4623            }
4624            if (size & 0x3) {
4625                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4626                                __LINE__, VALIDATION_ERROR_1bc002e2, "DS",
4627                                "%s call has push constants index %u with "
4628                                "size %u. Size must be a multiple of 4. %s",
4629                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_1bc002e2]);
4630            }
4631        } else {
4632            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4633                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
4634        }
4635    }
4636    // offset needs to be a multiple of 4.
4637    if ((offset & 0x3) != 0) {
4638        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
4639            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4640                            __LINE__, VALIDATION_ERROR_11a0024e, "DS",
4641                            "%s call has push constants index %u with "
4642                            "offset %u. Offset must be a multiple of 4. %s",
4643                            caller_name, index, offset, validation_error_map[VALIDATION_ERROR_11a0024e]);
4644        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
4645            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4646                            __LINE__, VALIDATION_ERROR_1bc002e0, "DS",
4647                            "%s call has push constants with "
4648                            "offset %u. Offset must be a multiple of 4. %s",
4649                            caller_name, offset, validation_error_map[VALIDATION_ERROR_1bc002e0]);
4650        } else {
4651            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4652                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
4653        }
4654    }
4655    return skip;
4656}
4657
4658VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
4659                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
4660    bool skip = false;
4661    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4662    // TODO : Add checks for VALIDATION_ERRORS 865-870
4663    // Push Constant Range checks
4664    uint32_t i, j;
4665    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
4666        skip |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
4667                                          pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
4668        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
4669            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4670                            __LINE__, VALIDATION_ERROR_11a2dc03, "DS", "vkCreatePipelineLayout() call has no stageFlags set. %s",
4671                            validation_error_map[VALIDATION_ERROR_11a2dc03]);
4672        }
4673    }
4674    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4675
4676    // As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
4677    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
4678        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
4679            if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
4680                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4681                                __LINE__, VALIDATION_ERROR_0fe00248, "DS",
                                "vkCreatePipelineLayout(): Duplicate stage flags found in ranges %d and %d. %s", i, j,
4683                                validation_error_map[VALIDATION_ERROR_0fe00248]);
4684            }
4685        }
4686    }
4687
4688    VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
4689    if (VK_SUCCESS == result) {
4690        std::lock_guard<std::mutex> lock(global_lock);
4691        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
4692        plNode.layout = *pPipelineLayout;
4693        plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
4694        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
4695            plNode.set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
4696        }
4697        plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
4698        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
4699            plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
4700        }
4701    }
4702    return result;
4703}
4704
4705VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
4706                                                    const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
4707    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4708    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        // Note: operator new throws std::bad_alloc on allocation failure rather than returning
        // NULL, so the allocation itself needs no NULL check
        DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
    } else {
        // Need to do anything if pool create fails?
    }
4723    return result;
4724}
4725
4726VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
4727                                                   VkDescriptorPoolResetFlags flags) {
4728    // TODO : Add checks for VALIDATION_ERROR_32a00272
4729    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4730    VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
4731    if (VK_SUCCESS == result) {
4732        std::lock_guard<std::mutex> lock(global_lock);
4733        clearDescriptorPool(dev_data, device, descriptorPool, flags);
4734    }
4735    return result;
4736}
4737// Ensure the pool contains enough descriptors and descriptor sets to satisfy
4738// an allocation request. Fills common_data with the total number of descriptors of each type required,
4739// as well as DescriptorSetLayout ptrs used for later update.
4740static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
4741                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
4742    // Always update common data
4743    cvdescriptorset::UpdateAllocateDescriptorSetsData(dev_data, pAllocateInfo, common_data);
4744    if (dev_data->instance_data->disabled.allocate_descriptor_sets) return false;
    // All state checks for AllocateDescriptorSets are done in a single function
4746    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data, pAllocateInfo, common_data);
4747}
4748// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
4749static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
4750                                                 VkDescriptorSet *pDescriptorSets,
4751                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
4752    // All the updates are contained in a single cvdescriptorset function
4753    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
4754                                                   &dev_data->setMap, dev_data);
4755}
4756
4757// TODO: PostCallRecord routine is dependent on data generated in PreCallValidate -- needs to be moved out
4758VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
4759                                                      VkDescriptorSet *pDescriptorSets) {
4760    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4761    std::unique_lock<std::mutex> lock(global_lock);
4762    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
4763    bool skip = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
4764    lock.unlock();
4765
4766    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4767
4768    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
4769
4770    if (VK_SUCCESS == result) {
4771        lock.lock();
4772        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
4773        lock.unlock();
4774    }
4775    return result;
4776}
4777// Verify state before freeing DescriptorSets
4778static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
4779                                              const VkDescriptorSet *descriptor_sets) {
4780    if (dev_data->instance_data->disabled.free_descriptor_sets) return false;
4781    bool skip = false;
4782    // First make sure sets being destroyed are not currently in-use
4783    for (uint32_t i = 0; i < count; ++i) {
4784        if (descriptor_sets[i] != VK_NULL_HANDLE) {
4785            skip |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
4786        }
4787    }
4788
4789    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
4790    if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
4791        // Can't Free from a NON_FREE pool
4792        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4793                        HandleToUint64(pool), __LINE__, VALIDATION_ERROR_28600270, "DS",
4794                        "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
4795                        "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
4796                        validation_error_map[VALIDATION_ERROR_28600270]);
4797    }
4798    return skip;
4799}
4800// Sets have been removed from the pool so update underlying state
4801static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
4802                                             const VkDescriptorSet *descriptor_sets) {
    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
    if (!pool_state) return;
    // Update available descriptor sets in pool
    pool_state->availableSets += count;
4806
4807    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
4808    for (uint32_t i = 0; i < count; ++i) {
4809        if (descriptor_sets[i] != VK_NULL_HANDLE) {
4810            auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
4811            uint32_t type_index = 0, descriptor_count = 0;
4812            for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
4813                type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
4814                descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
4815                pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
4816            }
4817            freeDescriptorSet(dev_data, descriptor_set);
4818            pool_state->sets.erase(descriptor_set);
4819        }
4820    }
4821}
4822
4823VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
4824                                                  const VkDescriptorSet *pDescriptorSets) {
4825    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4826    // Make sure that no sets being destroyed are in-flight
4827    std::unique_lock<std::mutex> lock(global_lock);
4828    bool skip = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
4829    lock.unlock();
4830
4831    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4832    VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
4833    if (VK_SUCCESS == result) {
4834        lock.lock();
4835        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
4836        lock.unlock();
4837    }
4838    return result;
4839}
4840// TODO : This is a Proof-of-concept for core validation architecture
4841//  Really we'll want to break out these functions to separate files but
4842//  keeping it all together here to prove out design
4843// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
4844static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
4845                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
4846                                                const VkCopyDescriptorSet *pDescriptorCopies) {
4847    if (dev_data->instance_data->disabled.update_descriptor_sets) return false;
    // NOTE : UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
    //  so we can't do a single map look-up up-front; the functions below look each set up individually

    // Now make call(s) that validate state, but don't perform state updates in this function
    // We don't yet have class instances for the sets here, so use a helper function in the
    //  cvdescriptorset namespace that parses the params and makes calls into the specific set instances
4855    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
4856                                                         descriptorCopyCount, pDescriptorCopies);
4857}
4858// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
4859static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
4860                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
4861                                               const VkCopyDescriptorSet *pDescriptorCopies) {
4862    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
4863                                                 pDescriptorCopies);
4864}
4865
4866VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
4867                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
4868                                                const VkCopyDescriptorSet *pDescriptorCopies) {
4869    // Only map look-up at top level is for device-level layer_data
4870    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4871    std::unique_lock<std::mutex> lock(global_lock);
4872    bool skip = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
4873                                                    pDescriptorCopies);
4874    lock.unlock();
4875    if (!skip) {
4876        dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
4877                                                      pDescriptorCopies);
4878        lock.lock();
4879        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
4880        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
4881                                           pDescriptorCopies);
4882    }
4883}
4884
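// On success, register each new command buffer with its pool and with commandBufferMap, and
// initialize its tracking state via resetCB()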
4885VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
4886                                                      VkCommandBuffer *pCommandBuffer) {
4887    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4888    VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
4889    if (VK_SUCCESS == result) {
4890        std::unique_lock<std::mutex> lock(global_lock);
4891        auto pPool = GetCommandPoolNode(dev_data, pCreateInfo->commandPool);
4892
4893        if (pPool) {
4894            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
4895                // Add command buffer to its commandPool map
4896                pPool->commandBuffers.push_back(pCommandBuffer[i]);
4897                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
4898                // Add command buffer to map
4899                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
4900                resetCB(dev_data, pCommandBuffer[i]);
4901                pCB->createInfo = *pCreateInfo;
4902                pCB->device = device;
4903            }
4904        }
4905        lock.unlock();
4906    }
4907    return result;
4908}
4909
4910// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
4911static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
4912    addCommandBufferBinding(&fb_state->cb_bindings, {HandleToUint64(fb_state->framebuffer), kVulkanObjectTypeFramebuffer},
4913                            cb_state);
    // The render pass binding belongs to the framebuffer itself, not to any one attachment, so add
    // it once up front; this also covers framebuffers that have no attachments
    auto rp_state = GetRenderPassState(dev_data, fb_state->createInfo.renderPass);
    if (rp_state) {
        addCommandBufferBinding(&rp_state->cb_bindings, {HandleToUint64(rp_state->renderPass), kVulkanObjectTypeRenderPass},
                                cb_state);
    }
    for (auto attachment : fb_state->attachments) {
        auto view_state = attachment.view_state;
        if (view_state) {
            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
        }
    }
4925}
4926
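// vkBeginCommandBuffer() performs an implicit reset, so validation must confirm the buffer is not
// still in use; secondary buffers additionally have their inheritance info checked against the
// render pass / framebuffer they will execute within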
4927VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
4928    bool skip = false;
4929    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
4930    std::unique_lock<std::mutex> lock(global_lock);
4931    // Validate command buffer level
4932    GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
4933    if (cb_node) {
        // vkBeginCommandBuffer() implicitly resets the command buffer, so verify that it is not still in use
        // and then clear its memory references
4935        if (cb_node->in_use.load()) {
4936            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4937                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00062, "MEM",
4938                            "Calling vkBeginCommandBuffer() on active command buffer %p before it has completed. "
4939                            "You must check command buffer fence before this call. %s",
4940                            commandBuffer, validation_error_map[VALIDATION_ERROR_16e00062]);
4941        }
4942        clear_cmd_buf_and_mem_references(dev_data, cb_node);
4943        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
4944            // Secondary Command Buffer
4945            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
4946            if (!pInfo) {
4947                skip |=
4948                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4949                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00066, "DS",
4950                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info. %s", commandBuffer,
4951                            validation_error_map[VALIDATION_ERROR_16e00066]);
4952            } else {
4953                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
4954                    assert(pInfo->renderPass);
4955                    string errorString = "";
4956                    auto framebuffer = GetFramebufferState(dev_data, pInfo->framebuffer);
4957                    if (framebuffer) {
4958                        if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
4959                            !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
4960                                                             GetRenderPassState(dev_data, pInfo->renderPass)->createInfo.ptr(),
4961                                                             errorString)) {
4962                            // renderPass that framebuffer was created with must be compatible with local renderPass
4963                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4964                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
4965                                            VALIDATION_ERROR_0280006e, "DS",
4966                                            "vkBeginCommandBuffer(): Secondary Command "
4967                                            "Buffer (0x%p) renderPass (0x%" PRIxLEAST64
4968                                            ") is incompatible with framebuffer "
4969                                            "(0x%" PRIxLEAST64 ") and its render pass (0x%" PRIxLEAST64 ") due to: %s. %s",
4970                                            commandBuffer, HandleToUint64(pInfo->renderPass), HandleToUint64(pInfo->framebuffer),
4971                                            HandleToUint64(framebuffer->createInfo.renderPass), errorString.c_str(),
4972                                            validation_error_map[VALIDATION_ERROR_0280006e]);
4973                        }
4974                        // Connect this framebuffer and its children to this cmdBuffer
4975                        AddFramebufferBinding(dev_data, cb_node, framebuffer);
4976                    }
4977                }
4978                if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
4979                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
4980                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4981                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
4982                                    VALIDATION_ERROR_16e00068, "DS",
4983                                    "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
4984                                    "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device does not "
4985                                    "support precise occlusion queries. %s",
4986                                    commandBuffer, validation_error_map[VALIDATION_ERROR_16e00068]);
4987                }
4988            }
4989            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
4990                auto renderPass = GetRenderPassState(dev_data, pInfo->renderPass);
4991                if (renderPass) {
4992                    if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
4993                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4994                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
4995                                        VALIDATION_ERROR_0280006c, "DS",
4996                                        "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have a subpass index (%d) "
4997                                        "that is less than the number of subpasses (%d). %s",
4998                                        commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount,
4999                                        validation_error_map[VALIDATION_ERROR_0280006c]);
5000                    }
5001                }
5002            }
5003        }
5004        if (CB_RECORDING == cb_node->state) {
5005            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5006                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00062, "DS",
5007                            "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%p"
5008                            ") in the RECORDING state. Must first call vkEndCommandBuffer(). %s",
5009                            commandBuffer, validation_error_map[VALIDATION_ERROR_16e00062]);
5010        } else if (CB_RECORDED == cb_node->state || (CB_INVALID == cb_node->state && CMD_END == cb_node->last_cmd)) {
5011            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
5012            auto pPool = GetCommandPoolNode(dev_data, cmdPool);
5013            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
5014                skip |=
5015                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5016                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00064, "DS",
5017                            "Call to vkBeginCommandBuffer() on command buffer (0x%p"
5018                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
5019                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
5020                            commandBuffer, HandleToUint64(cmdPool), validation_error_map[VALIDATION_ERROR_16e00064]);
5021            }
5022            resetCB(dev_data, commandBuffer);
5023        }
5024        // Set updated state here in case implicit reset occurs above
5025        cb_node->state = CB_RECORDING;
5026        cb_node->beginInfo = *pBeginInfo;
5027        if (cb_node->beginInfo.pInheritanceInfo) {
5028            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
5029            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
5030            // If this is a secondary command buffer that inherits render pass state, update the items it should inherit.
5031            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
5032                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
5033                cb_node->activeRenderPass = GetRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
5034                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
5035                cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
5036                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
5037            }
5038        }
5039    }
5040    lock.unlock();
5041    if (skip) {
5042        return VK_ERROR_VALIDATION_FAILED_EXT;
5043    }
5044    VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
5045
5046    return result;
5047}
5048
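// Validate vkEndCommandBuffer(): recording must not end inside a render pass (except for secondary buffers continuing
// one) or with queries still active; on success the command buffer transitions to CB_RECORDED.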
5049VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
5050    bool skip = false;
5051    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5052    std::unique_lock<std::mutex> lock(global_lock);
5053    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5054    if (pCB) {
5055        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
5056            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
5057            // This needs spec clarification to update valid usage, see comments in PR:
5058            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
5059            skip |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_27400078);
5060        }
5061        skip |= ValidateCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
5062        UpdateCmdBufferLastCmd(pCB, CMD_END);
5063        for (auto query : pCB->activeQueries) {
5064            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5065                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_2740007a, "DS",
5066                            "Ending command buffer with in-progress query: queryPool 0x%" PRIx64 ", index %d. %s",
5067                            HandleToUint64(query.pool), query.index, validation_error_map[VALIDATION_ERROR_2740007a]);
5068        }
5069    }
5070    if (!skip) {
5071        lock.unlock();
5072        auto result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
5073        lock.lock();
5074        if (VK_SUCCESS == result) {
5075            pCB->state = CB_RECORDED;
5076        }
5077        return result;
5078    } else {
5079        return VK_ERROR_VALIDATION_FAILED_EXT;
5080    }
5081}
5082
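// Validate vkResetCommandBuffer(): the owning pool must have been created with
// VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT and the command buffer must not be in flight.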
5083VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
5084    bool skip = false;
5085    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5086    std::unique_lock<std::mutex> lock(global_lock);
5087    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5088    VkCommandPool cmdPool = pCB->createInfo.commandPool;
5089    auto pPool = GetCommandPoolNode(dev_data, cmdPool);
5090    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
5091        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5092                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_3260005c, "DS",
5093                        "Attempt to reset command buffer (0x%p) created from command pool (0x%" PRIxLEAST64
5094                        ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
5095                        commandBuffer, HandleToUint64(cmdPool), validation_error_map[VALIDATION_ERROR_3260005c]);
5096    }
5097    skip |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_3260005a);
5098    lock.unlock();
5099    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5100    VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
5101    if (VK_SUCCESS == result) {
5102        lock.lock();
5103        resetCB(dev_data, commandBuffer);
5104        lock.unlock();
5105    }
5106    return result;
5107}
5108
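// Validate and record vkCmdBindPipeline(): track the pipeline as last-bound for the given bind point and link it (and,
// for graphics pipelines, its render pass) to this command buffer for invalidation tracking.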
5109VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
5110                                           VkPipeline pipeline) {
5111    bool skip = false;
5112    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5113    std::unique_lock<std::mutex> lock(global_lock);
5114    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
5115    if (cb_state) {
5116        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
5117                                      VALIDATION_ERROR_18002415);
5118        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
5119        UpdateCmdBufferLastCmd(cb_state, CMD_BINDPIPELINE);
5120        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (cb_state->activeRenderPass)) {
5121            skip |=
5122                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
5123                        HandleToUint64(pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
5124                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
5125                        HandleToUint64(pipeline), HandleToUint64(cb_state->activeRenderPass->renderPass));
5126        }
5127        // TODO: VALIDATION_ERROR_18000612 VALIDATION_ERROR_18000616
5128
5129        PIPELINE_STATE *pipe_state = getPipelineState(dev_data, pipeline);
5130        if (pipe_state) {
5131            cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
5132            set_cb_pso_status(cb_state, pipe_state);
5133            set_pipeline_state(pipe_state);
5134            skip |= validate_dual_src_blend_feature(dev_data, pipe_state);
5135            addCommandBufferBinding(&pipe_state->cb_bindings, {HandleToUint64(pipeline), kVulkanObjectTypePipeline}, cb_state);
5136            if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
5137                // Add binding for child renderpass
5138                auto rp_state = GetRenderPassState(dev_data, pipe_state->graphicsPipelineCI.renderPass);
5139                if (rp_state) {
5140                    addCommandBufferBinding(&rp_state->cb_bindings,
5141                                            {HandleToUint64(rp_state->renderPass), kVulkanObjectTypeRenderPass}, cb_state);
5142                }
5143            }
5144        } else {
5145            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
5146                            HandleToUint64(pipeline), __LINE__, VALIDATION_ERROR_18027e01, "DS",
5147                            "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist! %s", HandleToUint64(pipeline),
5148                            validation_error_map[VALIDATION_ERROR_18027e01]);
5149        }
5150    }
5151    lock.unlock();
5152    if (!skip) dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
5153}
5154
5155VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
5156                                          const VkViewport *pViewports) {
5157    bool skip = false;
5158    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5159    std::unique_lock<std::mutex> lock(global_lock);
5160    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5161    if (pCB) {
5162        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1e002415);
5163        skip |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
5164        UpdateCmdBufferLastCmd(pCB, CMD_SETVIEWPORTSTATE);
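        // Accumulate which viewport slots now hold dynamic state, e.g. firstViewport = 1, viewportCount = 2 -> mask 0b0110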
5165        pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
5166    }
5167    lock.unlock();
5168    if (!skip) dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
5169}
5170
5171VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
5172                                         const VkRect2D *pScissors) {
5173    bool skip = false;
5174    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5175    std::unique_lock<std::mutex> lock(global_lock);
5176    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5177    if (pCB) {
5178        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d802415);
5179        skip |= ValidateCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
5180        UpdateCmdBufferLastCmd(pCB, CMD_SETSCISSORSTATE);
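        // Accumulate which scissor slots now hold dynamic state, e.g. firstScissor = 0, scissorCount = 3 -> mask 0b0111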
5181        pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
5182    }
5183    lock.unlock();
5184    if (!skip) dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
5185}
5186
5187VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
5188    bool skip = false;
5189    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5190    std::unique_lock<std::mutex> lock(global_lock);
5191    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5192    if (pCB) {
5193        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d602415);
5194        skip |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
5195        UpdateCmdBufferLastCmd(pCB, CMD_SETLINEWIDTHSTATE);
5196        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
5197
5198        PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
5199        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
5200            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5201                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1d600626, "DS",
5202                            "vkCmdSetLineWidth() called but the bound pipeline was not created with the "
5203                            "VK_DYNAMIC_STATE_LINE_WIDTH dynamic state, so the line width set here may be ignored. %s",
5204                            validation_error_map[VALIDATION_ERROR_1d600626]);
5205        } else {
5206            skip |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, kVulkanObjectTypeCommandBuffer, HandleToUint64(commandBuffer),
5207                                    lineWidth);
5208        }
5209    }
5210    lock.unlock();
5211    if (!skip) dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
5212}
5213
5214VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
5215                                           float depthBiasSlopeFactor) {
5216    bool skip = false;
5217    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5218    std::unique_lock<std::mutex> lock(global_lock);
5219    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5220    if (pCB) {
5221        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1cc02415);
5222        skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
5223        if ((depthBiasClamp != 0.0) && (!dev_data->enabled_features.depthBiasClamp)) {
5224            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5225                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1cc0062c, "DS",
5226                            "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp "
5227                            "parameter must be set to 0.0. %s",
5228                            validation_error_map[VALIDATION_ERROR_1cc0062c]);
5229        }
5230        if (!skip) {
5231            UpdateCmdBufferLastCmd(pCB, CMD_SETDEPTHBIASSTATE);
5232            pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
5233        }
5234    }
5235    lock.unlock();
5236    if (!skip)
5237        dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
5238}
5239
5240VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
5241    bool skip = false;
5242    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5243    std::unique_lock<std::mutex> lock(global_lock);
5244    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5245    if (pCB) {
5246        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ca02415);
5247        skip |= ValidateCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
5248        UpdateCmdBufferLastCmd(pCB, CMD_SETBLENDSTATE);
5249        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
5250    }
5251    lock.unlock();
5252    if (!skip) dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
5253}
5254
5255VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
5256    bool skip = false;
5257    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5258    std::unique_lock<std::mutex> lock(global_lock);
5259    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5260    if (pCB) {
5261        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ce02415);
5262        skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
5263        UpdateCmdBufferLastCmd(pCB, CMD_SETDEPTHBOUNDSSTATE);
5264        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
5265    }
5266    lock.unlock();
5267    if (!skip) dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
5268}
5269
5270VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
5271                                                    uint32_t compareMask) {
5272    bool skip = false;
5273    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5274    std::unique_lock<std::mutex> lock(global_lock);
5275    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5276    if (pCB) {
5277        skip |=
5278            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1da02415);
5279        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
5280        UpdateCmdBufferLastCmd(pCB, CMD_SETSTENCILREADMASKSTATE);
5281        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
5282    }
5283    lock.unlock();
5284    if (!skip) dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
5285}
5286
5287VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
5288    bool skip = false;
5289    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5290    std::unique_lock<std::mutex> lock(global_lock);
5291    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5292    if (pCB) {
5293        skip |=
5294            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1de02415);
5295        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
5296        UpdateCmdBufferLastCmd(pCB, CMD_SETSTENCILWRITEMASKSTATE);
5297        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
5298    }
5299    lock.unlock();
5300    if (!skip) dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
5301}
5302
5303VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
5304    bool skip = false;
5305    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5306    std::unique_lock<std::mutex> lock(global_lock);
5307    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5308    if (pCB) {
5309        skip |=
5310            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1dc02415);
5311        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
5312        UpdateCmdBufferLastCmd(pCB, CMD_SETSTENCILREFERENCESTATE);
5313        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
5314    }
5315    lock.unlock();
5316    if (!skip) dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
5317}
5318
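// Validate and record vkCmdBindDescriptorSets(): each incoming set must be compatible with the overlapping set layout
// of the given pipeline layout, every dynamic descriptor needs a correctly aligned entry in pDynamicOffsets, and
// previously bound sets that this update disturbs are invalidated. For example (hypothetical handles), binding two
// sets starting at set 1, with one dynamic UBO in each set, requires exactly two dynamic offsets:
//     VkDescriptorSet sets[2] = {setA, setB};
//     uint32_t offsets[2] = {0x100, 0x200};  // each a multiple of minUniformBufferOffsetAlignment
//     vkCmdBindDescriptorSets(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 1, 2, sets, 2, offsets);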
5319VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
5320                                                 VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
5321                                                 const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
5322                                                 const uint32_t *pDynamicOffsets) {
5323    bool skip = false;
5324    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5325    std::unique_lock<std::mutex> lock(global_lock);
5326    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
5327    if (cb_state) {
5328        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
5329                                      VALIDATION_ERROR_17c02415);
5330        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
5331        // Track total count of dynamic descriptor types to make sure we have an offset for each one
5332        uint32_t total_dynamic_descriptors = 0;
5333        string error_string = "";
5334        uint32_t last_set_index = firstSet + setCount - 1;
5335        if (last_set_index >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
5336            cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
5337            cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(last_set_index + 1);
5338        }
5339        auto old_final_bound_set = cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[last_set_index];
5340        auto pipeline_layout = getPipelineLayout(dev_data, layout);
5341        for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
5342            cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(dev_data, pDescriptorSets[set_idx]);
5343            if (descriptor_set) {
5344                cb_state->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
5345                cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[set_idx + firstSet] = descriptor_set;
5346                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
5347                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]), __LINE__,
5348                                DRAWSTATE_NONE, "DS", "Descriptor Set 0x%" PRIxLEAST64 " bound at pipeline bind point %s",
5349                                HandleToUint64(pDescriptorSets[set_idx]), string_VkPipelineBindPoint(pipelineBindPoint));
5350                if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
5351                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
5352                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
5353                                    __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
5354                                    "Descriptor Set 0x%" PRIxLEAST64
5355                                    " bound but it was never updated. You may want to either update it or not bind it.",
5356                                    HandleToUint64(pDescriptorSets[set_idx]));
5357                }
5358                // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
5359                if (!verify_set_layout_compatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
5360                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5361                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
5362                                    __LINE__, VALIDATION_ERROR_17c002cc, "DS",
5363                                    "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
5364                                    "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s. %s",
5365                                    set_idx, set_idx + firstSet, HandleToUint64(layout), error_string.c_str(),
5366                                    validation_error_map[VALIDATION_ERROR_17c002cc]);
5367                }
5368
5369                auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
5370
5371                cb_state->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx].clear();
5372
5373                if (set_dynamic_descriptor_count) {
5374                    // First make sure we won't overstep bounds of pDynamicOffsets array
5375                    if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
5376                        skip |= log_msg(
5377                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
5378                            HandleToUint64(pDescriptorSets[set_idx]), __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
5379                            "descriptorSet #%u (0x%" PRIxLEAST64
5380                            ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
5381                            "array. There must be one dynamic offset for each dynamic descriptor being bound.",
5382                            set_idx, HandleToUint64(pDescriptorSets[set_idx]), descriptor_set->GetDynamicDescriptorCount(),
5383                            (dynamicOffsetCount - total_dynamic_descriptors));
5384                    } else {  // Validate and store dynamic offsets with the set
5385                        // Validate Dynamic Offset Minimums
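                        // e.g. with minUniformBufferOffsetAlignment = 0x100, an offset of 0x180 fails while 0x200 passes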
5386                        uint32_t cur_dyn_offset = total_dynamic_descriptors;
5387                        for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
5388                            if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
5389                                if (SafeModulo(
5390                                        pDynamicOffsets[cur_dyn_offset],
5391                                        dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
5392                                    skip |=
5393                                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5394                                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
5395                                                VALIDATION_ERROR_17c002d4, "DS",
5396                                                "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
5397                                                "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
5398                                                cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
5399                                                dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
5400                                                validation_error_map[VALIDATION_ERROR_17c002d4]);
5401                                }
5402                                cur_dyn_offset++;
5403                            } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
5404                                if (SafeModulo(
5405                                        pDynamicOffsets[cur_dyn_offset],
5406                                        dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
5407                                    skip |=
5408                                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5409                                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
5410                                                VALIDATION_ERROR_17c002d4, "DS",
5411                                                "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
5412                                                "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
5413                                                cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
5414                                                dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment,
5415                                                validation_error_map[VALIDATION_ERROR_17c002d4]);
5416                                }
5417                                cur_dyn_offset++;
5418                            }
5419                        }
5420
5421                        cb_state->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx] =
5422                            std::vector<uint32_t>(pDynamicOffsets + total_dynamic_descriptors,
5423                                                  pDynamicOffsets + total_dynamic_descriptors + set_dynamic_descriptor_count);
5424                        // Keep running total of dynamic descriptor count to verify at the end
5425                        total_dynamic_descriptors += set_dynamic_descriptor_count;
5426                    }
5427                }
5428            } else {
5429                skip |=
5430                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
5431                            HandleToUint64(pDescriptorSets[set_idx]), __LINE__, DRAWSTATE_INVALID_SET, "DS",
5432                            "Attempt to bind descriptor set 0x%" PRIxLEAST64 " that doesn't exist!",
5433                            HandleToUint64(pDescriptorSets[set_idx]));
5434            }
5435            UpdateCmdBufferLastCmd(cb_state, CMD_BINDDESCRIPTORSETS);
5436            // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
5437            if (firstSet > 0) {  // Check set #s below the first bound set
5438                for (uint32_t i = 0; i < firstSet; ++i) {
5439                    if (cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
5440                        !verify_set_layout_compatibility(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i],
5441                                                         pipeline_layout, i, error_string)) {
5442                        skip |= log_msg(
5443                            dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
5444                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
5445                            HandleToUint64(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i]), __LINE__, DRAWSTATE_NONE,
5446                            "DS", "DescriptorSet 0x%" PRIxLEAST64
5447                                  " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
5448                            HandleToUint64(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i]), i,
5449                            HandleToUint64(layout));
5450                        cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
5451                    }
5452                }
5453            }
5454            // Check if newly last bound set invalidates any remaining bound sets
5455            if ((cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (last_set_index)) {
5456                if (old_final_bound_set &&
5457                    !verify_set_layout_compatibility(old_final_bound_set, pipeline_layout, last_set_index, error_string)) {
5458                    auto old_set = old_final_bound_set->GetSet();
5459                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
5460                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(old_set), __LINE__,
5461                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
5462                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
5463                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
5464                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
5465                                    HandleToUint64(old_set), last_set_index,
5466                                    HandleToUint64(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[last_set_index]),
5467                                    last_set_index, last_set_index + 1, HandleToUint64(layout));
5468                    cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
5469                }
5470            }
5471        }
5472        //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
5473        if (total_dynamic_descriptors != dynamicOffsetCount) {
5474            skip |=
5475                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5476                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_17c002ce, "DS",
5477                        "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
5478                        "is %u. It should exactly match the number of dynamic descriptors. %s",
5479                        setCount, total_dynamic_descriptors, dynamicOffsetCount, validation_error_map[VALIDATION_ERROR_17c002ce]);
5480        }
5481    }
5482    lock.unlock();
5483    if (!skip)
5484        dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
5485                                                       pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
5486}
5487
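// Validate and record vkCmdBindIndexBuffer(): the buffer must have memory bound and the offset must honor the index
// type's alignment, e.g. vkCmdBindIndexBuffer(cb, buf, 256, VK_INDEX_TYPE_UINT16) passes since 256 % 2 == 0
// (hypothetical handles).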
5488VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5489                                              VkIndexType indexType) {
5490    bool skip = false;
5491    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5492    // TODO : Need to verify somewhere that index buffers have the correct usage state flagged
5493    std::unique_lock<std::mutex> lock(global_lock);
5494
5495    auto buffer_state = GetBufferState(dev_data, buffer);
5496    auto cb_node = GetCBNode(dev_data, commandBuffer);
5497    if (cb_node && buffer_state) {
5498        skip |=
5499            ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_17e02415);
5500        skip |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
5501        skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_17e00364);
5502        std::function<bool()> function = [=]() {
5503            return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
5504        };
5505        cb_node->validate_functions.push_back(function);
5506        UpdateCmdBufferLastCmd(cb_node, CMD_BINDINDEXBUFFER);
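        // The offset must be aligned to the index size: 2 bytes for VK_INDEX_TYPE_UINT16, 4 bytes for VK_INDEX_TYPE_UINT32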
5507        VkDeviceSize offset_align = 0;
5508        switch (indexType) {
5509            case VK_INDEX_TYPE_UINT16:
5510                offset_align = 2;
5511                break;
5512            case VK_INDEX_TYPE_UINT32:
5513                offset_align = 4;
5514                break;
5515            default:
5516                // ParamChecker should catch an invalid enum; if offset_align stays 0 the alignment check below also fires
5517                break;
5518        }
5519        if (!offset_align || (offset % offset_align)) {
5520            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5521                            HandleToUint64(commandBuffer), __LINE__, DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
5522                            "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
5523                            string_VkIndexType(indexType));
5524        }
5525        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
5526    } else {
5527        assert(0);
5528    }
5529    lock.unlock();
5530    if (!skip) dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
5531}
5532
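// Record the vertex buffers bound in [firstBinding, firstBinding + bindingCount) so that subsequent draws can snapshot
// the currently bound buffers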
5533void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
5534    uint32_t end = firstBinding + bindingCount;
5535    if (pCB->currentDrawData.buffers.size() < end) {
5536        pCB->currentDrawData.buffers.resize(end);
5537    }
5538    for (uint32_t i = 0; i < bindingCount; ++i) {
5539        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
5540    }
5541}
5542
5543static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
5544
5545VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
5546                                                const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
5547    bool skip = false;
5548    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5549    // TODO : Need to verify somewhere that vertex buffers have the correct usage state flagged
5550    std::unique_lock<std::mutex> lock(global_lock);
5551
5552    auto cb_node = GetCBNode(dev_data, commandBuffer);
5553    if (cb_node) {
5554        skip |=
5555            ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_18202415);
5556        skip |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
5557        for (uint32_t i = 0; i < bindingCount; ++i) {
5558            auto buffer_state = GetBufferState(dev_data, pBuffers[i]);
5559            assert(buffer_state);
5560            skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_182004e8);
5561            std::function<bool()> function = [=]() {
5562                return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
5563            };
5564            cb_node->validate_functions.push_back(function);
5565            if (pOffsets[i] >= buffer_state->createInfo.size) {
5566                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5567                                HandleToUint64(buffer_state->buffer), __LINE__, VALIDATION_ERROR_182004e4, "DS",
5568                                "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer. %s",
5569                                pOffsets[i], validation_error_map[VALIDATION_ERROR_182004e4]);
5570            }
5571        }
5572        UpdateCmdBufferLastCmd(cb_node, CMD_BINDVERTEXBUFFER);
5573        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
5574    } else {
5575        assert(0);
5576    }
5577    lock.unlock();
5578    if (!skip) dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
5579}
5580
5581// Expects global_lock to be held by caller
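// Queues deferred callbacks, evaluated at queue submit time, that mark every image and buffer this command buffer
// stores to as having valid memory contents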
5582static void MarkStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5583    for (auto imageView : pCB->updateImages) {
5584        auto view_state = GetImageViewState(dev_data, imageView);
5585        if (!view_state) continue;
5586
5587        auto image_state = GetImageState(dev_data, view_state->create_info.image);
5588        assert(image_state);
5589        std::function<bool()> function = [=]() {
5590            SetImageMemoryValid(dev_data, image_state, true);
5591            return false;
5592        };
5593        pCB->validate_functions.push_back(function);
5594    }
5595    for (auto buffer : pCB->updateBuffers) {
5596        auto buffer_state = GetBufferState(dev_data, buffer);
5597        assert(buffer_state);
5598        std::function<bool()> function = [=]() {
5599            SetBufferMemoryValid(dev_data, buffer_state, true);
5600            return false;
5601        };
5602        pCB->validate_functions.push_back(function);
5603    }
5604}
5605
5606// Generic function to handle validation for all CmdDraw* type functions
5607static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
5608                                CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller, VkQueueFlags queue_flags,
5609                                UNIQUE_VALIDATION_ERROR_CODE queue_flag_code, UNIQUE_VALIDATION_ERROR_CODE msg_code,
5610                                UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
5611    bool skip = false;
5612    *cb_state = GetCBNode(dev_data, cmd_buffer);
5613    if (*cb_state) {
5614        skip |= ValidateCmdQueueFlags(dev_data, *cb_state, caller, queue_flags, queue_flag_code);
5615        skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
5616        skip |= ValidateDrawState(dev_data, *cb_state, indexed, bind_point, caller, dynamic_state_msg_code);
5617        skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
5618                                                                : insideRenderPass(dev_data, *cb_state, caller, msg_code);
5619    }
5620    return skip;
5621}
5622
5623// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
5624static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
5625                                           CMD_TYPE cmd_type) {
5626    UpdateDrawState(dev_data, cb_state, bind_point);
5627    MarkStoreImagesAndBuffersAsWritten(dev_data, cb_state);
5628    UpdateCmdBufferLastCmd(cb_state, cmd_type);
5629}
5630
5631// Generic function to handle state update for all CmdDraw* type functions
5632static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
5633                                   CMD_TYPE cmd_type) {
5634    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, cmd_type);
5635    updateResourceTrackingOnDraw(cb_state);
5636    cb_state->hasDrawCmd = true;
5637}
5638
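// Each vkCmdDraw*/vkCmdDispatch* entry point below follows the same pattern: validate under the global lock, call down
// the dispatch chain with the lock released, then re-acquire the lock to record post-call state.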
5639static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
5640                                   GLOBAL_CB_NODE **cb_state, const char *caller) {
5641    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
5642                               VALIDATION_ERROR_1a202415, VALIDATION_ERROR_1a200017, VALIDATION_ERROR_1a200376);
5643}
5644
5645static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
5646    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAW);
5647}
5648
5649VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
5650                                   uint32_t firstVertex, uint32_t firstInstance) {
5651    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5652    GLOBAL_CB_NODE *cb_state = nullptr;
5653    std::unique_lock<std::mutex> lock(global_lock);
5654    bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
5655    lock.unlock();
5656    if (!skip) {
5657        dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
5658        lock.lock();
5659        PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
5660        lock.unlock();
5661    }
5662}
5663
5664static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
5665                                          VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
5666    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
5667                               VALIDATION_ERROR_1a402415, VALIDATION_ERROR_1a400017, VALIDATION_ERROR_1a40039c);
5668}
5669
5670static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
5671    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXED);
5672}
5673
5674VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
5675                                          uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
5676    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5677    GLOBAL_CB_NODE *cb_state = nullptr;
5678    std::unique_lock<std::mutex> lock(global_lock);
5679    bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
5680                                              "vkCmdDrawIndexed()");
5681    lock.unlock();
5682    if (!skip) {
5683        dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
5684        lock.lock();
5685        PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
5686        lock.unlock();
5687    }
5688}
5689
5690static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
5691                                           VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
5692                                           const char *caller) {
5693    bool skip =
5694        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
5695                            VALIDATION_ERROR_1aa02415, VALIDATION_ERROR_1aa00017, VALIDATION_ERROR_1aa003cc);
5696    *buffer_state = GetBufferState(dev_data, buffer);
5697    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1aa003b4);
5698    // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
5699    // VkDrawIndirectCommand structures accessed by this command must be 0, which will require access to the contents of 'buffer'.
5700    return skip;
5701}
5702
5703static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
5704                                          BUFFER_STATE *buffer_state) {
5705    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDIRECT);
5706    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
5707}
5708
5709VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
5710                                           uint32_t stride) {
5711    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5712    GLOBAL_CB_NODE *cb_state = nullptr;
5713    BUFFER_STATE *buffer_state = nullptr;
5714    std::unique_lock<std::mutex> lock(global_lock);
5715    bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
5716                                               &buffer_state, "vkCmdDrawIndirect()");
5717    lock.unlock();
5718    if (!skip) {
5719        dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
5720        lock.lock();
5721        PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
5722        lock.unlock();
5723    }
5724}
5725
5726static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
5727                                                  VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
5728                                                  BUFFER_STATE **buffer_state, const char *caller) {
5729    bool skip =
5730        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
5731                            VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1a602415, VALIDATION_ERROR_1a600017, VALIDATION_ERROR_1a600434);
5732    *buffer_state = GetBufferState(dev_data, buffer);
5733    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a60041c);
5734    // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
5735    // VkDrawIndexedIndirectCommand structures accessed by this command must be 0, which will require access to the contents of
5736    // 'buffer'.
5737    return skip;
5738}
5739
5740static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
5741                                                 BUFFER_STATE *buffer_state) {
5742    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXEDINDIRECT);
5743    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
5744}
5745
5746VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5747                                                  uint32_t count, uint32_t stride) {
5748    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5749    GLOBAL_CB_NODE *cb_state = nullptr;
5750    BUFFER_STATE *buffer_state = nullptr;
5751    std::unique_lock<std::mutex> lock(global_lock);
5752    bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
5753                                                      &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
5754    lock.unlock();
5755    if (!skip) {
5756        dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
5757        lock.lock();
5758        PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
5759        lock.unlock();
5760    }
5761}
5762
5763static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
5764                                       VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
5765    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
5766                               VALIDATION_ERROR_19c02415, VALIDATION_ERROR_19c00017, VALIDATION_ERROR_UNDEFINED);
5767}
5768
5769static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
5770    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCH);
5771}
5772
5773VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
5774    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5775    GLOBAL_CB_NODE *cb_state = nullptr;
5776    std::unique_lock<std::mutex> lock(global_lock);
5777    bool skip =
5778        PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
5779    lock.unlock();
5780    if (!skip) {
5781        dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
5782        lock.lock();
5783        PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
5784        lock.unlock();
5785    }
5786}
5787
static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
                                               VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
                                               BUFFER_STATE **buffer_state, const char *caller) {
    bool skip =
        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
                            VALIDATION_ERROR_1a002415, VALIDATION_ERROR_1a000017, VALIDATION_ERROR_UNDEFINED);
    *buffer_state = GetBufferState(dev_data, buffer);
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a000322);
    return skip;
}

static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                              BUFFER_STATE *buffer_state) {
    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCHINDIRECT);
    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}

VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
                                                   &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
        lock.lock();
        PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
        lock.unlock();
    }
}

VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto src_buffer_state = GetBufferState(device_data, srcBuffer);
    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);

    if (cb_node && src_buffer_state && dst_buffer_state) {
        bool skip = PreCallValidateCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
        if (!skip) {
            PreCallRecordCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
            lock.unlock();
            device_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
        }
    } else {
        lock.unlock();
        assert(0);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                        const VkImageCopy *pRegions) {
    bool skip = false;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto src_image_state = GetImageState(device_data, srcImage);
    auto dst_image_state = GetImageState(device_data, dstImage);
    if (cb_node && src_image_state && dst_image_state) {
        skip = PreCallValidateCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
                                           srcImageLayout, dstImageLayout);
        if (!skip) {
            PreCallRecordCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
                                      dstImageLayout);
            lock.unlock();
            device_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                                     pRegions);
        }
    } else {
        lock.unlock();
        assert(0);
    }
}

// Validate that an image's sampleCount matches the requirement for a specific API call
bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
                              const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool skip = false;
    if (image_state->createInfo.samples != sample_count) {
        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                       HandleToUint64(image_state->image), 0, msgCode, "DS",
                       "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s. %s", location,
                       HandleToUint64(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
                       string_VkSampleCountFlagBits(sample_count), validation_error_map[msgCode]);
    }
    return skip;
}

VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                        const VkImageBlit *pRegions, VkFilter filter) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = GetCBNode(dev_data, commandBuffer);
    auto src_image_state = GetImageState(dev_data, srcImage);
    auto dst_image_state = GetImageState(dev_data, dstImage);

    bool skip = PreCallValidateCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, filter);

    if (!skip) {
        PreCallRecordCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state);
        lock.unlock();
        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                              pRegions, filter);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                                VkImageLayout dstImageLayout, uint32_t regionCount,
                                                const VkBufferImageCopy *pRegions) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = false;
    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto src_buffer_state = GetBufferState(device_data, srcBuffer);
    auto dst_image_state = GetImageState(device_data, dstImage);
    if (cb_node && src_buffer_state && dst_image_state) {
        skip = PreCallValidateCmdCopyBufferToImage(device_data, dstImageLayout, cb_node, src_buffer_state, dst_image_state,
                                                   regionCount, pRegions, "vkCmdCopyBufferToImage()");
    } else {
        lock.unlock();
        assert(0);
        // TODO: report VU01244 here, or put in object tracker?
        return;  // Without valid state objects there is nothing safe to record, and the lock is already released
    }
    if (!skip) {
        PreCallRecordCmdCopyBufferToImage(device_data, cb_node, src_buffer_state, dst_image_state, regionCount, pRegions,
                                          dstImageLayout);
        lock.unlock();
        device_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip = false;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto src_image_state = GetImageState(device_data, srcImage);
    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
    if (cb_node && src_image_state && dst_buffer_state) {
        skip = PreCallValidateCmdCopyImageToBuffer(device_data, srcImageLayout, cb_node, src_image_state, dst_buffer_state,
                                                   regionCount, pRegions, "vkCmdCopyImageToBuffer()");
    } else {
        lock.unlock();
        assert(0);
        // TODO: report VU01262 here, or put in object tracker?
        return;  // Without valid state objects there is nothing safe to record, and the lock is already released
    }
    if (!skip) {
        PreCallRecordCmdCopyImageToBuffer(device_data, cb_node, src_image_state, dst_buffer_state, regionCount, pRegions,
                                          srcImageLayout);
        lock.unlock();
        device_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                           VkDeviceSize dataSize, const uint32_t *pData) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = GetCBNode(dev_data, commandBuffer);
    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
    if (cb_node && dst_buff_state) {
        skip |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400046);
        // Update bindings between buffer and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that DST buffer has correct usage flags set
        skip |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                         VALIDATION_ERROR_1e400044, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip |=
            ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdUpdateBuffer()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1e402415);
        skip |= ValidateCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
        UpdateCmdBufferLastCmd(cb_node, CMD_UPDATEBUFFER);
        skip |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400017);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}

VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                         VkDeviceSize size, uint32_t data) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto buffer_state = GetBufferState(device_data, dstBuffer);

    if (cb_node && buffer_state) {
        bool skip = PreCallValidateCmdFillBuffer(device_data, cb_node, buffer_state);
        if (!skip) {
            PreCallRecordCmdFillBuffer(device_data, cb_node, buffer_state);
            lock.unlock();
            device_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
        }
    } else {
        lock.unlock();
        assert(0);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
                                               const VkClearRect *pRects) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip = PreCallValidateCmdClearAttachments(dev_data, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
    }
    if (!skip) dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}

VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                              const VkClearColorValue *pColor, uint32_t rangeCount,
                                              const VkImageSubresourceRange *pRanges) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    bool skip = PreCallValidateCmdClearColorImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
    if (!skip) {
        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges, CMD_CLEARCOLORIMAGE);
        lock.unlock();
        dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                                     const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                                                     const VkImageSubresourceRange *pRanges) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    bool skip = PreCallValidateCmdClearDepthStencilImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
    if (!skip) {
        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges, CMD_CLEARDEPTHSTENCILIMAGE);
        lock.unlock();
        dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                           VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                           const VkImageResolve *pRegions) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = GetCBNode(dev_data, commandBuffer);
    auto src_image_state = GetImageState(dev_data, srcImage);
    auto dst_image_state = GetImageState(dev_data, dstImage);

    bool skip = PreCallValidateCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions);

    if (!skip) {
        PreCallRecordCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state);
        lock.unlock();
        dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                                 pRegions);
    }
}

VKAPI_ATTR void VKAPI_CALL GetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
                                                     VkSubresourceLayout *pLayout) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    bool skip = PreCallValidateGetImageSubresourceLayout(device_data, image, pSubresource);
    if (!skip) {
        device_data->dispatch_table.GetImageSubresourceLayout(device, image, pSubresource, pLayout);
    }
}

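// Queue-time callback recorded by vkCmdSetEvent()/vkCmdResetEvent(): notes the most recent stageMask written
// to the event, both in the command buffer's map and in the executing queue's map. Always returns false
// since it only updates state and never flags a validation error.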
bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->eventToStageMap[event] = stageMask;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
    }
    return false;
}

VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1d402415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
        UpdateCmdBufferLastCmd(pCB, CMD_SETEVENT);
        skip |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_1d400017);
        skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()", VALIDATION_ERROR_1d4008fc,
                                             VALIDATION_ERROR_1d4008fe);
        auto event_state = GetEventNode(dev_data, event);
        if (event_state) {
            addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
            event_state->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        pCB->eventUpdates.emplace_back([=](VkQueue q){return setEventStageMask(q, commandBuffer, event, stageMask);});
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
}

VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1c402415);
        skip |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
        UpdateCmdBufferLastCmd(pCB, CMD_RESETEVENT);
        skip |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_1c400017);
        skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdResetEvent()", VALIDATION_ERROR_1c400904,
                                             VALIDATION_ERROR_1c400906);
        auto event_state = GetEventNode(dev_data, event);
        if (event_state) {
            addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
            event_state->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        // TODO : Add check for VALIDATION_ERROR_32c008f8
        pCB->eventUpdates.emplace_back([=](VkQueue q){return setEventStageMask(q, commandBuffer, event, VkPipelineStageFlags(0));});
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
}

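// Core barrier validation shared by vkCmdWaitEvents() and vkCmdPipelineBarrier():
//  - memory barriers inside a render pass require a subpass self-dependency
//  - image barriers must use queue family indices consistent with the image's sharing mode
//  - image layout transitions are checked against access masks and may not target UNDEFINED or PREINITIALIZED
//  - buffer barriers are rejected inside render passes, and their offset/size must fit within the buffer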
static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                             const VkImageMemoryBarrier *pImageMemBarriers) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, cmdBuffer);
    assert(pCB);
    if (!pCB) return skip;
    if (pCB->activeRenderPass && memBarrierCount) {
        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cmdBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                            "%s: Barriers cannot be set during subpass %d "
                            "with no self-dependency specified.",
                            funcName, pCB->activeSubpass);
        }
    }
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        auto image_data = GetImageState(dev_data, mem_barrier->image);
        if (image_data) {
            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
                // be VK_QUEUE_FAMILY_IGNORED
                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cmdBuffer), __LINE__,
                                    DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image Barrier for image 0x%" PRIx64
                                                                         " was created with sharingMode of "
                                                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
                                                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
                                    funcName, HandleToUint64(mem_barrier->image));
                }
            } else {
                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
                // or both be a valid queue family
                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
                    (src_q_f_index != dst_q_f_index)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cmdBuffer), __LINE__,
                                    DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64
                                                                         " was created with sharingMode "
                                                                         "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
                                                                         "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
                                                                         "must be.",
                                    funcName, HandleToUint64(mem_barrier->image));
                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cmdBuffer), __LINE__,
                                    DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                    "%s: Image 0x%" PRIx64
                                    " was created with sharingMode "
                                    "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
                                    " or dstQueueFamilyIndex %d is not less than the number of queueFamilies"
                                    " (" PRINTF_SIZE_T_SPECIFIER ") created for this device.",
                                    funcName, HandleToUint64(mem_barrier->image), src_q_f_index, dst_q_f_index,
                                    dev_data->phys_dev_properties.queue_family_properties.size());
                }
            }
        }

        if (mem_barrier->oldLayout != mem_barrier->newLayout) {
            if (pCB->activeRenderPass) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cmdBuffer), __LINE__, VALIDATION_ERROR_1b80093a, "DS",
                            "%s: As the Image Barrier for image 0x%" PRIx64
                            " is being executed within a render pass instance, oldLayout must equal newLayout yet they are "
                            "%s and %s. %s",
                            funcName, HandleToUint64(mem_barrier->image), string_VkImageLayout(mem_barrier->oldLayout),
                            string_VkImageLayout(mem_barrier->newLayout), validation_error_map[VALIDATION_ERROR_1b80093a]);
            }
            skip |= ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
            skip |= ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
        }
        if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cmdBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                            "%s: Image Layout cannot be transitioned to UNDEFINED or "
                            "PREINITIALIZED.",
                            funcName);
        }
        if (image_data) {
            auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
            skip |= ValidateImageAspectMask(dev_data, image_data->image, image_data->createInfo.format, aspect_mask, funcName);

            std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
            skip |= ValidateImageSubresourceRange(dev_data, image_data, false, mem_barrier->subresourceRange, funcName,
                                                  param_name.c_str());
        }
    }

    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        auto mem_barrier = &pBufferMemBarriers[i];
        if (pCB->activeRenderPass) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cmdBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                            "%s: Buffer Barriers cannot be used during a render pass.", funcName);
        }
        if (!mem_barrier) continue;

        // Validate buffer barrier queue family indices
        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cmdBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                            "%s: Buffer Barrier 0x%" PRIx64
                            " has QueueFamilyIndex that is not less "
                            "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
                            funcName, HandleToUint64(mem_barrier->buffer),
                            dev_data->phys_dev_properties.queue_family_properties.size());
        }

        auto buffer_state = GetBufferState(dev_data, mem_barrier->buffer);
        if (buffer_state) {
            auto buffer_size = buffer_state->requirements.size;
            if (mem_barrier->offset >= buffer_size) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cmdBuffer), __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64
                                                                 " which is not less than total size 0x%" PRIx64 ".",
                                funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
                                HandleToUint64(buffer_size));
            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cmdBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                            "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
                            " whose sum is greater than total size 0x%" PRIx64 ".",
                            funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
                            HandleToUint64(mem_barrier->size), HandleToUint64(buffer_size));
            }
        }
    }
    return skip;
}

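// Queue-time callback recorded by vkCmdWaitEvents(): re-derives the bitwise OR of the stage masks with which
// the waited events were last set, and reports an error if the recorded srcStageMask does not match it
// (allowing an extra VK_PIPELINE_STAGE_HOST_BIT for events set from the host).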
bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                            VkPipelineStageFlags sourceStageMask) {
    bool skip = false;
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event = pCB->events[firstEventIndex + i];
        auto queue_data = dev_data->queueMap.find(queue);
        if (queue_data == dev_data->queueMap.end()) return false;
        auto event_data = queue_data->second.eventToStageMap.find(event);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        } else {
            auto global_event_data = GetEventNode(dev_data, event);
            if (!global_event_data) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                HandleToUint64(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                                "Event 0x%" PRIx64 " cannot be waited on if it has never been set.", HandleToUint64(event));
            } else {
                stageMask |= global_event_data->stageMask;
            }
        }
    }
    // TODO: Need to validate that host_bit is only set if set event is called
    // but set event can be called at any time.
    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_1e62d401, "DS",
                        "Submitting cmdbuffer with call to vkCmdWaitEvents "
                        "using srcStageMask 0x%X which must be the bitwise "
                        "OR of the stageMask parameters used in calls to "
                        "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
                        "used with vkSetEvent but instead is 0x%X. %s",
                        sourceStageMask, stageMask, validation_error_map[VALIDATION_ERROR_1e62d401]);
    }
    return skip;
}

// Note that we only check bits that HAVE required queueflags -- don't care entries are skipped
static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
    {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
    {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};

static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
                                                            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
                                                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                                                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                                            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
                                                            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                                            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};

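// Flag each bit set in stage_mask whose required queue capabilities (per the table above) do not overlap
// queue_flags. For example, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT used on a compute-only queue family is
// reported. Stage bits with no table entry carry no queue requirement and are skipped.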
bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
                                      VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
                                      UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    // Look up each bit in the stage mask and check for overlap between its table bits and queue_flags
    for (const auto &item : stage_flag_bit_array) {
        if (stage_mask & item) {
            if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(command_buffer), __LINE__, error_code, "DL",
                            "%s(): %s flag %s is not compatible with the queue family properties of this "
                            "command buffer. %s",
                            function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)),
                            validation_error_map[error_code]);
            }
        }
    }
    return skip;
}

bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE *cb_state,
                                                VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
                                                const char *function, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
    auto physical_device_state = GetPhysicalDeviceState(instance_data, dev_data->physical_device);

    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.

    if (queue_family_index < physical_device_state->queue_family_properties.size()) {
        VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;

        if ((source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
                                                     function, "srcStageMask", error_code);
        }
        if ((dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
                                                     function, "dstStageMask", error_code);
        }
    }
    return skip;
}

VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                         VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
                                         uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                         uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                         uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, "vkCmdWaitEvents",
                                                           VALIDATION_ERROR_1e600918);
        skip |= ValidateStageMaskGsTsEnables(dev_data, sourceStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e60090e,
                                             VALIDATION_ERROR_1e600912);
        skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e600910,
                                             VALIDATION_ERROR_1e600914);
        auto first_event_index = cb_state->events.size();
        for (uint32_t i = 0; i < eventCount; ++i) {
            auto event_state = GetEventNode(dev_data, pEvents[i]);
            if (event_state) {
                addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(pEvents[i]), kVulkanObjectTypeEvent}, cb_state);
                event_state->cb_bindings.insert(cb_state);
            }
            cb_state->waitedEvents.insert(pEvents[i]);
            cb_state->events.push_back(pEvents[i]);
        }
        cb_state->eventUpdates.emplace_back([=](VkQueue q){
            return validateEventStageMask(q, cb_state, eventCount, first_event_index, sourceStageMask);
        });
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1e602415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
        UpdateCmdBufferLastCmd(cb_state, CMD_WAITEVENTS);
        skip |=
            ValidateBarriersToImages(dev_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
        if (!skip) {
            TransitionImageLayouts(dev_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        }

        skip |= ValidateBarriers("vkCmdWaitEvents()", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skip)
        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                               imageMemoryBarrierCount, pImageMemoryBarriers);
}

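// Shared validation for vkCmdPipelineBarrier(): queue capability and queue flag checks, GS/TS stage-enable
// checks on both stage masks, then the image-barrier and general barrier validation routines above.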
static bool PreCallValidateCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
                                              VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip = false;
    skip |= ValidateStageMasksAgainstQueueCapabilities(device_data, cb_state, srcStageMask, dstStageMask, "vkCmdPipelineBarrier",
                                                       VALIDATION_ERROR_1b80093e);
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdPipelineBarrier()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b802415);
    skip |= ValidateCmd(device_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
    skip |= ValidateStageMaskGsTsEnables(device_data, srcStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800920,
                                         VALIDATION_ERROR_1b800924);
    skip |= ValidateStageMaskGsTsEnables(device_data, dstStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800922,
                                         VALIDATION_ERROR_1b800926);
    skip |= ValidateBarriersToImages(device_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers,
                                     "vkCmdPipelineBarrier()");
    skip |= ValidateBarriers("vkCmdPipelineBarrier()", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    return skip;
}

static void PreCallRecordCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
                                            uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    UpdateCmdBufferLastCmd(cb_state, CMD_PIPELINEBARRIER);
    TransitionImageLayouts(device_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
}

VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip = false;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
    if (cb_state) {
        skip |= PreCallValidateCmdPipelineBarrier(device_data, cb_state, commandBuffer, srcStageMask, dstStageMask,
                                                  memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                  pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
        if (!skip) {
            PreCallRecordCmdPipelineBarrier(device_data, cb_state, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip) {
        device_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
                                                       pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                                       imageMemoryBarrierCount, pImageMemoryBarriers);
    }
}

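// Queue-time callback marking a query as available (true) or unavailable (false), mirrored into the
// command buffer's and the executing queue's queryToStateMap. Always returns false (state update only).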
static bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->queryToStateMap[object] = value;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.queryToStateMap[object] = value;
    }
    return false;
}

VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdBeginQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_17802415);
        skip |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);

    lock.lock();
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->activeQueries.insert(query);
        pCB->startedQueries.insert(query);
        UpdateCmdBufferLastCmd(pCB, CMD_BEGINQUERY);
        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, pCB);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    QueryObject query = {queryPool, slot};
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        if (!cb_state->activeQueries.count(query)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1ae00652, "DS",
                            "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d. %s",
                            HandleToUint64(queryPool), slot, validation_error_map[VALIDATION_ERROR_1ae00652]);
        }
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdEndQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1ae02415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_ENDQUERY, "vkCmdEndQuery()");
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);

    lock.lock();
    if (cb_state) {
        cb_state->activeQueries.erase(query);
        cb_state->queryUpdates.emplace_back([=](VkQueue q){return setQueryState(q, commandBuffer, query, true);});
        UpdateCmdBufferLastCmd(cb_state, CMD_ENDQUERY);
        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                             uint32_t queryCount) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    assert(cb_state);
    skip |= insideRenderPass(dev_data, cb_state, "vkCmdResetQueryPool()", VALIDATION_ERROR_1c600017);
    skip |= ValidateCmd(dev_data, cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
    skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  VALIDATION_ERROR_1c602415);
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);

    lock.lock();
    for (uint32_t i = 0; i < queryCount; i++) {
        QueryObject query = {queryPool, firstQuery + i};
        cb_state->waitedEventsBeforeQueryReset[query] = cb_state->waitedEvents;
        cb_state->queryUpdates.emplace_back([=](VkQueue q){return setQueryState(q, commandBuffer, query, false);});
    }
    UpdateCmdBufferLastCmd(cb_state, CMD_RESETQUERYPOOL);
    addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                            {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
}

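// A query is considered invalid for result-copy purposes if neither the executing queue nor the device
// has recorded it as having reached the available state.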
static bool IsQueryInvalid(layer_data *dev_data, QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) {
    QueryObject query = {queryPool, queryIndex};
    auto query_data = queue_data->queryToStateMap.find(query);
    if (query_data != queue_data->queryToStateMap.end()) {
        if (!query_data->second) return true;
    } else {
        auto it = dev_data->queryToStateMap.find(query);
        if (it == dev_data->queryToStateMap.end() || !it->second) return true;
    }

    return false;
}

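// Queue-time callback recorded by vkCmdCopyQueryPoolResults(): errors if any query slot in the requested
// range has not completed (become available) before its results are copied to the buffer.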
static bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    auto queue_data = GetQueueState(dev_data, queue);
    if (!queue_data) return false;
    for (uint32_t i = 0; i < queryCount; i++) {
        if (IsQueryInvalid(dev_data, queue_data, queryPool, firstQuery + i)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                            "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
                            HandleToUint64(queryPool), firstQuery + i);
        }
    }
    return skip;
}

6642VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
6643                                                   uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
6644                                                   VkDeviceSize stride, VkQueryResultFlags flags) {
6645    bool skip = false;
6646    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6647    std::unique_lock<std::mutex> lock(global_lock);
6648
6649    auto cb_node = GetCBNode(dev_data, commandBuffer);
6650    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
6651    if (cb_node && dst_buff_state) {
6652        skip |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400674);
6653        // Validate that DST buffer has correct usage flags set
6654        skip |=
6655            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_19400672,
6656                                     "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
6657        skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdCopyQueryPoolResults()",
6658                                      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_19402415);
6659        skip |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
6660        skip |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400017);
6661    }
6662    lock.unlock();
6663
6664    if (skip) return;
6665
6666    dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
6667                                                     stride, flags);
6668
6669    lock.lock();
6670    if (cb_node && dst_buff_state) {
6671        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
6672        cb_node->validate_functions.emplace_back([=]() {
6673            SetBufferMemoryValid(dev_data, dst_buff_state, true);
6674            return false;
6675        });
6676        cb_node->queryUpdates.emplace_back([=](VkQueue q) {
6677            return validateQuery(q, cb_node, queryPool, firstQuery, queryCount);
6678        });
6679        UpdateCmdBufferLastCmd(cb_node, CMD_COPYQUERYPOOLRESULTS);
6680        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
6681                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_node);
6682    }
6683}
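// NOTE: the lambdas recorded above are deliberately deferred. Whether a query holds valid results
// cannot be known at record time, so validateQuery() is evaluated later, at queue-submit time,
// when the queue's queryToStateMap reflects which queries will actually be available.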
6684
6685VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
6686                                            uint32_t offset, uint32_t size, const void *pValues) {
6687    bool skip = false;
6688    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6689    std::unique_lock<std::mutex> lock(global_lock);
6690    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
6691    if (cb_state) {
6692        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6693                                      VALIDATION_ERROR_1bc02415);
6694        skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
6695        UpdateCmdBufferLastCmd(cb_state, CMD_PUSHCONSTANTS);
6696    }
6697    skip |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
6698    if (0 == stageFlags) {
6699        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6700                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1bc2dc03, "DS",
6701                        "vkCmdPushConstants() call has no stageFlags set. %s", validation_error_map[VALIDATION_ERROR_1bc2dc03]);
6702    }
6703
6704    // Check if specified push constant range falls within a pipeline-defined range which has matching stageFlags.
6705    // The spec doesn't seem to disallow having multiple push constant ranges with the
6706    // same offset and size, but different stageFlags.  So we can't just check the
6707    // stageFlags in the first range with matching offset and size.
6708    if (!skip) {
6709        const auto &ranges = getPipelineLayout(dev_data, layout)->push_constant_ranges;
6710        bool found_matching_range = false;
6711        for (const auto &range : ranges) {
6712            if ((stageFlags == range.stageFlags) && (offset >= range.offset) && (offset + size <= range.offset + range.size)) {
6713                found_matching_range = true;
6714                break;
6715            }
6716        }
6717        if (!found_matching_range) {
6718            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6719                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1bc002de, "DS",
6720                            "vkCmdPushConstants() stageFlags = 0x%" PRIx32
6721                            " do not match the stageFlags in any of the ranges with"
6722                            " offset = %u and size = %u in pipeline layout 0x%" PRIx64 ". %s",
6723                            (uint32_t)stageFlags, offset, size, HandleToUint64(layout),
6724                            validation_error_map[VALIDATION_ERROR_1bc002de]);
6725        }
6726    }
6727    lock.unlock();
6728    if (!skip) dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
6729}
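// Illustrative example (hypothetical values): the loop above must scan every push-constant range
// because a pipeline layout may declare ranges with identical offset/size but different stageFlags:
//
//   VkPushConstantRange ranges[2] = {
//       {VK_SHADER_STAGE_VERTEX_BIT, 0 /*offset*/, 16 /*size*/},
//       {VK_SHADER_STAGE_FRAGMENT_BIT, 0, 16},
//   };
//
// A vkCmdPushConstants() call with stageFlags == VK_SHADER_STAGE_VERTEX_BIT matches only the first
// range, so stopping at the first offset/size match could incorrectly report a stageFlags mismatch.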
6730
6731VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
6732                                             VkQueryPool queryPool, uint32_t slot) {
6733    bool skip = false;
6734    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6735    std::unique_lock<std::mutex> lock(global_lock);
6736    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
6737    if (cb_state) {
6738        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWriteTimestamp()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6739                                      VALIDATION_ERROR_1e802415);
6740        skip |= ValidateCmd(dev_data, cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
6741    }
6742    lock.unlock();
6743
6744    if (skip) return;
6745
6746    dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
6747
6748    lock.lock();
6749    if (cb_state) {
6750        QueryObject query = {queryPool, slot};
6751        cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, true); });
6752        UpdateCmdBufferLastCmd(cb_state, CMD_WRITETIMESTAMP);
6753    }
6754}
6755
6756static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
6757                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
6758                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
6759    bool skip = false;
6760
6761    for (uint32_t attach = 0; attach < count; attach++) {
6762        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
6763            // Attachment counts are verified elsewhere, but prevent an invalid access
6764            if (attachments[attach].attachment < fbci->attachmentCount) {
6765                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
6766                auto view_state = GetImageViewState(dev_data, *image_view);
6767                if (view_state) {
6768                    auto image_state = GetImageState(dev_data, view_state->create_info.image);
6769                    if (image_state != nullptr) {  // GetImageState() may return null for an unknown image
6770                        if ((image_state->createInfo.usage & usage_flag) == 0) {
6771                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6772                                            VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, error_code, "DS",
6773                                            "vkCreateFramebuffer:  Framebuffer Attachment (%u) was created without the "
6774                                            "%s usage bit required by the render pass. %s",
6775                                            attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag),
6776                                            validation_error_map[error_code]);
6777                        }
6778                    }
6779                }
6780            }
6781        }
6782    }
6783    return skip;
6784}
6785
6786// Validate VkFramebufferCreateInfo which includes:
6787// 1. attachmentCount equals renderPass attachmentCount
6788// 2. corresponding framebuffer and renderpass attachments have matching formats
6789// 3. corresponding framebuffer and renderpass attachments have matching sample counts
6790// 4. fb attachments only have a single mip level
6791// 5. fb attachment dimensions are each at least as large as the fb
6792// 6. fb attachments use identity swizzle
6793// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
6794// 8. fb dimensions are within physical device limits
6795static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
6796    bool skip = false;
6797
6798    auto rp_state = GetRenderPassState(dev_data, pCreateInfo->renderPass);
6799    if (rp_state) {
6800        const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
6801        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
6802            skip |= log_msg(
6803                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
6804                HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006d8, "DS",
6805                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
6806                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer. %s",
6807                pCreateInfo->attachmentCount, rpci->attachmentCount, HandleToUint64(pCreateInfo->renderPass),
6808                validation_error_map[VALIDATION_ERROR_094006d8]);
6809        } else {
6810            // attachmentCounts match, so make sure corresponding attachment details line up
6811            const VkImageView *image_views = pCreateInfo->pAttachments;
6812            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
6813                auto view_state = GetImageViewState(dev_data, image_views[i]);
6814                auto &ivci = view_state->create_info;
6815                if (ivci.format != rpci->pAttachments[i].format) {
6816                    skip |= log_msg(
6817                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
6818                        HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006e0, "DS",
6819                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
6820                        "the format of "
6821                        "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
6822                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
6823                        HandleToUint64(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_094006e0]);
6824                }
6825                const VkImageCreateInfo *ici = &GetImageState(dev_data, ivci.image)->createInfo;
6826                if (ici->samples != rpci->pAttachments[i].samples) {
6827                    skip |= log_msg(
6828                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
6829                        HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006e2, "DS",
6830                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
6831                        "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
6832                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
6833                        HandleToUint64(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_094006e2]);
6834                }
6835                // Verify that view only has a single mip level
6836                if (ivci.subresourceRange.levelCount != 1) {
6837                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
6838                                    0, __LINE__, VALIDATION_ERROR_094006e6, "DS",
6839                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
6840                                    "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer. %s",
6841                                    i, ivci.subresourceRange.levelCount, validation_error_map[VALIDATION_ERROR_094006e6]);
6842                }
6843                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
6844                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
6845                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
6846                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
6847                    (mip_height < pCreateInfo->height)) {
6848                    skip |= log_msg(
6849                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
6850                        VALIDATION_ERROR_094006e4, "DS",
6851                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
6852                        "than the corresponding framebuffer dimensions. Here are the respective dimensions for attachment #%u, "
6853                        "framebuffer:\n"
6854                        "width: %u, %u\n"
6855                        "height: %u, %u\n"
6856                        "layerCount: %u, %u\n%s",
6857                        i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height, pCreateInfo->height,
6858                        ivci.subresourceRange.layerCount, pCreateInfo->layers, validation_error_map[VALIDATION_ERROR_094006e4]);
6859                }
6860                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
6861                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
6862                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
6863                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
6864                    skip |= log_msg(
6865                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
6866                        VALIDATION_ERROR_094006e8, "DS",
6867                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identity swizzle. All framebuffer "
6868                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
6869                        "r swizzle = %s\n"
6870                        "g swizzle = %s\n"
6871                        "b swizzle = %s\n"
6872                        "a swizzle = %s\n"
6873                        "%s",
6874                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
6875                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a),
6876                        validation_error_map[VALIDATION_ERROR_094006e8]);
6877                }
6878            }
6879        }
6880        // Verify correct attachment usage flags
6881        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
6882            // Verify input attachments:
6883            skip |=
6884                MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
6885                           pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_094006de);
6886            // Verify color attachments:
6887            skip |=
6888                MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
6889                           pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_094006da);
6890            // Verify depth/stencil attachments:
6891            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
6892                skip |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
6893                                   VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_094006dc);
6894            }
6895        }
6896    }
6897    // Verify FB dimensions are within physical device limits
6898    if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
6899        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
6900                        VALIDATION_ERROR_094006ec, "DS",
6901                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. "
6902                        "Requested width: %u, device max: %u\n"
6903                        "%s",
6904                        pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
6905                        validation_error_map[VALIDATION_ERROR_094006ec]);
6906    }
6907    if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
6908        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
6909                        VALIDATION_ERROR_094006f0, "DS",
6910                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. "
6911                        "Requested height: %u, device max: %u\n"
6912                        "%s",
6913                        pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
6914                        validation_error_map[VALIDATION_ERROR_094006f0]);
6915    }
6916    if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
6917        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
6918                        VALIDATION_ERROR_094006f4, "DS",
6919                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. "
6920                        "Requested layers: %u, device max: %u\n"
6921                        "%s",
6922                        pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers,
6923                        validation_error_map[VALIDATION_ERROR_094006f4]);
6924    }
6925    // Verify FB dimensions are greater than zero
6926    if (pCreateInfo->width == 0) {
6927        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
6928                        VALIDATION_ERROR_094006ea, "DS",
6929                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero. %s",
6930                        validation_error_map[VALIDATION_ERROR_094006ea]);
6931    }
6932    if (pCreateInfo->height == 0) {
6933        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
6934                        VALIDATION_ERROR_094006ee, "DS",
6935                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero. %s",
6936                        validation_error_map[VALIDATION_ERROR_094006ee]);
6937    }
6938    if (pCreateInfo->layers == 0) {
6939        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
6940                        VALIDATION_ERROR_094006f2, "DS",
6941                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero. %s",
6942                        validation_error_map[VALIDATION_ERROR_094006f2]);
6943    }
6944    return skip;
6945}
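// Illustrative sketch (hypothetical handles): a VkFramebufferCreateInfo that satisfies the checks
// above -- attachment count/format/samples matching the render pass, single-mip identity-swizzle
// views at least as large as the framebuffer, and dimensions within device limits:
//
//   VkImageView attachments[1] = {color_view};  // single mip level, identity swizzle
//   VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0,
//                                   render_pass, 1, attachments,
//                                   1024 /*width*/, 768 /*height*/, 1 /*layers*/};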
6946
6947// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
6948//  Returns true if an error was logged and the callback requested that the call down the chain be skipped;
6949//  false indicates that the call down the chain should proceed
6950static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
6951    // TODO : Verify that renderPass FB is created with is compatible with FB
6952    bool skip = false;
6953    skip |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
6954    return skip;
6955}
6956
6957// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
6958static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
6959    // Shadow create info and store in map
6960    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
6961        new FRAMEBUFFER_STATE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));
6962
6963    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
6964        VkImageView view = pCreateInfo->pAttachments[i];
6965        auto view_state = GetImageViewState(dev_data, view);
6966        if (!view_state) {
6967            continue;
6968        }
6969        MT_FB_ATTACHMENT_INFO fb_info;
6970        fb_info.view_state = view_state;
6971        fb_info.image = view_state->create_info.image;
6972        fb_state->attachments.push_back(fb_info);
6973    }
6974    dev_data->frameBufferMap[fb] = std::move(fb_state);
6975}
6976
6977VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
6978                                                 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
6979    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6980    std::unique_lock<std::mutex> lock(global_lock);
6981    bool skip = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
6982    lock.unlock();
6983
6984    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
6985
6986    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
6987
6988    if (VK_SUCCESS == result) {
6989        lock.lock();
6990        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
6991        lock.unlock();
6992    }
6993    return result;
6994}
6995
6996static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
6997                           std::unordered_set<uint32_t> &processed_nodes) {
6998    // If we have already checked this node we have not found a dependency path so return false.
6999    if (processed_nodes.count(index)) return false;
7000    processed_nodes.insert(index);
7001    const DAGNode &node = subpass_to_node[index];
7002    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
7003    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
7004        for (auto elem : node.prev) {
7005            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
7006        }
7007    } else {
7008        return true;
7009    }
7010    return false;
7011}
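// Example (illustrative): with declared dependencies 0 -> 1 and 1 -> 2, subpass_to_node[2].prev ==
// {1} and subpass_to_node[1].prev == {0}, so FindDependency(2, 0, ...) recurses through node 1 and
// returns true even though no direct 0 -> 2 dependency exists.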
7012
7013static bool CheckDependencyExists(const layer_data *dev_data, const uint32_t subpass,
7014                                  const std::vector<uint32_t> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node,
7015                                  bool &skip) {
7016    bool result = true;
7017    // Loop through all subpasses that share the same attachment and make sure a dependency exists
7018    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
7019        if (subpass == dependent_subpasses[k]) continue;
7020        const DAGNode &node = subpass_to_node[subpass];
7021        // Check for a specified dependency between the two nodes. If one exists we are done.
7022        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
7023        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
7024        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
7025            // If no direct dependency exists, a transitive one still might. If not, report an error.
7026            std::unordered_set<uint32_t> processed_nodes;
7027            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
7028                  FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
7029                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7030                                __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
7031                                "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
7032                                dependent_subpasses[k]);
7033                result = false;
7034            }
7035        }
7036    }
7037    return result;
7038}
7039
7040static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
7041                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
7042    const DAGNode &node = subpass_to_node[index];
7043    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
7044    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
7045    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
7046        if (attachment == subpass.pColorAttachments[j].attachment) return true;
7047    }
7048    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7049        if (attachment == subpass.pInputAttachments[j].attachment) return true;
7050    }
7051    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
7052        if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
7053    }
7054    bool result = false;
7055    // Loop through previous nodes and see if any of them write to the attachment.
7056    for (auto elem : node.prev) {
7057        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
7058    }
7059    // If the attachment was written to by a previous node, then this node needs to preserve it.
7060    if (result && depth > 0) {
7061        bool has_preserved = false;
7062        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
7063            if (subpass.pPreserveAttachments[j] == attachment) {
7064                has_preserved = true;
7065                break;
7066            }
7067        }
7068        if (!has_preserved) {
7069            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7070                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
7071                            "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
7072        }
7073    }
7074    return result;
7075}
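// Example (illustrative): with dependencies 0 -> 1 -> 2, if subpass 0 writes color attachment 2 and
// subpass 2 reads it as an input attachment, the recursion above walks node.prev from subpass 2
// through subpass 1 and finds the write at depth 2; subpass 1 (at depth 1) must therefore list
// attachment 2 in its pPreserveAttachments or an error is logged.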
7076
7077template <class T>
7078bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
7079    // Ranges [offset, offset + size) overlap iff each starts before the other ends
7080    return ((offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1)));
7081}
7082
7083bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
7084    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
7085            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
7086}
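// Example (illustrative values): two subresource ranges of the same image overlap only when both
// their mip ranges and their layer ranges intersect:
//
//   VkImageSubresourceRange a = {VK_IMAGE_ASPECT_COLOR_BIT, 0 /*baseMipLevel*/, 2 /*levelCount*/,
//                                0 /*baseArrayLayer*/, 4 /*layerCount*/};
//   VkImageSubresourceRange b = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 2, 2, 4};
//   // mips [0,2) vs [1,3) intersect and layers [0,4) vs [2,6) intersect:
//   assert(isRegionOverlapping(a, b));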
7087
7088static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
7089                                 RENDER_PASS_STATE const *renderPass) {
7090    bool skip = false;
7091    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
7092    auto const pCreateInfo = renderPass->createInfo.ptr();
7093    auto const &subpass_to_node = renderPass->subpassToNode;
7094    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
7095    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
7096    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
7097    // Find overlapping attachments
7098    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
7099        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
7100            VkImageView viewi = pFramebufferInfo->pAttachments[i];
7101            VkImageView viewj = pFramebufferInfo->pAttachments[j];
7102            if (viewi == viewj) {
7103                overlapping_attachments[i].push_back(j);
7104                overlapping_attachments[j].push_back(i);
7105                continue;
7106            }
7107            auto view_state_i = GetImageViewState(dev_data, viewi);
7108            auto view_state_j = GetImageViewState(dev_data, viewj);
7109            if (!view_state_i || !view_state_j) {
7110                continue;
7111            }
7112            auto view_ci_i = view_state_i->create_info;
7113            auto view_ci_j = view_state_j->create_info;
7114            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
7115                overlapping_attachments[i].push_back(j);
7116                overlapping_attachments[j].push_back(i);
7117                continue;
7118            }
7119            auto image_data_i = GetImageState(dev_data, view_ci_i.image);
7120            auto image_data_j = GetImageState(dev_data, view_ci_j.image);
7121            if (!image_data_i || !image_data_j) {
7122                continue;
7123            }
7124            if (image_data_i->binding.mem == image_data_j->binding.mem &&
7125                isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
7126                                   image_data_j->binding.size)) {
7127                overlapping_attachments[i].push_back(j);
7128                overlapping_attachments[j].push_back(i);
7129            }
7130        }
7131    }
7132    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
7133        uint32_t attachment = i;
7134        for (auto other_attachment : overlapping_attachments[i]) {
7135            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
7136                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
7137                                HandleToUint64(framebuffer->framebuffer), __LINE__, VALIDATION_ERROR_12200682, "DS",
7138                                "Attachment %d aliases attachment %d but doesn't "
7139                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
7140                                attachment, other_attachment, validation_error_map[VALIDATION_ERROR_12200682]);
7141            }
7142            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
7143                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
7144                                HandleToUint64(framebuffer->framebuffer), __LINE__, VALIDATION_ERROR_12200682, "DS",
7145                                "Attachment %d aliases attachment %d but doesn't "
7146                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
7147                                other_attachment, attachment, validation_error_map[VALIDATION_ERROR_12200682]);
7148            }
7149        }
7150    }
7151    // For each attachment, find the subpasses that use it.
7152    unordered_set<uint32_t> attachmentIndices;
7153    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7154        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
7155        attachmentIndices.clear();
7156        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7157            uint32_t attachment = subpass.pInputAttachments[j].attachment;
7158            if (attachment == VK_ATTACHMENT_UNUSED) continue;
7159            input_attachment_to_subpass[attachment].push_back(i);
7160            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
7161                input_attachment_to_subpass[overlapping_attachment].push_back(i);
7162            }
7163        }
7164        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
7165            uint32_t attachment = subpass.pColorAttachments[j].attachment;
7166            if (attachment == VK_ATTACHMENT_UNUSED) continue;
7167            output_attachment_to_subpass[attachment].push_back(i);
7168            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
7169                output_attachment_to_subpass[overlapping_attachment].push_back(i);
7170            }
7171            attachmentIndices.insert(attachment);
7172        }
7173        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
7174            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
7175            output_attachment_to_subpass[attachment].push_back(i);
7176            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
7177                output_attachment_to_subpass[overlapping_attachment].push_back(i);
7178            }
7179
7180            if (attachmentIndices.count(attachment)) {
7181                skip |=
7182                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7183                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
7184                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
7185            }
7186        }
7187    }
7188    // If a dependency is needed, make sure one exists
7189    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7190        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
7191        // If the attachment is an input then all subpasses that output must have a dependency relationship
7192        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7193            uint32_t attachment = subpass.pInputAttachments[j].attachment;
7194            if (attachment == VK_ATTACHMENT_UNUSED) continue;
7195            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
7196        }
7197        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
7198        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
7199            uint32_t attachment = subpass.pColorAttachments[j].attachment;
7200            if (attachment == VK_ATTACHMENT_UNUSED) continue;
7201            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
7202            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
7203        }
7204        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
7205            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
7206            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
7207            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
7208        }
7209    }
7210    // Check implicit preservation: when a subpass reads an attachment, every subpass between the
7211    // one that wrote the attachment and the reader must preserve it.
7212    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7213        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
7214        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7215            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
7216        }
7217    }
7218    return skip;
7219}
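// Example (illustrative): if subpass 0 renders to attachment 1 and subpass 2 samples attachment 1
// as an input attachment, output_attachment_to_subpass[1] == {0}, and the dependency loop above
// calls CheckDependencyExists(dev_data, 2, {0}, ...), which logs an error unless a direct or
// transitive VkSubpassDependency links subpasses 0 and 2.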
7220
7221static bool CreatePassDAG(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo,
7222                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
7223    bool skip = false;
7224    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7225        DAGNode &subpass_node = subpass_to_node[i];
7226        subpass_node.pass = i;
7227    }
7228    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
7229        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
7230        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
7231            if (dependency.srcSubpass == dependency.dstSubpass) {
7232                skip |=
7233                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7234                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
7235            }
7236        } else if (dependency.srcSubpass > dependency.dstSubpass) {
7237            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7238                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
7239                            "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
7240        } else if (dependency.srcSubpass == dependency.dstSubpass) {
7241            has_self_dependency[dependency.srcSubpass] = true;
7242        } else {
7243            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
7244            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
7245        }
7246    }
7247    return skip;
7248}
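// Example (illustrative): pDependencies {0 -> 1, 1 -> 2} (srcSubpass -> dstSubpass) yield
// subpass_to_node[1].prev == {0} and subpass_to_node[2].prev == {1}, with the mirrored next lists;
// a dependency with srcSubpass == dstSubpass == 1 would instead set has_self_dependency[1].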
7249
7250VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
7251                                                  const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
7252    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
7253    bool spirv_valid;
7254
7255    if (PreCallValidateCreateShaderModule(dev_data, pCreateInfo, &spirv_valid))
7256        return VK_ERROR_VALIDATION_FAILED_EXT;
7257
7258    VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
7259
7260    if (res == VK_SUCCESS) {
7261        std::lock_guard<std::mutex> lock(global_lock);
7262        unique_ptr<shader_module> new_shader_module(spirv_valid ? new shader_module(pCreateInfo) : new shader_module());
7263        dev_data->shaderModuleMap[*pShaderModule] = std::move(new_shader_module);
7264    }
7265    return res;
7266}
7267
7268static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
7269    bool skip = false;
7270    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
7271        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
7272                        VALIDATION_ERROR_12200684, "DS",
7273                        "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d. %s", type,
7274                        attachment, attachment_count, validation_error_map[VALIDATION_ERROR_12200684]);
7275    }
7276    return skip;
7277}
7278
7279static bool IsPowerOfTwo(unsigned x) { return x && !(x & (x - 1)); }
7280
7281static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
7282    bool skip = false;
7283    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7284        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
7285        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
7286            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7287                            __LINE__, VALIDATION_ERROR_14000698, "DS",
7288                            "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS. %s", i,
7289                            validation_error_map[VALIDATION_ERROR_14000698]);
7290        }
7291
7292        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
7293            uint32_t attachment = subpass.pPreserveAttachments[j];
7294            if (attachment == VK_ATTACHMENT_UNUSED) {
7295                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7296                                __LINE__, VALIDATION_ERROR_140006aa, "DS",
7297                                "CreateRenderPass:  pPreserveAttachments[%u] must not be VK_ATTACHMENT_UNUSED. %s", j,
7298                                validation_error_map[VALIDATION_ERROR_140006aa]);
7299            } else {
7300                skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
7301
7302                bool found = (subpass.pDepthStencilAttachment != NULL && subpass.pDepthStencilAttachment->attachment == attachment);
7303                for (uint32_t r = 0; !found && r < subpass.inputAttachmentCount; ++r) {
7304                    found = (subpass.pInputAttachments[r].attachment == attachment);
7305                }
7306                for (uint32_t r = 0; !found && r < subpass.colorAttachmentCount; ++r) {
7307                    found = (subpass.pColorAttachments[r].attachment == attachment) ||
7308                            (subpass.pResolveAttachments != NULL && subpass.pResolveAttachments[r].attachment == attachment);
7309                }
7310                if (found) {
7311                    skip |= log_msg(
7312                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
7313                        VALIDATION_ERROR_140006ac, "DS",
7314                        "CreateRenderPass: subpass %u pPreserveAttachments[%u] (%u) must not be used elsewhere in the subpass. %s",
7315                        i, j, attachment, validation_error_map[VALIDATION_ERROR_140006ac]);
7316                }
7317            }
7318        }
7319
7320        auto subpass_performs_resolve =
7321            subpass.pResolveAttachments &&
7322            std::any_of(subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
7323                        [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
7324
7325        unsigned sample_count = 0;
7326
7327        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
7328            uint32_t attachment;
7329            if (subpass.pResolveAttachments) {
7330                attachment = subpass.pResolveAttachments[j].attachment;
7331                skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
7332
7333                if (!skip && attachment != VK_ATTACHMENT_UNUSED &&
7334                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
7335                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
7336                                    0, __LINE__, VALIDATION_ERROR_140006a2, "DS",
7337                                    "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
7338                                    "which must have VK_SAMPLE_COUNT_1_BIT but has %s. %s",
7339                                    i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
7340                                    validation_error_map[VALIDATION_ERROR_140006a2]);
7341                }
7342
7343                if (!skip && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
7344                    subpass.pColorAttachments[j].attachment == VK_ATTACHMENT_UNUSED) {
7345                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
7346                                    0, __LINE__, VALIDATION_ERROR_1400069e, "DS",
7347                                    "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u "
7348                                    "from a color attachment with attachment=VK_ATTACHMENT_UNUSED. %s",
7349                                    i, attachment, validation_error_map[VALIDATION_ERROR_1400069e]);
7350                }
7351            }
7352            attachment = subpass.pColorAttachments[j].attachment;
7353            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
7354
7355            if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
7356                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
7357
7358                if (subpass_performs_resolve && pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
7359                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
7360                                    0, __LINE__, VALIDATION_ERROR_140006a0, "DS",
7361                                    "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
7362                                    "which has VK_SAMPLE_COUNT_1_BIT. %s",
7363                                    i, attachment, validation_error_map[VALIDATION_ERROR_140006a0]);
7364                }
7365
7366                if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED) {
7367                    const auto &color_desc = pCreateInfo->pAttachments[attachment];
7368                    const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
7369                    if (color_desc.format != resolve_desc.format) {
7370                        skip |=
7371                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
7372                                    0, __LINE__, VALIDATION_ERROR_140006a4, "DS",
7373                                    "CreateRenderPass:  Subpass %u pColorAttachments[%u] resolves to an attachment with a "
7374                                    "different format. "
7375                                    "color format: %s, resolve format: %s. %s",
7376                                    i, j, string_VkFormat(color_desc.format), string_VkFormat(resolve_desc.format), validation_error_map[VALIDATION_ERROR_140006a4]);
7377                    }
7378                }
7379            }
7380        }
7381
7382        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
7383            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
7384            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
7385
7386            if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
7387                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
7388            }
7389        }
7390
7391        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7392            uint32_t attachment = subpass.pInputAttachments[j].attachment;
7393            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
7394        }
7395
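        // Each VkSampleCountFlagBits value is a single distinct bit, so OR-ing the sample counts of
        // all attachments used above leaves exactly one bit set (a power of two) only when every
        // attachment uses the same count; multiple set bits mean the counts are inconsistent.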
7396        if (sample_count && !IsPowerOfTwo(sample_count)) {
7397            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7398                            __LINE__, VALIDATION_ERROR_0082b401, "DS",
7399                            "CreateRenderPass:  Subpass %u attempts to render to "
7400                            "attachments with inconsistent sample counts. %s",
7401                            i, validation_error_map[VALIDATION_ERROR_0082b401]);
7402        }
7403    }
7404    return skip;
7405}
7406
7407static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass,
7408                                   uint32_t index,
7409                                   bool is_read) {
7410    if (index == VK_ATTACHMENT_UNUSED)
7411        return;
7412
7413    if (!render_pass->attachment_first_read.count(index))
7414        render_pass->attachment_first_read[index] = is_read;
7415}
7416
7417VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
7418                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
7419    bool skip = false;
7420    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
7421
7422    std::unique_lock<std::mutex> lock(global_lock);
7423    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
7424    //       ValidateLayouts.
7425    skip |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
7426    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
7427        skip |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].srcStageMask, "vkCreateRenderPass()",
7428                                             VALIDATION_ERROR_13e006b8, VALIDATION_ERROR_13e006bc);
7429        skip |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].dstStageMask, "vkCreateRenderPass()",
7430                                             VALIDATION_ERROR_13e006ba, VALIDATION_ERROR_13e006be);
7431    }
7432    if (!skip) {
7433        skip |= ValidateLayouts(dev_data, device, pCreateInfo);
7434    }
7435    lock.unlock();
7436
7437    if (skip) {
7438        return VK_ERROR_VALIDATION_FAILED_EXT;
7439    }
7440
7441    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
7442
7443    if (VK_SUCCESS == result) {
7444        lock.lock();
7445
7446        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
7447        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
7448        skip |= CreatePassDAG(dev_data, pCreateInfo, subpass_to_node, has_self_dependency);
7449
7450        auto render_pass = unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo));
7451        render_pass->renderPass = *pRenderPass;
7452        render_pass->hasSelfDependency = has_self_dependency;
7453        render_pass->subpassToNode = subpass_to_node;
7454
7455        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7456            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
7457            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
7458                MarkAttachmentFirstUse(render_pass.get(), subpass.pColorAttachments[j].attachment, false);
7459
7460                // resolve attachments are considered to be written
7461                if (subpass.pResolveAttachments) {
7462                    MarkAttachmentFirstUse(render_pass.get(), subpass.pResolveAttachments[j].attachment, false);
7463                }
7464            }
7465            if (subpass.pDepthStencilAttachment) {
7466                MarkAttachmentFirstUse(render_pass.get(), subpass.pDepthStencilAttachment->attachment, false);
7467            }
7468            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7469                MarkAttachmentFirstUse(render_pass.get(), subpass.pInputAttachments[j].attachment, true);
7470            }
7471        }
7472
7473        dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
7474    }
7475    return result;
7476}
7477
7478static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, char const *cmd_name,
7479                                         UNIQUE_VALIDATION_ERROR_CODE error_code) {
7480    bool skip = false;
7481    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
7482        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7483                        HandleToUint64(pCB->commandBuffer), __LINE__, error_code, "DS",
7484                        "Cannot execute command %s on a secondary command buffer. %s", cmd_name, validation_error_map[error_code]);
7485    }
7486    return skip;
7487}
7488
7489static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
7490    bool skip = false;
7491    const safe_VkFramebufferCreateInfo *pFramebufferInfo =
7492        &GetFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
7493    if (pRenderPassBegin->renderArea.offset.x < 0 ||
7494        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
7495        pRenderPassBegin->renderArea.offset.y < 0 ||
7496        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
7497        skip |= static_cast<bool>(log_msg(
7498            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
7499            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
7500            "Cannot execute a render pass with renderArea not within the bounds of the "
7501            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
7502            "height %d.",
7503            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
7504            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
7505    }
7506    return skip;
7507}
7508
7509// Select which op applies for a given format: stencil-only formats use the stencil[Load|Store]Op, color and
7510// depth-only formats use [load|store]Op, and combined depth/stencil formats consult both
7511// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
7512template <typename T>
7513static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
7514    if (color_depth_op != op && stencil_op != op) {
7515        return false;
7516    }
7517    bool check_color_depth_load_op = !FormatIsStencilOnly(format);
7518    bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;
7519
7520    return ((check_color_depth_load_op && (color_depth_op == op)) ||
7521            (check_stencil_load_op && (stencil_op == op)));
7522}
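// Example (illustrative): for VK_FORMAT_D24_UNORM_S8_UINT (depth and stencil) both color_depth_op
// and stencil_op are consulted; for VK_FORMAT_S8_UINT (stencil-only) only stencil_op matters; for
// a color or depth-only format only color_depth_op is checked.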
7523
7524VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
7525                                              VkSubpassContents contents) {
7526    bool skip = false;
7527    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7528    std::unique_lock<std::mutex> lock(global_lock);
7529    GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
7530    auto render_pass_state = pRenderPassBegin ? GetRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
7531    auto framebuffer = pRenderPassBegin ? GetFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
7532    if (cb_node) {
7533        if (render_pass_state && framebuffer) {
7534            uint32_t clear_op_size = 0;  // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
7535            cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
7536            for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
7537                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
7538                auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
7539                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
7540                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
7541                    clear_op_size = static_cast<uint32_t>(i) + 1;
7542                    std::function<bool()> function = [=]() {
7543                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
7544                        return false;
7545                    };
7546                    cb_node->validate_functions.push_back(function);
7547                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
7548                                                                pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
7549                    std::function<bool()> function = [=]() {
7550                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
7551                        return false;
7552                    };
7553                    cb_node->validate_functions.push_back(function);
7554                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
7555                                                                pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_LOAD)) {
7556                    std::function<bool()> function = [=]() {
7557                        return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
7558                                                          "vkCmdBeginRenderPass()");
7559                    };
7560                    cb_node->validate_functions.push_back(function);
7561                }
7562                if (render_pass_state->attachment_first_read[i]) {
7563                    std::function<bool()> function = [=]() {
7564                        return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
7565                                                          "vkCmdBeginRenderPass()");
7566                    };
7567                    cb_node->validate_functions.push_back(function);
7568                }
7569            }
7570            if (clear_op_size > pRenderPassBegin->clearValueCount) {
7571                skip |= log_msg(
7572                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7573                    HandleToUint64(render_pass_state->renderPass), __LINE__, VALIDATION_ERROR_1200070c, "DS",
7574                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there must "
7575                    "be at least %u entries in pClearValues array to account for the highest index attachment in renderPass "
7576                    "0x%" PRIx64
7577                    " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array "
7578                    "is indexed by attachment number so even if some pClearValues entries between 0 and %u correspond to "
7579                    "attachments that aren't cleared they will be ignored. %s",
7580                    pRenderPassBegin->clearValueCount, clear_op_size, HandleToUint64(render_pass_state->renderPass), clear_op_size,
7581                    clear_op_size - 1, validation_error_map[VALIDATION_ERROR_1200070c]);
7582            }
7583            skip |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
7584            skip |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin,
7585                                                          GetFramebufferState(dev_data, pRenderPassBegin->framebuffer));
7586            skip |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00017);
7587            skip |= ValidateDependencies(dev_data, framebuffer, render_pass_state);
7588            skip |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00019);
7589            skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBeginRenderPass()", VK_QUEUE_GRAPHICS_BIT,
7590                                          VALIDATION_ERROR_17a02415);
7591            skip |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
7592            UpdateCmdBufferLastCmd(cb_node, CMD_BEGINRENDERPASS);
7593            cb_node->activeRenderPass = render_pass_state;
7594            // This is a shallow copy as that is all that is needed for now
7595            cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
7596            cb_node->activeSubpass = 0;
7597            cb_node->activeSubpassContents = contents;
7598            cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
7599            // Connect this framebuffer and its children to this cmdBuffer
7600            AddFramebufferBinding(dev_data, cb_node, framebuffer);
7601            // transition attachments to the correct layouts for beginning of renderPass and first subpass
7602            TransitionBeginRenderPassLayouts(dev_data, cb_node, render_pass_state, framebuffer);
7603        }
7604    }
7605    lock.unlock();
7606    if (!skip) {
7607        dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
7608    }
7609}
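
// Illustrative sketch (application-side, hypothetical variables, not part of the layer): if attachment 3 is the
// highest-indexed attachment with VK_ATTACHMENT_LOAD_OP_CLEAR, the begin info must supply at least 4 clear values,
// even if attachments 0..2 are never cleared:
//   VkClearValue clears[4] = {};                       // entries 0..2 are ignored but must be present
//   clears[3].color = {{0.0f, 0.0f, 0.0f, 1.0f}};
//   VkRenderPassBeginInfo rp_begin = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO};
//   rp_begin.renderPass = render_pass;                 // hypothetical handles
//   rp_begin.framebuffer = framebuffer;
//   rp_begin.renderArea = {{0, 0}, {width, height}};   // must stay within the framebuffer bounds checked above
//   rp_begin.clearValueCount = 4;                      // >= clear_op_size computed above
//   rp_begin.pClearValues = clears;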

VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600019);
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdNextSubpass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b602415);
        skip |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
        UpdateCmdBufferLastCmd(pCB, CMD_NEXTSUBPASS);
        skip |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600017);

        // Guard the dereference: outsideRenderPass() only reports the error, it does not early-out
        if (pCB->activeRenderPass) {
            auto subpassCount = pCB->activeRenderPass->createInfo.subpassCount;
            if (pCB->activeSubpass == subpassCount - 1) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
                                VALIDATION_ERROR_1b60071a, "DS",
                                "vkCmdNextSubpass(): Attempted to advance beyond final subpass. %s",
                                validation_error_map[VALIDATION_ERROR_1b60071a]);
            }
        }
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);

    if (pCB) {
        lock.lock();
        pCB->activeSubpass++;
        pCB->activeSubpassContents = contents;
        TransitionSubpassLayouts(dev_data, pCB, pCB->activeRenderPass, pCB->activeSubpass,
                                 GetFramebufferState(dev_data, pCB->activeRenderPassBeginInfo.framebuffer));
    }
}

VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto pCB = GetCBNode(dev_data, commandBuffer);
    FRAMEBUFFER_STATE *framebuffer = NULL;
    if (pCB) {
        RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
        framebuffer = GetFramebufferState(dev_data, pCB->activeFramebuffer);
        if (rp_state) {
            if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
                                VALIDATION_ERROR_1b00071c, "DS", "vkCmdEndRenderPass(): Called before reaching final subpass. %s",
                                validation_error_map[VALIDATION_ERROR_1b00071c]);
            }

            for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
                auto pAttachment = &rp_state->createInfo.pAttachments[i];
                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp, pAttachment->stencilStoreOp,
                                                         VK_ATTACHMENT_STORE_OP_STORE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
                                                                pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                }
            }
        }
        skip |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_1b000017);
        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_1b000019);
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdEndRenderPass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b002415);
        skip |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
        UpdateCmdBufferLastCmd(pCB, CMD_ENDRENDERPASS);
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);

    if (pCB) {
        lock.lock();
        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, framebuffer);
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpass = 0;
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}

static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
                                        uint32_t secondaryAttach, const char *msg) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   HandleToUint64(secondaryBuffer), __LINE__, VALIDATION_ERROR_1b2000c4, "DS",
                   "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64
                   " which has a render pass "
                   "that is not compatible with the Primary Cmd Buffer's current render pass. "
                   "Attachment %u is not compatible with %u: %s. %s",
                   HandleToUint64(secondaryBuffer), primaryAttach, secondaryAttach, msg,
                   validation_error_map[VALIDATION_ERROR_1b2000c4]);
}

static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
                                            uint32_t secondaryAttach, bool is_multi) {
    bool skip = false;
    if (primaryPassCI->attachmentCount <= primaryAttach) {
        primaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
        secondaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
        return skip;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
        skip |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
                                            "The first is unused while the second is not.");
        return skip;
    }
    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
        skip |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
                                            "The second is unused while the first is not.");
        return skip;
    }
    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
        skip |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
    }
    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
        skip |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
    }
    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
        skip |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
    }
    return skip;
}
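
// Note (summary of the checks above, paraphrasing the spec's render pass compatibility rules): two attachment
// references are compatible when both are VK_ATTACHMENT_UNUSED, or when the attachments they point to have the
// same format and sample count; attachment flags only need to match when the render passes contain more than one
// subpass (the is_multi parameter).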

static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
    bool skip = false;
    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
                                                secondaryPassCI, secondary_input_attach, is_multi);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
                                                secondaryPassCI, secondary_color_attach, is_multi);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach, secondaryBuffer,
                                                secondaryPassCI, secondary_resolve_attach, is_multi);
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach, secondaryBuffer,
                                            secondaryPassCI, secondary_depthstencil_attach, is_multi);
    return skip;
}

// Verify that the given renderPass CreateInfos for primary and secondary command buffers are compatible.
//  This function deals directly with the CreateInfo; overloaded versions below take the renderPass handle
//  and feed into this function
static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
                                            VkRenderPassCreateInfo const *secondaryPassCI) {
    bool skip = false;

    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(primaryBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
                        " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
                        " that has a subpassCount of %u.",
                        HandleToUint64(secondaryBuffer), secondaryPassCI->subpassCount, HandleToUint64(primaryBuffer),
                        primaryPassCI->subpassCount);
    } else {
        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
            skip |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
                                                 primaryPassCI->subpassCount > 1);
        }
    }
    return skip;
}

static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
    bool skip = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip;
    }
    VkFramebuffer primary_fb = pCB->activeFramebuffer;
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb != VK_NULL_HANDLE) {
        if (primary_fb != secondary_fb) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(primaryBuffer), __LINE__, VALIDATION_ERROR_1b2000c6, "DS",
                            "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64
                            " which has a framebuffer 0x%" PRIx64
                            " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ". %s",
                            HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb), HandleToUint64(primary_fb),
                            validation_error_map[VALIDATION_ERROR_1b2000c6]);
        }
        auto fb = GetFramebufferState(dev_data, secondary_fb);
        if (!fb) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(primaryBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                            "which has invalid framebuffer 0x%" PRIx64 ".",
                            (void *)secondaryBuffer, HandleToUint64(secondary_fb));
            return skip;
        }
        auto cb_renderpass = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
        // Guard against an invalid inherited renderPass handle before dereferencing the state pointer
        if (cb_renderpass && (cb_renderpass->renderPass != fb->createInfo.renderPass)) {
            skip |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
                                                    cb_renderpass->createInfo.ptr());
        }
    }
    return skip;
}

static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
    bool skip = false;
    unordered_set<int> activeTypes;
    for (auto queryObject : pCB->activeQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end()) {
            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
                pSubCB->beginInfo.pInheritanceInfo) {
                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_1b2000d0, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64
                        ". Pipeline statistics are being queried so the command "
                        "buffer must have all bits set on the queryPool. %s",
                        pCB->commandBuffer, HandleToUint64(queryPoolData->first), validation_error_map[VALIDATION_ERROR_1b2000d0]);
                }
            }
            activeTypes.insert(queryPoolData->second.createInfo.queryType);
        }
    }
    for (auto queryObject : pSubCB->startedQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                            "which has invalid active query pool 0x%" PRIx64
                            " of type %d, but a query of that type has been started on "
                            "secondary Cmd Buffer 0x%p.",
                            pCB->commandBuffer, HandleToUint64(queryPoolData->first), queryPoolData->second.createInfo.queryType,
                            pSubCB->commandBuffer);
        }
    }

    auto primary_pool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
    auto secondary_pool = GetCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
        skip |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
                    "vkCmdExecuteCommands(): Primary command buffer 0x%p"
                    " created in queue family %d has secondary command buffer 0x%p created in queue family %d.",
                    pCB->commandBuffer, primary_pool->queueFamilyIndex, pSubCB->commandBuffer, secondary_pool->queueFamilyIndex);
    }

    return skip;
}
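
// Note on the pipeline-statistics check above: the layer requires the secondary buffer's inherited
// pipelineStatistics bits to be a subset of the bits the active query's pool was created with.
// Illustrative sketch (application-side, hypothetical variables):
//   VkCommandBufferInheritanceInfo inherit = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO};
//   inherit.pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT;  // must also be set on the pool
//   VkCommandBufferBeginInfo begin_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//   begin_info.pInheritanceInfo = &inherit;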

VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
                                              const VkCommandBuffer *pCommandBuffers) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        GLOBAL_CB_NODE *pSubCB = NULL;
        for (uint32_t i = 0; i < commandBuffersCount; i++) {
            pSubCB = GetCBNode(dev_data, pCommandBuffers[i]);
            assert(pSubCB);
            if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000b0, "DS",
                            "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
                            "array. All cmd buffers in pCommandBuffers array must be secondary. %s",
                            pCommandBuffers[i], i, validation_error_map[VALIDATION_ERROR_1b2000b0]);
            } else if (pCB->activeRenderPass) {  // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
                if (pSubCB->beginInfo.pInheritanceInfo != nullptr) {
                    auto secondary_rp_state = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
                    if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000c0, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
                            ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT "
                            "set. %s",
                            pCommandBuffers[i], HandleToUint64(pCB->activeRenderPass->renderPass),
                            validation_error_map[VALIDATION_ERROR_1b2000c0]);
                    } else {
                        // Make sure render pass is compatible with parent command buffer pass if it has continue
                        if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
                            skip |=
                                validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
                                                                pCommandBuffers[i], secondary_rp_state->createInfo.ptr());
                        }
                        //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
                        skip |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
                    }
                    string errorString = "";
                    // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
                    if ((pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) &&
                        !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
                                                         secondary_rp_state->createInfo.ptr(), errorString)) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
                            ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                            pCommandBuffers[i], HandleToUint64(pSubCB->beginInfo.pInheritanceInfo->renderPass), commandBuffer,
                            HandleToUint64(pCB->activeRenderPass->renderPass), errorString.c_str());
                    }
                }
            }
            // TODO(mlentine): Move more logic into this method
            skip |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
            skip |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()", 0, VALIDATION_ERROR_1b2000b2);
            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                if (pSubCB->in_use.load() || pCB->linkedCommandBuffers.count(pSubCB)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), __LINE__,
                                    VALIDATION_ERROR_1b2000b4, "DS",
                                    "Attempt to simultaneously execute command buffer 0x%p"
                                    " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set! %s",
                                    pCB->commandBuffer, validation_error_map[VALIDATION_ERROR_1b2000b4]);
                }
                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) "
                        "does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
                        "(0x%p) to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set, even though it does.",
                        pCommandBuffers[i], pCB->commandBuffer);
                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
                }
            }
            if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000ca, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer "
                            "(0x%p) cannot be executed while a query is in "
                            "flight when inherited queries are not "
                            "supported on this device. %s",
                            pCommandBuffers[i], validation_error_map[VALIDATION_ERROR_1b2000ca]);
            }
            // TODO: separate validate from update! This is very tangled.
            // Propagate layout transitions to the primary cmd buffer
            for (auto ilm_entry : pSubCB->imageLayoutMap) {
                SetLayout(dev_data, pCB, ilm_entry.first, ilm_entry.second);
            }
            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
            pCB->linkedCommandBuffers.insert(pSubCB);
            pSubCB->linkedCommandBuffers.insert(pCB);
            for (auto &function : pSubCB->queryUpdates) {
                pCB->queryUpdates.push_back(function);
            }
        }
        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands()", VALIDATION_ERROR_1b200019);
        skip |=
            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdExecuteCommands()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b202415);
        skip |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
        UpdateCmdBufferLastCmd(pCB, CMD_EXECUTECOMMANDS);
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
}
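
// Illustrative sketch (application-side, hypothetical variables): a secondary command buffer that will be
// executed inside a render pass must be begun with the RENDER_PASS_CONTINUE usage bit and an inheritance
// struct naming a compatible render pass, which is what the validation above enforces:
//   VkCommandBufferInheritanceInfo inherit = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO};
//   inherit.renderPass = render_pass;   // must be compatible with the primary's active render pass
//   inherit.subpass = 0;
//   inherit.framebuffer = framebuffer;  // optional; if not VK_NULL_HANDLE it must match the primary's
//   VkCommandBufferBeginInfo begin_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//   begin_info.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//   begin_info.pInheritanceInfo = &inherit;
//   vkBeginCommandBuffer(secondary_cb, &begin_info);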

VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
                                         void **ppData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    bool skip = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        // TODO : This could be more fine-grained, tracking just the region that is valid
        mem_info->global_valid = true;
        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
        skip |= ValidateMapImageLayouts(dev_data, device, mem_info, offset, end_offset);
        // TODO : Do we need to create new "bound_range" for the mapped range?
        SetMemRangesValid(dev_data, mem_info, offset, end_offset);
        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            // Use |= so this does not overwrite earlier validation results
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem), __LINE__, VALIDATION_ERROR_31200554, "MEM",
                            "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64 ". %s",
                            HandleToUint64(mem), validation_error_map[VALIDATION_ERROR_31200554]);
        }
    }
    skip |= ValidateMapMemRange(dev_data, mem, offset, size);
    lock.unlock();

    if (!skip) {
        result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
        if (VK_SUCCESS == result) {
            lock.lock();
            // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
            storeMemRanges(dev_data, mem, offset, size);
            initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
            lock.unlock();
        }
    }
    return result;
}
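
// Illustrative sketch (application-side, hypothetical variables): mapping with VK_WHOLE_SIZE covers
// [offset, allocationSize - 1], which matches the end_offset computed above:
//   void *data = nullptr;
//   vkMapMemory(device, memory, /*offset*/ 0, VK_WHOLE_SIZE, /*flags*/ 0, &data);
//   memcpy(data, src, static_cast<size_t>(copy_size));
//   vkUnmapMemory(device, memory);
// The memory type used must include VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT or the layer reports
// VALIDATION_ERROR_31200554 above.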

VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;

    std::unique_lock<std::mutex> lock(global_lock);
    skip |= deleteMemRanges(dev_data, mem);
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.UnmapMemory(device, mem);
    }
}

static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
                                   const VkMappedMemoryRange *pMemRanges) {
    bool skip = false;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_info = GetMemObjInfo(dev_data, pMemRanges[i].memory);
        if (mem_info) {
            if (pMemRanges[i].size == VK_WHOLE_SIZE) {
                if (mem_info->mem_range.offset > pMemRanges[i].offset) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(pMemRanges[i].memory), __LINE__, VALIDATION_ERROR_0c20055c, "MEM",
                                "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
                                ") is less than Memory Object's offset "
                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
                                funcName, static_cast<size_t>(pMemRanges[i].offset),
                                static_cast<size_t>(mem_info->mem_range.offset), validation_error_map[VALIDATION_ERROR_0c20055c]);
                }
            } else {
                const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
                                              ? mem_info->alloc_info.allocationSize
                                              : (mem_info->mem_range.offset + mem_info->mem_range.size);
                if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
                    (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(pMemRanges[i].memory), __LINE__, VALIDATION_ERROR_0c20055a, "MEM",
                                "%s: Flush/Invalidate range end (" PRINTF_SIZE_T_SPECIFIER "), computed from the range's offset "
                                "(" PRINTF_SIZE_T_SPECIFIER ") plus its size, exceeds the Memory Object's upper-bound "
                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
                                funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
                                static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end),
                                validation_error_map[VALIDATION_ERROR_0c20055a]);
                }
            }
        }
    }
    return skip;
}

static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
                                                     const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
        if (mem_info) {
            if (mem_info->shadow_copy) {
                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
                                        ? mem_info->mem_range.size
                                        : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
                char *data = static_cast<char *>(mem_info->shadow_copy);
                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges[i].memory), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "Memory underflow was detected on mem obj 0x%" PRIxLEAST64, HandleToUint64(mem_ranges[i].memory));
                    }
                }
                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges[i].memory), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "Memory overflow was detected on mem obj 0x%" PRIxLEAST64, HandleToUint64(mem_ranges[i].memory));
                    }
                }
                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
            }
        }
    }
    return skip;
}
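
// Layout of the shadow copy used above and below (as maintained by this layer's map/unmap tracking): the
// user-visible mapping is bracketed by two guard bands filled with NoncoherentMemoryFillValue,
//   [ shadow_pad_size bytes | user data (size bytes) | shadow_pad_size bytes ]
// so a fill-value mismatch before the data indicates an underflow write, and one after it an overflow write.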

static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
        if (mem_info && mem_info->shadow_copy) {
            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
                                    ? mem_info->mem_range.size
                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
            char *data = static_cast<char *>(mem_info->shadow_copy);
            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
        }
    }
}

static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
                                                  const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
        if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges->memory), __LINE__, VALIDATION_ERROR_0c20055e, "MEM",
                            "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
                            func_name, i, mem_ranges[i].offset, atom_size, validation_error_map[VALIDATION_ERROR_0c20055e]);
        }
        if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges->memory), __LINE__, VALIDATION_ERROR_0c200560, "MEM",
                            "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
                            func_name, i, mem_ranges[i].size, atom_size, validation_error_map[VALIDATION_ERROR_0c200560]);
        }
    }
    return skip;
}
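
// Illustrative arithmetic (values hypothetical): with nonCoherentAtomSize = 0x40, a flush of offset = 0x80 and
// size = 0xC0 passes both checks above (0x80 % 0x40 == 0 and 0xC0 % 0x40 == 0), while offset = 0x90 would
// trigger VALIDATION_ERROR_0c20055e. A size of VK_WHOLE_SIZE is always accepted by the size check.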

static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                   const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    std::lock_guard<std::mutex> lock(global_lock);
    skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
    skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                       const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
        result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}

static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                        const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    std::lock_guard<std::mutex> lock(global_lock);
    skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
    return skip;
}

static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                       const VkMappedMemoryRange *mem_ranges) {
    std::lock_guard<std::mutex> lock(global_lock);
    // Update our shadow copy with modified driver data
    CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
}

VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                            const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
        result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
        if (result == VK_SUCCESS) {
            PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
        }
    }
    return result;
}

static bool PreCallValidateBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
                                           VkDeviceSize memoryOffset) {
    bool skip = false;
    if (image_state) {
        std::unique_lock<std::mutex> lock(global_lock);
        // Track objects tied to memory
        uint64_t image_handle = HandleToUint64(image);
        skip = ValidateSetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, "vkBindImageMemory()");
        if (!image_state->memory_requirements_checked) {
            // There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
            // BindImageMemory, but it's implied in that the memory being bound must conform with VkMemoryRequirements from
            // vkGetImageMemoryRequirements()
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            image_handle, __LINE__, DRAWSTATE_INVALID_IMAGE, "DS",
                            "vkBindImageMemory(): Binding memory to image 0x%" PRIxLEAST64
                            " but vkGetImageMemoryRequirements() has not been called on that image.",
                            image_handle);
            // Make the call for them so we can verify the state
            lock.unlock();
            dev_data->dispatch_table.GetImageMemoryRequirements(dev_data->device, image, &image_state->requirements);
            lock.lock();
        }

        // Validate bound memory range information
        auto mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            skip |= ValidateInsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
                                                   image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, "vkBindImageMemory()");
            skip |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, "vkBindImageMemory()",
                                        VALIDATION_ERROR_1740082e);
        }

        // Validate memory requirements alignment
        if (SafeModulo(memoryOffset, image_state->requirements.alignment) != 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            image_handle, __LINE__, VALIDATION_ERROR_17400830, "DS",
                            "vkBindImageMemory(): memoryOffset is 0x%" PRIxLEAST64
                            " but must be an integer multiple of the "
                            "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetImageMemoryRequirements with image. %s",
                            memoryOffset, image_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_17400830]);
        }

        // Validate memory requirements size; mem_info may be null for an invalid memory handle, so guard the dereference
        if (mem_info && (image_state->requirements.size > mem_info->alloc_info.allocationSize - memoryOffset)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            image_handle, __LINE__, VALIDATION_ERROR_17400832, "DS",
                            "vkBindImageMemory(): memory size minus memoryOffset is 0x%" PRIxLEAST64
                            " but must be at least as large as "
                            "VkMemoryRequirements::size value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetImageMemoryRequirements with image. %s",
                            mem_info->alloc_info.allocationSize - memoryOffset, image_state->requirements.size,
                            validation_error_map[VALIDATION_ERROR_17400832]);
        }
    }
    return skip;
}

static void PostCallRecordBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
                                          VkDeviceSize memoryOffset) {
    if (image_state) {
        std::unique_lock<std::mutex> lock(global_lock);
        // Track bound memory range information
        auto mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
                                   image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
        }

        // Track objects tied to memory
        uint64_t image_handle = HandleToUint64(image);
        SetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, "vkBindImageMemory()");

        image_state->binding.mem = mem;
        image_state->binding.offset = memoryOffset;
        image_state->binding.size = image_state->requirements.size;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    auto image_state = GetImageState(dev_data, image);
    bool skip = PreCallValidateBindImageMemory(dev_data, image, image_state, mem, memoryOffset);
    if (!skip) {
        result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
        if (result == VK_SUCCESS) {
            PostCallRecordBindImageMemory(dev_data, image, image_state, mem, memoryOffset);
        }
    }
    return result;
}
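
// Illustrative call sequence (application-side, hypothetical variables) satisfying the checks above:
//   VkMemoryRequirements reqs;
//   vkGetImageMemoryRequirements(device, image, &reqs);   // do this before binding, or the layer warns
//   VkDeviceSize offset = AlignUp(base_offset, reqs.alignment);  // AlignUp is a hypothetical helper
//   assert(reqs.size <= allocation_size - offset);        // the allocation must be large enough
//   vkBindImageMemory(device, image, memory, offset);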

VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
    bool skip = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto event_state = GetEventNode(dev_data, event);
    if (event_state) {
        event_state->needsSignaled = false;
        event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
        if (event_state->write_in_use) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                            HandleToUint64(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
                            HandleToUint64(event));
        }
    }
    lock.unlock();
    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
    for (auto queue_data : dev_data->queueMap) {
        auto event_entry = queue_data.second.eventToStageMap.find(event);
        if (event_entry != queue_data.second.eventToStageMap.end()) {
            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
        }
    }
    if (!skip) result = dev_data->dispatch_table.SetEvent(device, event);
    return result;
}
8362
VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
                                               VkFence fence) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto pFence = GetFenceNode(dev_data, fence);
    auto pQueue = GetQueueState(dev_data, queue);

    // First verify that fence is not in use
    skip |= ValidateFenceForSubmit(dev_data, pFence);

    if (pFence) {
        SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount));
    }

    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        // Track objects tied to memory
        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
                auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
                                        HandleToUint64(bindInfo.pBufferBinds[j].buffer), kVulkanObjectTypeBuffer))
                    skip = true;
            }
        }
        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
                auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
                                        HandleToUint64(bindInfo.pImageOpaqueBinds[j].image), kVulkanObjectTypeImage))
                    skip = true;
            }
        }
        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
                auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
                // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
                VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
                                        HandleToUint64(bindInfo.pImageBinds[j].image), kVulkanObjectTypeImage))
                    skip = true;
            }
        }

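        // Gather this batch's semaphore waits and signals so forward-progress validation and later fence/semaphore
        // retirement can replay them; this mirrors the bookkeeping done for vkQueueSubmit.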
        std::vector<SEMAPHORE_WAIT> semaphore_waits;
        std::vector<VkSemaphore> semaphore_signals;
        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->signaled) {
                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
                        pSemaphore->in_use.fetch_add(1);
                    }
                    pSemaphore->signaler.first = VK_NULL_HANDLE;
                    pSemaphore->signaled = false;
                } else {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                    "vkQueueBindSparse: Queue 0x%p is waiting on semaphore 0x%" PRIx64
                                    " that has no way to be signaled.",
                                    queue, HandleToUint64(semaphore));
                }
            }
        }
        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->signaled) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                    "vkQueueBindSparse: Queue 0x%p is signaling semaphore 0x%" PRIx64
                                    ", but that semaphore is already signaled.",
                                    queue, HandleToUint64(semaphore));
                } else {
                    pSemaphore->signaler.first = queue;
                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
                    pSemaphore->signaled = true;
                    pSemaphore->in_use.fetch_add(1);
                    semaphore_signals.push_back(semaphore);
                }
            }
        }

        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals,
                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
    }

    if (pFence && !bindInfoCount) {
        // No work to do, just dropping a fence in the queue by itself.
        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(),
                                         fence);
    }

    lock.unlock();

    if (!skip) return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);

    return result;
}

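// Create a semaphore through the dispatch chain, then initialize the layer's tracking node as unsignaled with no
// pending signaler.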
VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
        sNode->signaler.first = VK_NULL_HANDLE;
        sNode->signaler.second = 0;
        sNode->signaled = false;
    }
    return result;
}

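// Create an event through the dispatch chain, then initialize the layer's tracking state for it.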
VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->eventMap[*pEvent].needsSignaled = false;
        dev_data->eventMap[*pEvent].write_in_use = 0;
        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
    }
    return result;
}

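// Validate vkCreateSwapchainKHR parameters against previously queried surface capabilities, formats, and present
// modes; returns true if an error was reported and the call should be aborted.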
static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
                                              VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
                                              SWAPCHAIN_NODE *old_swapchain_state) {
    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;

    // TODO: revisit this. some of these rules are being relaxed.

    // All physical devices and queue families are required to be able
    // to present to any native window on Android; require the
    // application to have established support on any other platform.
    if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
        auto support_predicate = [dev_data](decltype(surface_state->gpu_queue_support)::const_reference qs) -> bool {
            // TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
            return (qs.first.gpu == dev_data->physical_device) && qs.second;
        };
        const auto &support = surface_state->gpu_queue_support;
        bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);

        if (!is_supported) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009ec, "DS",
                        "%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. "
                        "vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support "
                        "with this surface for at least one queue family of this device. %s",
                        func_name, validation_error_map[VALIDATION_ERROR_146009ec]))
                return true;
        }
    }

    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
                    "%s: surface has an existing swapchain other than oldSwapchain", func_name))
            return true;
    }
    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                    HandleToUint64(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE, "DS",
                    "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
            return true;
    }
    auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(dev_data->physical_device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
                    "%s: surface capabilities not retrieved for this physical device", func_name))
            return true;
    } else {  // have valid capabilities
        auto &capabilities = physical_device_state->surfaceCapabilities;
        // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
        if (pCreateInfo->minImageCount < capabilities.minImageCount) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009ee, "DS",
                        "%s called with minImageCount = %d, which is outside the bounds returned "
                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
                        validation_error_map[VALIDATION_ERROR_146009ee]))
                return true;
        }

        if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f0, "DS",
                        "%s called with minImageCount = %d, which is outside the bounds returned "
                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
                        validation_error_map[VALIDATION_ERROR_146009f0]))
                return true;
        }

        // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
        if ((capabilities.currentExtent.width == kSurfaceSizeFromSwapchain) &&
            ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
             (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
             (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
             (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height))) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f4, "DS",
                        "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
                        "maxImageExtent = (%d,%d). %s",
                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
                        capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
                        capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height,
                        validation_error_map[VALIDATION_ERROR_146009f4]))
                return true;
        }
        // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
        // VkSurfaceCapabilitiesKHR::supportedTransforms.
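        // ((x & (x - 1)) != 0 when more than one bit is set, so this condition rejects zero bits, multiple bits, and
        // bits that are not in supportedTransforms.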
        if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
            !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
            // it up a little at a time, and then log it:
            std::string errorString = "";
            char str[1024];
            // Here's the first part of the message:
            sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s).  Supported values are:\n", func_name,
                    string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
            errorString += str;
            for (int i = 0; i < 32; i++) {
                // Build up the rest of the message:
                if ((1 << i) & capabilities.supportedTransforms) {
                    const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i));
                    sprintf(str, "    %s\n", newStr);
                    errorString += str;
                }
            }
            // Log the message that we've built up:
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009fe, "DS", "%s. %s", errorString.c_str(),
                        validation_error_map[VALIDATION_ERROR_146009fe]))
                return true;
        }

        // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
        // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
        if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
            !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
            // it up a little at a time, and then log it:
            std::string errorString = "";
            char str[1024];
            // Here's the first part of the message:
            sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s).  Supported values are:\n",
                    func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
            errorString += str;
            for (int i = 0; i < 32; i++) {
                // Build up the rest of the message:
                if ((1 << i) & capabilities.supportedCompositeAlpha) {
                    const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i));
                    sprintf(str, "    %s\n", newStr);
                    errorString += str;
                }
            }
            // Log the message that we've built up:
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600a00, "DS", "%s. %s", errorString.c_str(),
                        validation_error_map[VALIDATION_ERROR_14600a00]))
                return true;
        }
        // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
        if ((pCreateInfo->imageArrayLayers < 1) || (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f6, "DS",
                        "%s called with a non-supported imageArrayLayers (i.e. %d).  Minimum value is 1, maximum value is %d. %s",
                        func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers,
                        validation_error_map[VALIDATION_ERROR_146009f6]))
                return true;
        }
        // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
        if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f8, "DS",
                        "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x).  Supported flag bits are 0x%08x. %s",
                        func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags,
                        validation_error_map[VALIDATION_ERROR_146009f8]))
                return true;
        }
    }

    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
                    "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
            return true;
    } else {
        // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
        bool foundFormat = false;
        bool foundColorSpace = false;
        bool foundMatch = false;
        for (auto const &format : physical_device_state->surface_formats) {
            if (pCreateInfo->imageFormat == format.format) {
                // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
                foundFormat = true;
                if (pCreateInfo->imageColorSpace == format.colorSpace) {
                    foundMatch = true;
                    break;
                }
            } else {
                if (pCreateInfo->imageColorSpace == format.colorSpace) {
                    foundColorSpace = true;
                }
            }
        }
        if (!foundMatch) {
            if (!foundFormat) {
                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f2, "DS",
                            "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d). %s", func_name,
                            pCreateInfo->imageFormat, validation_error_map[VALIDATION_ERROR_146009f2]))
                    return true;
            }
            if (!foundColorSpace) {
                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f2, "DS",
                            "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d). %s", func_name,
                            pCreateInfo->imageColorSpace, validation_error_map[VALIDATION_ERROR_146009f2]))
                    return true;
            }
        }
    }

    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
    if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
        // FIFO is required to always be supported
        if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
                        "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
                return true;
        }
    } else {
        // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
        bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
                                    pCreateInfo->presentMode) != physical_device_state->present_modes.end();
        if (!foundMatch) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600a02, "DS",
                        "%s called with a non-supported presentMode (i.e. %s). %s", func_name,
                        string_VkPresentModeKHR(pCreateInfo->presentMode), validation_error_map[VALIDATION_ERROR_14600a02]))
                return true;
        }
    }
    // Validate state for shared presentable case
    if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
        VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
        if (!dev_data->extensions.vk_khr_shared_presentable_image) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_EXTENSION_NOT_ENABLED, "DS",
                        "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
                        "been enabled.",
                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
                return true;
        } else if (pCreateInfo->minImageCount != 1) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600ace, "DS",
                        "%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
                        "must be 1. %s",
                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount,
                        validation_error_map[VALIDATION_ERROR_14600ace]))
                return true;
        }
    }

    return false;
}

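// Record the outcome of vkCreateSwapchainKHR. On success a SWAPCHAIN_NODE is registered; either way, oldSwapchain is
// marked replaced, since the spec retires it even when creation fails.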
static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                             VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
                                             SWAPCHAIN_NODE *old_swapchain_state) {
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
        if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
            VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
            swapchain_state->shared_presentable = true;
        }
        surface_state->swapchain = swapchain_state.get();
        dev_data->swapchainMap[*pSwapchain] = std::move(swapchain_state);
    } else {
        surface_state->swapchain = nullptr;
    }
    // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
    if (old_swapchain_state) {
        old_swapchain_state->replaced = true;
    }
    surface_state->old_swapchain = old_swapchain_state;
    return;
}

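// vkCreateSwapchainKHR: validate, call down the dispatch chain, then record the resulting swapchain state.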
VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    auto surface_state = GetSurfaceState(dev_data->instance_data, pCreateInfo->surface);
    auto old_swapchain_state = GetSwapchainNode(dev_data, pCreateInfo->oldSwapchain);

    if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);

    PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);

    return result;
}

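// Destroy a swapchain: purge layer state for each of its images (layouts, subresource maps, memory bindings), detach
// it from its surface, then call down the dispatch chain.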
VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;

    std::unique_lock<std::mutex> lock(global_lock);
    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
    if (swapchain_data) {
        if (swapchain_data->images.size() > 0) {
            for (auto swapchain_image : swapchain_data->images) {
                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
                if (image_sub != dev_data->imageSubresourceMap.end()) {
                    for (auto imgsubpair : image_sub->second) {
                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
                        if (image_item != dev_data->imageLayoutMap.end()) {
                            dev_data->imageLayoutMap.erase(image_item);
                        }
                    }
                    dev_data->imageSubresourceMap.erase(image_sub);
                }
                skip |= ClearMemoryObjectBindings(dev_data, HandleToUint64(swapchain_image), kVulkanObjectTypeSwapchainKHR);
                dev_data->imageMap.erase(swapchain_image);
            }
        }

        auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
        if (surface_state) {
            if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
            if (surface_state->old_swapchain == swapchain_data) surface_state->old_swapchain = nullptr;
        }

        dev_data->swapchainMap.erase(swapchain);
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
}

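// Validate the two-call idiom for vkGetSwapchainImagesKHR: the image count should be queried (NULL pSwapchainImages)
// before the images are retrieved, and the requested count must not exceed what was previously returned.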
static bool PreCallValidateGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
                                                 uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
    bool skip = false;
    if (swapchain_state && pSwapchainImages) {
        std::lock_guard<std::mutex> lock(global_lock);
        // Compare the preliminary value of *pSwapchainImageCount with the value this time:
        if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), __LINE__, SWAPCHAIN_PRIOR_COUNT, "DS",
                            "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages, but no prior call has been "
                            "made with NULL pSwapchainImages to query the swapchain image count.");
        } else if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), __LINE__, SWAPCHAIN_INVALID_COUNT, "DS",
                            "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages, and with "
                            "*pSwapchainImageCount set to a value (%d) that is greater than the value (%d) that was returned "
                            "when pSwapchainImages was NULL.",
                            *pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
        }
    }
    return skip;
}

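// Record images returned by vkGetSwapchainImagesKHR, creating layer-side IMAGE_STATE entries for any image that has
// not been seen before.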
8843                                                uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
8844    std::lock_guard<std::mutex> lock(global_lock);
8845
8846    if (*pSwapchainImageCount > swapchain_state->images.size()) swapchain_state->images.resize(*pSwapchainImageCount);
8847
8848    if (pSwapchainImages) {
8849        if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_DETAILS) {
8850            swapchain_state->vkGetSwapchainImagesKHRState = QUERY_DETAILS;
8851        }
8852        for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
8853            if (swapchain_state->images[i] != VK_NULL_HANDLE) continue;  // Already retrieved this.
8854
8855            IMAGE_LAYOUT_NODE image_layout_node;
8856            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
8857            image_layout_node.format = swapchain_state->createInfo.imageFormat;
8858            // Add imageMap entries for each swapchain image
8859            VkImageCreateInfo image_ci = {};
8860            image_ci.flags = 0;
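            // Note: this VkImageCreateInfo is synthesized purely for layer-internal state tracking and is never passed
            // to the driver, which is why fields such as sType are simply left zeroed by the {} initializer.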
8861            image_ci.imageType = VK_IMAGE_TYPE_2D;
8862            image_ci.format = swapchain_state->createInfo.imageFormat;
8863            image_ci.extent.width = swapchain_state->createInfo.imageExtent.width;
8864            image_ci.extent.height = swapchain_state->createInfo.imageExtent.height;
8865            image_ci.extent.depth = 1;
8866            image_ci.mipLevels = 1;
8867            image_ci.arrayLayers = swapchain_state->createInfo.imageArrayLayers;
8868            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
8869            image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
8870            image_ci.usage = swapchain_state->createInfo.imageUsage;
8871            image_ci.sharingMode = swapchain_state->createInfo.imageSharingMode;
8872            device_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
8873            auto &image_state = device_data->imageMap[pSwapchainImages[i]];
8874            image_state->valid = false;
8875            image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
8876            swapchain_state->images[i] = pSwapchainImages[i];
8877            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
8878            device_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
8879            device_data->imageLayoutMap[subpair] = image_layout_node;
8880        }
8881    }
8882
8883    if (*pSwapchainImageCount) {
8884        if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_COUNT) {
8885            swapchain_state->vkGetSwapchainImagesKHRState = QUERY_COUNT;
8886        }
8887        swapchain_state->get_swapchain_image_count = *pSwapchainImageCount;
8888    }
8889}
8890
8891VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
8892                                                     VkImage *pSwapchainImages) {
8893    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8894    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8895
8896    auto swapchain_state = GetSwapchainNode(device_data, swapchain);
8897    bool skip = PreCallValidateGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
8898
8899    if (!skip) {
8900        result = device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
8901    }
8902
8903    if ((result == VK_SUCCESS || result == VK_INCOMPLETE)) {
8904        PostCallRecordGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
8905    }
8906    return result;
8907}
8908
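// Validate vkQueuePresentKHR: wait-semaphore signalability, image acquisition and layout, per-queue surface support,
// and known pNext extension structs; after a successful present, mark each image as released back to the WSI.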
VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    bool skip = false;

    std::lock_guard<std::mutex> lock(global_lock);
    auto queue_state = GetQueueState(dev_data, queue);

    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
        auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
        if (pSemaphore && !pSemaphore->signaled) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
                            HandleToUint64(pPresentInfo->pWaitSemaphores[i]));
        }
    }

    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
        auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
        if (swapchain_data) {
            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
                            "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
                            pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
            } else {
                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
                auto image_state = GetImageState(dev_data, image);

                if (image_state->shared_presentable) {
                    image_state->layout_locked = true;
                }

                skip |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");

                if (!image_state->acquired) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED, "DS",
                        "vkQueuePresentKHR: Swapchain image index %u has not been acquired.", pPresentInfo->pImageIndices[i]);
                }

                vector<VkImageLayout> layouts;
                if (FindLayouts(dev_data, image, layouts)) {
                    for (auto layout : layouts) {
                        if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) &&
                            (!dev_data->extensions.vk_khr_shared_presentable_image ||
                             (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
                            skip |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                                        HandleToUint64(queue), __LINE__, VALIDATION_ERROR_11200a20, "DS",
                                        "Images passed to present must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s. %s",
                                        string_VkImageLayout(layout), validation_error_map[VALIDATION_ERROR_11200a20]);
                        }
                    }
                }
            }

            // All physical devices and queue families are required to be able
            // to present to any native window on Android; require the
            // application to have established support on any other platform.
            if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
                auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
                auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});

                if (support_it == surface_state->gpu_queue_support.end()) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE, "DS",
                                "vkQueuePresentKHR: Presenting image without calling "
                                "vkGetPhysicalDeviceSurfaceSupportKHR");
                } else if (!support_it->second) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, VALIDATION_ERROR_31800a18, "DS",
                                "vkQueuePresentKHR: Presenting image on queue that cannot "
                                "present to this surface. %s",
                                validation_error_map[VALIDATION_ERROR_31800a18]);
                }
            }
        }
    }
    if (pPresentInfo && pPresentInfo->pNext) {
        // Walk the pNext chain and verify the extension structs this layer knows about. std_header mirrors the
        // sType/pNext prefix that every extensible Vulkan struct begins with.
        struct std_header {
            VkStructureType sType;
            const void *pNext;
        };
        std_header *pnext = (std_header *)pPresentInfo->pNext;
        while (pnext) {
            if (VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR == pnext->sType) {
                VkPresentRegionsKHR *present_regions = (VkPresentRegionsKHR *)pnext;
                for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
                    auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
                    assert(swapchain_data);
                    VkPresentRegionKHR region = present_regions->pRegions[i];
                    for (uint32_t j = 0; j < region.rectangleCount; ++j) {
                        VkRectLayerKHR rect = region.pRectangles[j];
                        // TODO: Need to update these errors to their unique error ids when available
                        if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
                            skip |= log_msg(
                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
                                "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext "
                                "chain, pRegion[%i].pRectangles[%i], the sum of offset.x "
                                "(%i) and extent.width (%i) is greater than the "
                                "corresponding swapchain's imageExtent.width (%i).",
                                i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
                        }
                        if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
                            skip |= log_msg(
                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
                                "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext "
                                "chain, pRegion[%i].pRectangles[%i], the sum of offset.y "
                                "(%i) and extent.height (%i) is greater than the "
                                "corresponding swapchain's imageExtent.height (%i).",
                                i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
                        }
                        if (rect.layer > swapchain_data->createInfo.imageArrayLayers) {
                            skip |= log_msg(
                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
                                "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the "
                                "layer (%i) is greater than the corresponding swapchain's imageArrayLayers (%i).",
                                i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
                        }
                    }
                }
            } else if (VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE == pnext->sType) {
                VkPresentTimesInfoGOOGLE *present_times_info = (VkPresentTimesInfoGOOGLE *)pnext;
                if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                HandleToUint64(pPresentInfo->pSwapchains[0]), __LINE__, VALIDATION_ERROR_118009be, "DS",
                                "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but "
                                "pPresentInfo->swapchainCount is %i. For VkPresentTimesInfoGOOGLE down pNext "
                                "chain of VkPresentInfoKHR, VkPresentTimesInfoGOOGLE.swapchainCount "
                                "must equal VkPresentInfoKHR.swapchainCount.",
                                present_times_info->swapchainCount, pPresentInfo->swapchainCount);
                }
            }
            pnext = (std_header *)pnext->pNext;
        }
    }

    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);

    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
        // Semaphore waits occur before error generation, if the call reached
        // the ICD. (Confirm?)
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
            auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
            if (pSemaphore) {
                pSemaphore->signaler.first = VK_NULL_HANDLE;
                pSemaphore->signaled = false;
            }
        }

        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
            // Note: this is imperfect, in that we can get confused about what
            // did or didn't succeed-- but if the app does that, it's confused
            // itself just as much.
            auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;

            if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue;  // this present didn't actually happen.

            // Mark the image as having been released to the WSI
            auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
            auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
            auto image_state = GetImageState(dev_data, image);
            image_state->acquired = false;
        }

        // Note: even though presentation is directed to a queue, there is no
        // direct ordering between QP and subsequent work, so QP (and its
        // semaphore waits) /never/ participate in any completion proof.
    }

    return result;
}

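// Validate each element of pCreateInfos for vkCreateSharedSwapchainsKHR by reusing the single-swapchain validation.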
static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
                                                     const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
                                                     std::vector<SURFACE_STATE *> &surface_state,
                                                     std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
    if (pCreateInfos) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < swapchainCount; i++) {
            surface_state.push_back(GetSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
            old_swapchain_state.push_back(GetSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
            std::stringstream func_name;
            func_name << "vkCreateSharedSwapchainsKHR[" << i << "]";
            if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i],
                                                  old_swapchain_state[i])) {
                return true;
            }
        }
    }
    return false;
}

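// Record results of vkCreateSharedSwapchainsKHR, mirroring PostCallRecordCreateSwapchainKHR for each swapchain.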
static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
                                                    const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
                                                    std::vector<SURFACE_STATE *> &surface_state,
                                                    std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
    if (VK_SUCCESS == result) {
        for (uint32_t i = 0; i < swapchainCount; i++) {
            auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
            if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfos[i].presentMode ||
                VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfos[i].presentMode) {
                swapchain_state->shared_presentable = true;
            }
            surface_state[i]->swapchain = swapchain_state.get();
            dev_data->swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
        }
    } else {
        for (uint32_t i = 0; i < swapchainCount; i++) {
            surface_state[i]->swapchain = nullptr;
        }
    }
    // Spec requires that even if CreateSharedSwapchainsKHR fails, oldSwapchain behaves as replaced.
    for (uint32_t i = 0; i < swapchainCount; i++) {
        if (old_swapchain_state[i]) {
            old_swapchain_state[i]->replaced = true;
        }
        surface_state[i]->old_swapchain = old_swapchain_state[i];
    }
    return;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::vector<SURFACE_STATE *> surface_state;
    std::vector<SWAPCHAIN_NODE *> old_swapchain_state;

    if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
                                                 old_swapchain_state)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result =
        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);

    PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
                                            old_swapchain_state);

    return result;
}

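// Validate vkAcquireNextImageKHR (synchronization objects, replaced swapchains, acquire limits); on success, treat
// the call as a signal operation on the given semaphore/fence and mark the image as acquired.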
VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;

    std::unique_lock<std::mutex> lock(global_lock);

    if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
                        "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
                        "to determine the completion of this operation.");
    }

    auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
    if (pSemaphore && pSemaphore->signaled) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                        HandleToUint64(semaphore), __LINE__, VALIDATION_ERROR_16400a0c, "DS",
                        "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state. %s",
                        validation_error_map[VALIDATION_ERROR_16400a0c]);
    }

    auto pFence = GetFenceNode(dev_data, fence);
    if (pFence) {
        skip |= ValidateFenceForSubmit(dev_data, pFence);
    }

    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
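    // NOTE: swapchain_data is assumed valid from here on; GetSwapchainNode() returns null for an unknown swapchain
    // handle, which the checks below do not guard against.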

    if (swapchain_data->replaced) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_REPLACED, "DS",
                        "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still "
                        "present any images it has acquired, but cannot acquire any more.");
    }

    auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
        uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
                                                 [=](VkImage image) { return GetImageState(dev_data, image)->acquired; });
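        // Forward progress is only guaranteed while the app holds no more than (imageCount - minImageCount) acquired
        // images; warn if it already holds more than that, since this acquire may never complete.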
        if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES, "DS",
                        "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
                        acquired_images);
        }
    }

    if (swapchain_data->images.size() == 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGES_NOT_FOUND, "DS",
                        "vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
                        "vkGetSwapchainImagesKHR after swapchain creation.");
    }

    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);

    lock.lock();
    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
        if (pFence) {
            pFence->state = FENCE_INFLIGHT;
            pFence->signaler.first = VK_NULL_HANDLE;  // ANI isn't on a queue, so this can't participate in a completion proof.
        }

        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
        if (pSemaphore) {
            pSemaphore->signaled = true;
            pSemaphore->signaler.first = VK_NULL_HANDLE;
        }

        // Mark the image as acquired.
        auto image = swapchain_data->images[*pImageIndex];
        auto image_state = GetImageState(dev_data, image);
        image_state->acquired = true;
        image_state->shared_presentable = swapchain_data->shared_presentable;
    }
    lock.unlock();

    return result;
}

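// Track the two-call idiom for vkEnumeratePhysicalDevices and cache per-physical-device state, including the
// device's supported features, when the detail query succeeds.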
9255VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
9256                                                        VkPhysicalDevice *pPhysicalDevices) {
9257    bool skip = false;
9258    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9259    assert(instance_data);
9260
9261    // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
9262    if (NULL == pPhysicalDevices) {
9263        instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
9264    } else {
9265        if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
9266            // Flag warning here. You can call this without having queried the count, but it may not be
9267            // robust on platforms with multiple physical devices.
9268            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
9269                            0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
9270                            "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
9271                            "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
9272        }  // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
9273        else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
9274            // Having actual count match count from app is not a requirement, so this can be a warning
9275            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9276                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
9277                            "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
9278                            "supported by this instance is %u.",
9279                            *pPhysicalDeviceCount, instance_data->physical_devices_count);
9280        }
9281        instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
9282    }
9283    if (skip) {
9284        return VK_ERROR_VALIDATION_FAILED_EXT;
9285    }
9286    VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
9287    if (NULL == pPhysicalDevices) {
9288        instance_data->physical_devices_count = *pPhysicalDeviceCount;
9289    } else if (result == VK_SUCCESS) {  // Save physical devices
9290        for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
9291            auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
9292            phys_device_state.phys_device = pPhysicalDevices[i];
9293            // Init actual features for each physical device
9294            instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
9295        }
9296    }
9297    return result;
9298}
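
// Illustrative sketch (not part of the layer): the two-call idiom the checks above expect from an
// application; `my_instance` is a placeholder handle.
//
//     uint32_t count = 0;
//     vkEnumeratePhysicalDevices(my_instance, &count, NULL);         // recorded as QUERY_COUNT
//     std::vector<VkPhysicalDevice> gpus(count);
//     vkEnumeratePhysicalDevices(my_instance, &count, gpus.data());  // recorded as QUERY_DETAILS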

// Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
                                                                 PHYSICAL_DEVICE_STATE *pd_state,
                                                                 uint32_t requested_queue_family_property_count, bool qfp_null,
                                                                 const char *caller_name) {
    bool skip = false;
    if (!qfp_null) {
        // Verify that, for each physical device, this command is first called with NULL pQueueFamilyProperties to get the count
        if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
            skip |= log_msg(
                instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                HandleToUint64(pd_state->phys_device), __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
                "%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended "
                "to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.",
                caller_name, caller_name);
            // Then verify that the pCount passed in on the second call matches what was returned
        } else if (pd_state->queue_family_count != requested_queue_family_property_count) {
            skip |= log_msg(
                instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                HandleToUint64(pd_state->phys_device), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                "%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32
                ", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32
                ". It is recommended to instead receive all the properties by calling %s with a pQueueFamilyPropertyCount that was "
                "previously obtained by calling %s with NULL pQueueFamilyProperties.",
                caller_name, requested_queue_family_property_count, pd_state->queue_family_count, caller_name, caller_name);
        }
        pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
    }

    return skip;
}
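
// Illustrative sketch (not part of the layer): the call sequence validated above, with placeholder
// names; the 2KHR variant follows the same idiom.
//
//     uint32_t qf_count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &qf_count, NULL);  // count query
//     std::vector<VkQueueFamilyProperties> qf_props(qf_count);
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &qf_count, qf_props.data());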

static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
                                                                  PHYSICAL_DEVICE_STATE *pd_state,
                                                                  uint32_t *pQueueFamilyPropertyCount,
                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
                                                                (nullptr == pQueueFamilyProperties),
                                                                "vkGetPhysicalDeviceQueueFamilyProperties()");
}

static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_layer_data *instance_data,
                                                                      PHYSICAL_DEVICE_STATE *pd_state,
                                                                      uint32_t *pQueueFamilyPropertyCount,
                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
                                                                (nullptr == pQueueFamilyProperties),
                                                                "vkGetPhysicalDeviceQueueFamilyProperties2KHR()");
}

// Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
                                                                    VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    if (!pQueueFamilyProperties) {
        if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
            pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
        }
        pd_state->queue_family_count = count;
    } else {  // Save queue family properties
        pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
        pd_state->queue_family_count = std::max(pd_state->queue_family_count, count);

        pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
        for (uint32_t i = 0; i < count; ++i) {
            pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
        }
    }
}

static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
                                                                 VkQueueFamilyProperties *pQueueFamilyProperties) {
    VkQueueFamilyProperties2KHR *pqfp = nullptr;
    std::vector<VkQueueFamilyProperties2KHR> qfp;
    qfp.resize(count);
    if (pQueueFamilyProperties) {
        for (uint32_t i = 0; i < count; ++i) {
            qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
            qfp[i].pNext = nullptr;
            qfp[i].queueFamilyProperties = pQueueFamilyProperties[i];
        }
        pqfp = qfp.data();
    }
    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pqfp);
}
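
// Illustrative note (not part of the layer): the adapter above funnels the core-1.0 results into the
// single 2KHR-shaped recorder by wrapping each VkQueueFamilyProperties in a VkQueueFamilyProperties2KHR
// (sType VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR, pNext nullptr), so only one bookkeeping path
// needs to be maintained for both entry points.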

static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
                                                                     VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pQueueFamilyProperties);
}

VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
                                                                  uint32_t *pQueueFamilyPropertyCount,
                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    assert(physical_device_state);
    std::unique_lock<std::mutex> lock(global_lock);

    bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state,
                                                                      pQueueFamilyPropertyCount, pQueueFamilyProperties);

    lock.unlock();

    if (skip) return;

    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
                                                                         pQueueFamilyProperties);

    lock.lock();
    PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties);
}

VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
                                                                      uint32_t *pQueueFamilyPropertyCount,
                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    assert(physical_device_state);
    std::unique_lock<std::mutex> lock(global_lock);

    bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_data, physical_device_state,
                                                                          pQueueFamilyPropertyCount, pQueueFamilyProperties);

    lock.unlock();

    if (skip) return;

    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
                                                                             pQueueFamilyProperties);

    lock.lock();
    PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(physical_device_state, *pQueueFamilyPropertyCount,
                                                             pQueueFamilyProperties);
}

template <typename TCreateInfo, typename FPtr>
static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
                              VkSurfaceKHR *pSurface, FPtr fptr) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);

    // Call down the call chain:
    VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);

    if (result == VK_SUCCESS) {
        std::unique_lock<std::mutex> lock(global_lock);
        instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
        lock.unlock();
    }

    return result;
}
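
// Illustrative note (not part of the layer): CreateSurface is parameterized on both the create-info
// type and a pointer-to-member of the dispatch table, so each platform entry point below reduces to a
// one-line call such as
//
//     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface,
//                          &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
//
// and the (table.*fptr)(...) expression above is the syntax for invoking that member through the table.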

VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
    bool skip = false;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto surface_state = GetSurfaceState(instance_data, surface);

    if ((surface_state) && (surface_state->swapchain)) {
        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
                        HandleToUint64(instance), __LINE__, VALIDATION_ERROR_26c009e4, "DS",
                        "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed. %s",
                        validation_error_map[VALIDATION_ERROR_26c009e4]);
    }
    instance_data->surface_map.erase(surface);
    lock.unlock();
    if (!skip) {
        instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
}

#ifdef VK_USE_PLATFORM_ANDROID_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
}
#endif  // VK_USE_PLATFORM_ANDROID_KHR

#ifdef VK_USE_PLATFORM_MIR_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
}

VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceMirPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                                          uint32_t queueFamilyIndex, MirConnection *connection) {
    bool skip = false;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);

    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2d2009e2,
                                              "vkGetPhysicalDeviceMirPresentationSupportKHR", "queueFamilyIndex");

    lock.unlock();

    if (skip) return VK_FALSE;

    // Call down the call chain:
    VkBool32 result =
        instance_data->dispatch_table.GetPhysicalDeviceMirPresentationSupportKHR(physicalDevice, queueFamilyIndex, connection);

    return result;
}
#endif  // VK_USE_PLATFORM_MIR_KHR

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
}

VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                                              uint32_t queueFamilyIndex,
                                                                              struct wl_display *display) {
    bool skip = false;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);

    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f000a34,
                                              "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");

    lock.unlock();

    if (skip) return VK_FALSE;

    // Call down the call chain:
    VkBool32 result =
        instance_data->dispatch_table.GetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex, display);

    return result;
}
#endif  // VK_USE_PLATFORM_WAYLAND_KHR

#ifdef VK_USE_PLATFORM_WIN32_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
}

VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                                            uint32_t queueFamilyIndex) {
    bool skip = false;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);

    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f200a3a,
                                              "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");

    lock.unlock();

    if (skip) return VK_FALSE;

    // Call down the call chain:
    VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex);

    return result;
}
#endif  // VK_USE_PLATFORM_WIN32_KHR

#ifdef VK_USE_PLATFORM_XCB_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
}

VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                                          uint32_t queueFamilyIndex, xcb_connection_t *connection,
                                                                          xcb_visualid_t visual_id) {
    bool skip = false;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);

    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f400a40,
                                              "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");

    lock.unlock();

    if (skip) return VK_FALSE;

    // Call down the call chain:
    VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex,
                                                                                               connection, visual_id);

    return result;
}
#endif  // VK_USE_PLATFORM_XCB_KHR

#ifdef VK_USE_PLATFORM_XLIB_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
}

VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                                           uint32_t queueFamilyIndex, Display *dpy,
                                                                           VisualID visualID) {
    bool skip = false;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);

    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f600a46,
                                              "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");

    lock.unlock();

    if (skip) return VK_FALSE;

    // Call down the call chain:
    VkBool32 result =
        instance_data->dispatch_table.GetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex, dpy, visualID);

    return result;
}
#endif  // VK_USE_PLATFORM_XLIB_KHR
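
// Illustrative sketch (not part of the layer): the application-side presentation-support query that
// the wrappers above validate, shown for the XCB path with placeholder variables, under the same
// platform guard the layer uses:
//
//     #ifdef VK_USE_PLATFORM_XCB_KHR
//     VkBool32 presentable =
//         vkGetPhysicalDeviceXcbPresentationSupportKHR(gpu, queue_family_index, connection, visual_id);
//     #endif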

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                       VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    lock.unlock();

    auto result =
        instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);

    if (result == VK_SUCCESS) {
        physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
        physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
    }

    return result;
}

static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instance_layer_data *instanceData,
                                                                   VkPhysicalDevice physicalDevice,
                                                                   VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
    std::unique_lock<std::mutex> lock(global_lock);
    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
    physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
    physicalDeviceState->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities;
}

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
                                                                        const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
                                                                        VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    auto result =
        instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, pSurfaceInfo, pSurfaceCapabilities);

    if (result == VK_SUCCESS) {
        PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instanceData, physicalDevice, pSurfaceCapabilities);
    }

    return result;
}

static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instance_layer_data *instanceData,
                                                                   VkPhysicalDevice physicalDevice,
                                                                   VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
    std::unique_lock<std::mutex> lock(global_lock);
    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
    physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
    physicalDeviceState->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount;
    physicalDeviceState->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount;
    physicalDeviceState->surfaceCapabilities.currentExtent = pSurfaceCapabilities->currentExtent;
    physicalDeviceState->surfaceCapabilities.minImageExtent = pSurfaceCapabilities->minImageExtent;
    physicalDeviceState->surfaceCapabilities.maxImageExtent = pSurfaceCapabilities->maxImageExtent;
    physicalDeviceState->surfaceCapabilities.maxImageArrayLayers = pSurfaceCapabilities->maxImageArrayLayers;
    physicalDeviceState->surfaceCapabilities.supportedTransforms = pSurfaceCapabilities->supportedTransforms;
    physicalDeviceState->surfaceCapabilities.currentTransform = pSurfaceCapabilities->currentTransform;
    physicalDeviceState->surfaceCapabilities.supportedCompositeAlpha = pSurfaceCapabilities->supportedCompositeAlpha;
    physicalDeviceState->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags;
}
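
// Illustrative note (not part of the layer): the field-by-field copy above is used because
// VkSurfaceCapabilities2EXT is not layout-compatible with the cached VkSurfaceCapabilitiesKHR; the EXT
// struct carries sType/pNext and (per VK_EXT_display_surface_counter) a supportedSurfaceCounters field
// for which the KHR-shaped cache has no slot, so that field is intentionally not recorded.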

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                        VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    auto result =
        instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities);

    if (result == VK_SUCCESS) {
        PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instanceData, physicalDevice, pSurfaceCapabilities);
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
                                                                  VkSurfaceKHR surface, VkBool32 *pSupported) {
    bool skip = false;
    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    auto surface_state = GetSurfaceState(instance_data, surface);

    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2ee009ea,
                                              "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");

    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    auto result =
        instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);

    if (result == VK_SUCCESS) {
        surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE);
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                       uint32_t *pPresentModeCount,
                                                                       VkPresentModeKHR *pPresentModes) {
    bool skip = false;
    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO: this isn't quite right. Available modes may differ by surface AND physical device.
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;

    if (pPresentModes) {
        // Compare the preliminary value of *pPresentModeCount with the value this time:
        auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
        switch (call_state) {
            case UNCALLED:
                skip |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                    "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModes, but no prior value "
                    "has been recorded for pPresentModeCount.");
                break;
            default:
                // Covers both the query-count and query-details states
                if (*pPresentModeCount != prev_mode_count) {
                    skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), __LINE__,
                                    DEVLIMITS_COUNT_MISMATCH, "DL",
                                    "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that "
                                    "differs from the value (%u) that was returned when pPresentModes was NULL.",
                                    *pPresentModeCount, prev_mode_count);
                }
                break;
        }
    }
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount,
                                                                                        pPresentModes);

    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        lock.lock();

        if (*pPresentModeCount) {
            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
            if (*pPresentModeCount > physical_device_state->present_modes.size())
                physical_device_state->present_modes.resize(*pPresentModeCount);
        }
        if (pPresentModes) {
            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
            for (uint32_t i = 0; i < *pPresentModeCount; i++) {
                physical_device_state->present_modes[i] = pPresentModes[i];
            }
        }
    }

    return result;
}
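
// Illustrative sketch (not part of the layer): the count-then-data sequence the checks above expect,
// with placeholder names; vkGetPhysicalDeviceSurfaceFormatsKHR below follows the same pattern.
//
//     uint32_t mode_count = 0;
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, NULL);
//     std::vector<VkPresentModeKHR> modes(mode_count);
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, modes.data());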

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                  uint32_t *pSurfaceFormatCount,
                                                                  VkSurfaceFormatKHR *pSurfaceFormats) {
    bool skip = false;
    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;

    if (pSurfaceFormats) {
        auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();

        switch (call_state) {
            case UNCALLED:
                // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, the application likely didn't
                // previously call this function with a NULL value of pSurfaceFormats:
                skip |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                    "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats, but no prior value "
                    "has been recorded for pSurfaceFormatCount.");
                break;
            default:
                if (prev_format_count != *pSurfaceFormatCount) {
                    skip |= log_msg(
                        instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                        VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), __LINE__,
                        DEVLIMITS_COUNT_MISMATCH, "DL",
                        "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats and a *pSurfaceFormatCount "
                        "(%u) that differs from the value (%u) that was returned when pSurfaceFormats was NULL.",
                        *pSurfaceFormatCount, prev_format_count);
                }
                break;
        }
    }
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    // Call down the call chain:
    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
                                                                                   pSurfaceFormats);

    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        lock.lock();

        if (*pSurfaceFormatCount) {
            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
            if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
                physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
        }
        if (pSurfaceFormats) {
            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
            for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
                physical_device_state->surface_formats[i] = pSurfaceFormats[i];
            }
        }
    }
    return result;
}

static void PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instance_layer_data *instanceData, VkPhysicalDevice physicalDevice,
                                                              uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats) {
    std::unique_lock<std::mutex> lock(global_lock);
    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
    if (*pSurfaceFormatCount) {
        if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) {
            physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT;
        }
        if (*pSurfaceFormatCount > physicalDeviceState->surface_formats.size())
            physicalDeviceState->surface_formats.resize(*pSurfaceFormatCount);
    }
    if (pSurfaceFormats) {
        if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) {
            physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS;
        }
        for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
            physicalDeviceState->surface_formats[i] = pSurfaceFormats[i].surfaceFormat;
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
                                                                   const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
                                                                   uint32_t *pSurfaceFormatCount,
                                                                   VkSurfaceFormat2KHR *pSurfaceFormats) {
    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto result = instanceData->dispatch_table.GetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, pSurfaceInfo,
                                                                                   pSurfaceFormatCount, pSurfaceFormats);
    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instanceData, physicalDevice, pSurfaceFormatCount, pSurfaceFormats);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
                                                            const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator,
                                                            VkDebugReportCallbackEXT *pMsgCallback) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        std::lock_guard<std::mutex> lock(global_lock);
        res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
    }
    return res;
}

VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
                                                         const VkAllocationCallbacks *pAllocator) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
                                                 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
                                                 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                              VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                    VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName)) return util_GetExtensionProperties(0, NULL, pCount, pProperties);

    assert(physicalDevice);

    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroupsKHX(
    VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHX *pPhysicalDeviceGroupProperties) {
    bool skip = false;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);

    if (instance_data) {
        // For this instance, flag when EnumeratePhysicalDeviceGroupsKHX goes to QUERY_COUNT and then QUERY_DETAILS.
        if (NULL == pPhysicalDeviceGroupProperties) {
            instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_COUNT;
        } else {
            if (UNCALLED == instance_data->vkEnumeratePhysicalDeviceGroupsState) {
                // Flag a warning here. You can call this without having queried the count, but it may not be
                // robust on platforms with multiple physical devices.
                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
                                "Call sequence has vkEnumeratePhysicalDeviceGroupsKHX() w/ non-NULL "
                                "pPhysicalDeviceGroupProperties. You should first "
                                "call vkEnumeratePhysicalDeviceGroupsKHX() w/ NULL pPhysicalDeviceGroupProperties to query "
                                "pPhysicalDeviceGroupCount.");
            } else if (instance_data->physical_device_groups_count != *pPhysicalDeviceGroupCount) {
                // TODO: Could also flag a warning if re-calling this function in QUERY_DETAILS state
                // Having the actual count match the count from the app is not a requirement, so this can be a warning
                skip |=
                    log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                            "Call to vkEnumeratePhysicalDeviceGroupsKHX() w/ pPhysicalDeviceGroupCount value %u, but actual count "
                            "supported by this instance is %u.",
                            *pPhysicalDeviceGroupCount, instance_data->physical_device_groups_count);
            }
            instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_DETAILS;
        }
        if (skip) {
            return VK_ERROR_VALIDATION_FAILED_EXT;
        }
        VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroupsKHX(instance, pPhysicalDeviceGroupCount,
                                                                                         pPhysicalDeviceGroupProperties);
        if (NULL == pPhysicalDeviceGroupProperties) {
            instance_data->physical_device_groups_count = *pPhysicalDeviceGroupCount;
        } else if (result == VK_SUCCESS) {  // Save physical devices
            for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) {
                for (uint32_t j = 0; j < pPhysicalDeviceGroupProperties[i].physicalDeviceCount; j++) {
                    VkPhysicalDevice cur_phys_dev = pPhysicalDeviceGroupProperties[i].physicalDevices[j];
                    auto &phys_device_state = instance_data->physical_device_map[cur_phys_dev];
                    phys_device_state.phys_device = cur_phys_dev;
                    // Init actual features for each physical device
                    instance_data->dispatch_table.GetPhysicalDeviceFeatures(cur_phys_dev, &phys_device_state.features);
                }
            }
        }
        return result;
    } else {
        // instance_data is null here, so its report_data cannot be used for logging; report to the
        // console instead of dereferencing the null pointer.
        LOGCONSOLE("Invalid instance (0x%" PRIxLEAST64 ") passed into vkEnumeratePhysicalDeviceGroupsKHX().",
                   HandleToUint64(instance));
    }
    return VK_ERROR_VALIDATION_FAILED_EXT;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
                                                                 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
                                                                 const VkAllocationCallbacks *pAllocator,
                                                                 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result =
        dev_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        // Shadow template createInfo for later updates
        safe_VkDescriptorUpdateTemplateCreateInfoKHR *local_create_info =
            new safe_VkDescriptorUpdateTemplateCreateInfoKHR(pCreateInfo);
        std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
        dev_data->desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state);
    }
    return result;
}
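
// Illustrative note (not part of the layer): the create info is deep-copied ("shadowed") above because
// vkUpdateDescriptorSetWithTemplateKHR later supplies only the template handle plus a raw pData blob;
// decoding that blob requires the entry layout (offsets and strides) captured at creation time, and the
// application may free its own create-info memory as soon as creation returns.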

VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                              const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    dev_data->desc_template_map.erase(descriptorUpdateTemplate);
    lock.unlock();
    dev_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
}

// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSetsWithTemplate()
static void PostCallRecordUpdateDescriptorSetWithTemplateKHR(layer_data *device_data, VkDescriptorSet descriptorSet,
                                                             VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                             const void *pData) {
    auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
    if (template_map_entry == device_data->desc_template_map.end()) {
        assert(0);
        return;  // Don't dereference the end() iterator in release builds, where assert() is compiled out.
    }

    cvdescriptorset::PerformUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_map_entry->second, pData);
}

VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                              const void *pData) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    device_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData);

    PostCallRecordUpdateDescriptorSetWithTemplateKHR(device_data, descriptorSet, descriptorUpdateTemplate, pData);
}

VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
                                                               VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                               VkPipelineLayout layout, uint32_t set, const void *pData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData);
}

static void PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_layer_data *instanceData,
                                                                     VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                     VkDisplayPlanePropertiesKHR *pProperties) {
    std::unique_lock<std::mutex> lock(global_lock);
    auto physical_device_state = GetPhysicalDeviceState(instanceData, physicalDevice);

    if (*pPropertyCount) {
        if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) {
            physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT;
        }
        physical_device_state->display_plane_property_count = *pPropertyCount;
    }
    if (pProperties) {
        if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_DETAILS) {
            physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_DETAILS;
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                          VkDisplayPlanePropertiesKHR *pProperties) {
    VkResult result = VK_SUCCESS;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    result = instance_data->dispatch_table.GetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties);

    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_data, physicalDevice, pPropertyCount, pProperties);
    }

    return result;
}

static bool ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_layer_data *instance_data,
                                                                    VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                    const char *api_name) {
    bool skip = false;
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) {
        skip |= log_msg(
            instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
            HandleToUint64(physicalDevice), __LINE__, SWAPCHAIN_GET_SUPPORTED_DISPLAYS_WITHOUT_QUERY, "DL",
            "Potential problem with calling %s() without first querying vkGetPhysicalDeviceDisplayPlanePropertiesKHR.", api_name);
    } else {
        if (planeIndex >= physical_device_state->display_plane_property_count) {
            skip |= log_msg(
                instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                HandleToUint64(physicalDevice), __LINE__, VALIDATION_ERROR_29c009c2, "DL",
                "%s(): planeIndex must be in the range [0, %u] that was returned by vkGetPhysicalDeviceDisplayPlanePropertiesKHR. "
                "Do you have the plane index hardcoded? %s",
                api_name, physical_device_state->display_plane_property_count - 1, validation_error_map[VALIDATION_ERROR_29c009c2]);
        }
    }
    return skip;
}
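
// Illustrative sketch (not part of the layer): the query order checked above, with placeholder names.
//
//     uint32_t plane_count = 0;
//     vkGetPhysicalDeviceDisplayPlanePropertiesKHR(gpu, &plane_count, NULL);  // must come first
//     // ... afterwards a planeIndex in [0, plane_count - 1] may be used:
//     uint32_t display_count = 0;
//     vkGetDisplayPlaneSupportedDisplaysKHR(gpu, plane_index, &display_count, NULL);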
10114
10115static bool PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
10116                                                               uint32_t planeIndex) {
10117    bool skip = false;
10118    std::lock_guard<std::mutex> lock(global_lock);
10119    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
10120                                                                    "vkGetDisplayPlaneSupportedDisplaysKHR");
10121    return skip;
10122}
10123
10124VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
10125                                                                   uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
10126    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10127    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10128    bool skip = PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_data, physicalDevice, planeIndex);
10129    if (!skip) {
10130        result =
10131            instance_data->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
10132    }
10133    return result;
10134}
10135
10136static bool PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
10137                                                          uint32_t planeIndex) {
10138    bool skip = false;
10139    std::lock_guard<std::mutex> lock(global_lock);
10140    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
10141                                                                    "vkGetDisplayPlaneCapabilitiesKHR");
10142    return skip;
10143}
10144
10145VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
10146                                                              uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
10147    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10148    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10149    bool skip = PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_data, physicalDevice, planeIndex);
10150
10151    if (!skip) {
10152        result = instance_data->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
10153    }
10154
10155    return result;
10156}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName);
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName);

// Map of all APIs to be intercepted by this layer
static const std::unordered_map<std::string, void*> name_to_funcptr_map = {
    {"vkGetInstanceProcAddr", (void*)GetInstanceProcAddr},
    {"vk_layerGetPhysicalDeviceProcAddr", (void*)GetPhysicalDeviceProcAddr},
    {"vkGetDeviceProcAddr", (void*)GetDeviceProcAddr},
    {"vkCreateInstance", (void*)CreateInstance},
    {"vkCreateDevice", (void*)CreateDevice},
    {"vkEnumeratePhysicalDevices", (void*)EnumeratePhysicalDevices},
    {"vkGetPhysicalDeviceQueueFamilyProperties", (void*)GetPhysicalDeviceQueueFamilyProperties},
    {"vkDestroyInstance", (void*)DestroyInstance},
    {"vkEnumerateInstanceLayerProperties", (void*)EnumerateInstanceLayerProperties},
    {"vkEnumerateDeviceLayerProperties", (void*)EnumerateDeviceLayerProperties},
    {"vkEnumerateInstanceExtensionProperties", (void*)EnumerateInstanceExtensionProperties},
    {"vkEnumerateDeviceExtensionProperties", (void*)EnumerateDeviceExtensionProperties},
    {"vkCreateDescriptorUpdateTemplateKHR", (void*)CreateDescriptorUpdateTemplateKHR},
    {"vkDestroyDescriptorUpdateTemplateKHR", (void*)DestroyDescriptorUpdateTemplateKHR},
    {"vkUpdateDescriptorSetWithTemplateKHR", (void*)UpdateDescriptorSetWithTemplateKHR},
    {"vkCmdPushDescriptorSetWithTemplateKHR", (void*)CmdPushDescriptorSetWithTemplateKHR},
    {"vkCreateSwapchainKHR", (void*)CreateSwapchainKHR},
    {"vkDestroySwapchainKHR", (void*)DestroySwapchainKHR},
    {"vkGetSwapchainImagesKHR", (void*)GetSwapchainImagesKHR},
    {"vkAcquireNextImageKHR", (void*)AcquireNextImageKHR},
    {"vkQueuePresentKHR", (void*)QueuePresentKHR},
    {"vkQueueSubmit", (void*)QueueSubmit},
    {"vkWaitForFences", (void*)WaitForFences},
    {"vkGetFenceStatus", (void*)GetFenceStatus},
    {"vkQueueWaitIdle", (void*)QueueWaitIdle},
    {"vkDeviceWaitIdle", (void*)DeviceWaitIdle},
    {"vkGetDeviceQueue", (void*)GetDeviceQueue},
    {"vkDestroyDevice", (void*)DestroyDevice},
    {"vkDestroyFence", (void*)DestroyFence},
    {"vkResetFences", (void*)ResetFences},
    {"vkDestroySemaphore", (void*)DestroySemaphore},
    {"vkDestroyEvent", (void*)DestroyEvent},
    {"vkDestroyQueryPool", (void*)DestroyQueryPool},
    {"vkDestroyBuffer", (void*)DestroyBuffer},
    {"vkDestroyBufferView", (void*)DestroyBufferView},
    {"vkDestroyImage", (void*)DestroyImage},
    {"vkDestroyImageView", (void*)DestroyImageView},
    {"vkDestroyShaderModule", (void*)DestroyShaderModule},
    {"vkDestroyPipeline", (void*)DestroyPipeline},
    {"vkDestroyPipelineLayout", (void*)DestroyPipelineLayout},
    {"vkDestroySampler", (void*)DestroySampler},
    {"vkDestroyDescriptorSetLayout", (void*)DestroyDescriptorSetLayout},
    {"vkDestroyDescriptorPool", (void*)DestroyDescriptorPool},
    {"vkDestroyFramebuffer", (void*)DestroyFramebuffer},
    {"vkDestroyRenderPass", (void*)DestroyRenderPass},
    {"vkCreateBuffer", (void*)CreateBuffer},
    {"vkCreateBufferView", (void*)CreateBufferView},
    {"vkCreateImage", (void*)CreateImage},
    {"vkCreateImageView", (void*)CreateImageView},
    {"vkCreateFence", (void*)CreateFence},
    {"vkCreatePipelineCache", (void*)CreatePipelineCache},
    {"vkDestroyPipelineCache", (void*)DestroyPipelineCache},
    {"vkGetPipelineCacheData", (void*)GetPipelineCacheData},
    {"vkMergePipelineCaches", (void*)MergePipelineCaches},
    {"vkCreateGraphicsPipelines", (void*)CreateGraphicsPipelines},
    {"vkCreateComputePipelines", (void*)CreateComputePipelines},
    {"vkCreateSampler", (void*)CreateSampler},
    {"vkCreateDescriptorSetLayout", (void*)CreateDescriptorSetLayout},
    {"vkCreatePipelineLayout", (void*)CreatePipelineLayout},
    {"vkCreateDescriptorPool", (void*)CreateDescriptorPool},
    {"vkResetDescriptorPool", (void*)ResetDescriptorPool},
    {"vkAllocateDescriptorSets", (void*)AllocateDescriptorSets},
    {"vkFreeDescriptorSets", (void*)FreeDescriptorSets},
    {"vkUpdateDescriptorSets", (void*)UpdateDescriptorSets},
    {"vkCreateCommandPool", (void*)CreateCommandPool},
    {"vkDestroyCommandPool", (void*)DestroyCommandPool},
    {"vkResetCommandPool", (void*)ResetCommandPool},
    {"vkCreateQueryPool", (void*)CreateQueryPool},
    {"vkAllocateCommandBuffers", (void*)AllocateCommandBuffers},
    {"vkFreeCommandBuffers", (void*)FreeCommandBuffers},
    {"vkBeginCommandBuffer", (void*)BeginCommandBuffer},
    {"vkEndCommandBuffer", (void*)EndCommandBuffer},
    {"vkResetCommandBuffer", (void*)ResetCommandBuffer},
    {"vkCmdBindPipeline", (void*)CmdBindPipeline},
    {"vkCmdSetViewport", (void*)CmdSetViewport},
    {"vkCmdSetScissor", (void*)CmdSetScissor},
    {"vkCmdSetLineWidth", (void*)CmdSetLineWidth},
    {"vkCmdSetDepthBias", (void*)CmdSetDepthBias},
    {"vkCmdSetBlendConstants", (void*)CmdSetBlendConstants},
    {"vkCmdSetDepthBounds", (void*)CmdSetDepthBounds},
    {"vkCmdSetStencilCompareMask", (void*)CmdSetStencilCompareMask},
    {"vkCmdSetStencilWriteMask", (void*)CmdSetStencilWriteMask},
    {"vkCmdSetStencilReference", (void*)CmdSetStencilReference},
    {"vkCmdBindDescriptorSets", (void*)CmdBindDescriptorSets},
    {"vkCmdBindVertexBuffers", (void*)CmdBindVertexBuffers},
    {"vkCmdBindIndexBuffer", (void*)CmdBindIndexBuffer},
    {"vkCmdDraw", (void*)CmdDraw},
    {"vkCmdDrawIndexed", (void*)CmdDrawIndexed},
    {"vkCmdDrawIndirect", (void*)CmdDrawIndirect},
    {"vkCmdDrawIndexedIndirect", (void*)CmdDrawIndexedIndirect},
    {"vkCmdDispatch", (void*)CmdDispatch},
    {"vkCmdDispatchIndirect", (void*)CmdDispatchIndirect},
    {"vkCmdCopyBuffer", (void*)CmdCopyBuffer},
    {"vkCmdCopyImage", (void*)CmdCopyImage},
    {"vkCmdBlitImage", (void*)CmdBlitImage},
    {"vkCmdCopyBufferToImage", (void*)CmdCopyBufferToImage},
    {"vkCmdCopyImageToBuffer", (void*)CmdCopyImageToBuffer},
    {"vkCmdUpdateBuffer", (void*)CmdUpdateBuffer},
    {"vkCmdFillBuffer", (void*)CmdFillBuffer},
    {"vkCmdClearColorImage", (void*)CmdClearColorImage},
    {"vkCmdClearDepthStencilImage", (void*)CmdClearDepthStencilImage},
    {"vkCmdClearAttachments", (void*)CmdClearAttachments},
    {"vkCmdResolveImage", (void*)CmdResolveImage},
    {"vkGetImageSubresourceLayout", (void*)GetImageSubresourceLayout},
    {"vkCmdSetEvent", (void*)CmdSetEvent},
    {"vkCmdResetEvent", (void*)CmdResetEvent},
    {"vkCmdWaitEvents", (void*)CmdWaitEvents},
    {"vkCmdPipelineBarrier", (void*)CmdPipelineBarrier},
    {"vkCmdBeginQuery", (void*)CmdBeginQuery},
    {"vkCmdEndQuery", (void*)CmdEndQuery},
    {"vkCmdResetQueryPool", (void*)CmdResetQueryPool},
    {"vkCmdCopyQueryPoolResults", (void*)CmdCopyQueryPoolResults},
    {"vkCmdPushConstants", (void*)CmdPushConstants},
    {"vkCmdWriteTimestamp", (void*)CmdWriteTimestamp},
    {"vkCreateFramebuffer", (void*)CreateFramebuffer},
    {"vkCreateShaderModule", (void*)CreateShaderModule},
    {"vkCreateRenderPass", (void*)CreateRenderPass},
    {"vkCmdBeginRenderPass", (void*)CmdBeginRenderPass},
    {"vkCmdNextSubpass", (void*)CmdNextSubpass},
    {"vkCmdEndRenderPass", (void*)CmdEndRenderPass},
    {"vkCmdExecuteCommands", (void*)CmdExecuteCommands},
    {"vkSetEvent", (void*)SetEvent},
    {"vkMapMemory", (void*)MapMemory},
    {"vkUnmapMemory", (void*)UnmapMemory},
    {"vkFlushMappedMemoryRanges", (void*)FlushMappedMemoryRanges},
    {"vkInvalidateMappedMemoryRanges", (void*)InvalidateMappedMemoryRanges},
    {"vkAllocateMemory", (void*)AllocateMemory},
    {"vkFreeMemory", (void*)FreeMemory},
    {"vkBindBufferMemory", (void*)BindBufferMemory},
    {"vkGetBufferMemoryRequirements", (void*)GetBufferMemoryRequirements},
    {"vkGetImageMemoryRequirements", (void*)GetImageMemoryRequirements},
    {"vkGetQueryPoolResults", (void*)GetQueryPoolResults},
    {"vkBindImageMemory", (void*)BindImageMemory},
    {"vkQueueBindSparse", (void*)QueueBindSparse},
    {"vkCreateSemaphore", (void*)CreateSemaphore},
    {"vkCreateEvent", (void*)CreateEvent},
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    {"vkCreateAndroidSurfaceKHR", (void*)CreateAndroidSurfaceKHR},
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
    {"vkCreateMirSurfaceKHR", (void*)CreateMirSurfaceKHR},
    {"vkGetPhysicalDeviceMirPresentationSupportKHR", (void*)GetPhysicalDeviceMirPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
    {"vkCreateWaylandSurfaceKHR", (void*)CreateWaylandSurfaceKHR},
    {"vkGetPhysicalDeviceWaylandPresentationSupportKHR", (void*)GetPhysicalDeviceWaylandPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
    {"vkCreateWin32SurfaceKHR", (void*)CreateWin32SurfaceKHR},
    {"vkGetPhysicalDeviceWin32PresentationSupportKHR", (void*)GetPhysicalDeviceWin32PresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
    {"vkCreateXcbSurfaceKHR", (void*)CreateXcbSurfaceKHR},
    {"vkGetPhysicalDeviceXcbPresentationSupportKHR", (void*)GetPhysicalDeviceXcbPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
    {"vkCreateXlibSurfaceKHR", (void*)CreateXlibSurfaceKHR},
    {"vkGetPhysicalDeviceXlibPresentationSupportKHR", (void*)GetPhysicalDeviceXlibPresentationSupportKHR},
#endif
    {"vkCreateDisplayPlaneSurfaceKHR", (void*)CreateDisplayPlaneSurfaceKHR},
    {"vkDestroySurfaceKHR", (void*)DestroySurfaceKHR},
    {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", (void*)GetPhysicalDeviceSurfaceCapabilitiesKHR},
    {"vkGetPhysicalDeviceSurfaceCapabilities2KHR", (void*)GetPhysicalDeviceSurfaceCapabilities2KHR},
    {"vkGetPhysicalDeviceSurfaceCapabilities2EXT", (void*)GetPhysicalDeviceSurfaceCapabilities2EXT},
    {"vkGetPhysicalDeviceSurfaceSupportKHR", (void*)GetPhysicalDeviceSurfaceSupportKHR},
    {"vkGetPhysicalDeviceSurfacePresentModesKHR", (void*)GetPhysicalDeviceSurfacePresentModesKHR},
    {"vkGetPhysicalDeviceSurfaceFormatsKHR", (void*)GetPhysicalDeviceSurfaceFormatsKHR},
    {"vkGetPhysicalDeviceSurfaceFormats2KHR", (void*)GetPhysicalDeviceSurfaceFormats2KHR},
    {"vkGetPhysicalDeviceQueueFamilyProperties2KHR", (void*)GetPhysicalDeviceQueueFamilyProperties2KHR},
    {"vkEnumeratePhysicalDeviceGroupsKHX", (void*)EnumeratePhysicalDeviceGroupsKHX},
    {"vkCreateDebugReportCallbackEXT", (void*)CreateDebugReportCallbackEXT},
    {"vkDestroyDebugReportCallbackEXT", (void*)DestroyDebugReportCallbackEXT},
    {"vkDebugReportMessageEXT", (void*)DebugReportMessageEXT},
    {"vkGetPhysicalDeviceDisplayPlanePropertiesKHR", (void*)GetPhysicalDeviceDisplayPlanePropertiesKHR},
10338    {"GetDisplayPlaneSupportedDisplaysKHR", (void*)GetDisplayPlaneSupportedDisplaysKHR},
10339    {"GetDisplayPlaneCapabilitiesKHR", (void*)GetDisplayPlaneCapabilitiesKHR},
};
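
// Lookup against this map is an exact string match. Illustrative only: resolving an
// intercepted name through the loader, e.g.
//
//     auto cmd_draw = reinterpret_cast<PFN_vkCmdDraw>(vkGetDeviceProcAddr(device, "vkCmdDraw"));
//
// lands in GetDeviceProcAddr below, which returns this layer's CmdDraw from the map rather
// than the driver's implementation.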

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
    assert(device);
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    // Is API to be intercepted by this layer?
    const auto &item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

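    // Name not intercepted by this layer: chain down to the next layer's (or the ICD's)
    // GetDeviceProcAddr.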
    auto &table = device_data->dispatch_table;
    if (!table.GetDeviceProcAddr) return nullptr;
    return table.GetDeviceProcAddr(device, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    instance_layer_data *instance_data;
    // Is API to be intercepted by this layer?
    const auto &item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

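    // Not intercepted here: chain down. Note that the map lookup above runs before
    // instance_data is fetched, so globally-dispatched names such as "vkCreateInstance"
    // resolve even when the loader passes a null instance handle.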
    instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    auto &table = instance_data->dispatch_table;
    if (!table.GetInstanceProcAddr) return nullptr;
    return table.GetInstanceProcAddr(instance, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
    assert(instance);
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);

    auto &table = instance_data->dispatch_table;
    if (!table.GetPhysicalDeviceProcAddr) return nullptr;
    return table.GetPhysicalDeviceProcAddr(instance, funcName);
}

}  // namespace core_validation

// loader-layer interface v0, just wrappers since this library contains only one layer

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                                      VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
                                                                                  VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                                                VkLayerProperties *pProperties) {
    // The loader is expected to pass VK_NULL_HANDLE here; the layer command handles it just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // The loader is expected to pass VK_NULL_HANDLE here; the layer command handles it just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
                                                                                           const char *funcName) {
    return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
    assert(pVersionStruct != NULL);
    assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);

    // Fill in the function pointers if our version is at least capable of having the structure contain them.
    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
        pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
        pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
        pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
    }

    if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        core_validation::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
    } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
    }

    return VK_SUCCESS;
}
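
// Illustrative only: roughly how a loader drives the negotiation above (field names per
// VkNegotiateLayerInterface; real loader code differs):
//
//     VkNegotiateLayerInterface ni = {LAYER_NEGOTIATE_INTERFACE_STRUCT};
//     ni.loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
//     if (vkNegotiateLoaderLayerInterfaceVersion(&ni) == VK_SUCCESS) {
//         // ni.loaderLayerInterfaceVersion now holds the agreed version; when it is >= 2,
//         // the pfn* members expose the layer's entry points.
//     }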