core_validation.cpp revision 3a4c679ed508b10fd119bb97c127c79b5d126d74
/* Copyright (c) 2015-2017 The Khronos Group Inc.
 * Copyright (c) 2015-2017 Valve Corporation
 * Copyright (c) 2015-2017 LunarG, Inc.
 * Copyright (C) 2015-2017 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: Dustin Graves <dustin@lunarg.com>
 * Author: Jeremy Hayes <jeremy@lunarg.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Karl Schultz <karl@lunarg.com>
 * Author: Mark Young <marky@lunarg.com>
 * Author: Mike Schuchardt <mikes@lunarg.com>
 * Author: Mike Weiblen <mikew@lunarg.com>
 * Author: Tony Barbour <tony@LunarG.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <inttypes.h>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "core_validation.h"
#include "buffer_validation.h"
#include "shader_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)      \
    {                        \
        printf(__VA_ARGS__); \
        printf("\n");        \
    }
#endif
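
// Editorial example (not part of the original source): on desktop builds,
//   LOGCONSOLE("Cannot find layer %s", "VK_LAYER_LUNARG_core_validation");
// prints the formatted message plus a trailing newline via printf; on Android it
// is routed through __android_log_print() with the "DS" tag instead.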

// TODO: remove on NDK update (r15 will probably have proper STL impl)
#ifdef __ANDROID__
namespace std {

template <typename T>
std::string to_string(T var) {
    std::ostringstream ss;
    ss << var;
    return ss.str();
}
}
#endif
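
// Editorial example (assuming the shim above is active): std::to_string(42)
// yields "42" through the ostringstream fallback, standing in for the std::to_string
// overloads that older NDK STL implementations lacked.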

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

namespace core_validation {

using std::unordered_map;
using std::unordered_set;
using std::unique_ptr;
using std::vector;
using std::string;
using std::stringstream;
using std::max;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);

// A special value of (0xFFFFFFFF, 0xFFFFFFFF) indicates that the surface size will be determined
// by the extent of a swapchain targeting the surface.
static const uint32_t kSurfaceSizeFromSwapchain = 0xFFFFFFFFu;

struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
    uint32_t physical_devices_count = 0;
    CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
    uint32_t physical_device_groups_count = 0;
    CHECK_DISABLED disabled = {};

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;

    InstanceExtensions extensions;
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;

    DeviceExtensions extensions = {};
    unordered_set<VkQueue> queues;  // All queues under given device
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, std::unique_ptr<cvdescriptorset::DescriptorSetLayout>> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_STATE> queueMap;
    unordered_map<VkEvent, EVENT_STATE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_STATE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    unordered_map<VkDescriptorUpdateTemplateKHR, unique_ptr<TEMPLATE_STATE>> desc_template_map;
    unordered_map<VkSwapchainKHR, std::unique_ptr<SWAPCHAIN_NODE>> swapchainMap;

    VkDevice device = VK_NULL_HANDLE;
    VkPhysicalDevice physical_device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
    VkPhysicalDeviceProperties phys_dev_props = {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;

static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo>
void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
        }
    }
}
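
// Editorial note: for example, an enabledLayerCount of 2 with ppEnabledLayerNames
//   { "VK_LAYER_GOOGLE_unique_objects", "VK_LAYER_LUNARG_core_validation" }
// triggers the console warning above, since unique_objects is listed before this layer.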

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *GetImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_STATE *GetSamplerState(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
IMAGE_STATE *GetImageState(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer state ptr for specified buffer or else NULL
BUFFER_STATE *GetBufferState(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->swapchainMap.find(swapchain);
    if (swp_it == dev_data->swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return buffer view state ptr for specified bufferView or else NULL
BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
    auto bv_it = dev_data->bufferViewMap.find(buffer_view);
    if (bv_it == dev_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *GetFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_STATE *GetEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *GetQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_STATE *GetQueueState(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *GetSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

SURFACE_STATE *GetSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    auto it = instance_data->surface_map.find(surface);
    if (it == instance_data->surface_map.end()) {
        return nullptr;
    }
    return &it->second;
}

DeviceExtensions const *GetEnabledExtensions(layer_data const *dev_data) {
    return &dev_data->extensions;
}

// Return ptr to memory binding for given handle of specified type
static BINDABLE *GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
    switch (type) {
        case kVulkanObjectTypeImage:
            return GetImageState(dev_data, VkImage(handle));
        case kVulkanObjectTypeBuffer:
            return GetBufferState(dev_data, VkBuffer(handle));
        default:
            break;
    }
    return nullptr;
}
// Forward declaration
GLOBAL_CB_NODE *GetCBNode(layer_data const *, const VkCommandBuffer);

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *dev_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    dev_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle, VulkanObjectType type,
                                  const char *functionName) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, HandleToUint64(mem), object_string[type], bound_object_handle);
        }
    }
    return false;
}
// For given image_state
//  If mem is special swapchain key, then verify that image_state valid member is true
//  Else verify that the image's bound memory range is valid
bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_state->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           HandleToUint64(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, HandleToUint64(image_state->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), kVulkanObjectTypeImage,
                                     functionName);
    }
    return false;
}
// For given buffer_state, verify that the range it's bound to is valid
bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer,
                                 functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_state to valid param value
//  Else set the image's bound memory range to valid param value
void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_state->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
    SetMemoryValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), valid);
}

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    sampler_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(sampler_state->sampler), kVulkanObjectTypeSampler});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        for (auto mem_binding : image_state->GetBoundMemory()) {
            DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
            if (pMemInfo) {
                pMemInfo->cb_bindings.insert(cb_node);
                // Now update CBInfo's Mem reference list
                cb_node->memObjs.insert(mem_binding);
            }
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({HandleToUint64(image_state->image), kVulkanObjectTypeImage});
        image_state->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(view_state->image_view), kVulkanObjectTypeImageView});
    auto image_state = GetImageState(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_state) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
    // First update CB binding in MemObj mini CB list
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
        if (pMemInfo) {
            pMemInfo->cb_bindings.insert(cb_node);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(mem_binding);
        }
    }
    // Now update cb binding for buffer
    cb_node->object_bindings.insert({HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer});
    buffer_state->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(view_state->buffer_view), kVulkanObjectTypeBufferView});
    auto buffer_state = GetBufferState(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
    }
}

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (cb_node) {
        if (cb_node->memObjs.size() > 0) {
            for (auto mem : cb_node->memObjs) {
                DEVICE_MEM_INFO *pInfo = GetMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->cb_bindings.erase(cb_node);
                }
            }
            cb_node->memObjs.clear();
        }
        cb_node->validate_functions.clear();
    }
}

// Clear a single object binding from given memory object, or report error if binding is missing
static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
    if (mem_info) {
        mem_info->obj_bindings.erase({handle, type});
    }
    return false;
}

// ClearMemoryObjectBindings clears the binding of objects to memory
//  For the given object it pulls the memory bindings and makes sure that the bindings
//  no longer refer to the object being cleared. This occurs when objects are destroyed.
bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
    bool skip = false;
    BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
    if (mem_binding) {
        if (!mem_binding->sparse) {
            skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
        } else {  // Sparse, clear all bindings
            for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
                skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
            }
        }
    }
    return skip;
}

// For the given mem object, verify that it is not null or UNBOUND; if it is, report an error. Return the skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
                                                      " used with no memory bound. Memory should be bound by calling "
                                                      "vkBind%sMemory(). %s",
                         api_name, type_name, handle, type_name, validation_error_map[error_code]);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
                                                      " used with no memory bound and previously bound memory was freed. "
                                                      "Memory must not be freed prior to this operation. %s",
                         api_name, type_name, handle, validation_error_map[error_code]);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
                                  UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), api_name, "Image",
                                          error_code);
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
                                   UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), api_name,
                                          "Buffer", error_code);
    }
    return result;
}

// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object.
// Corresponding valid usage checks are in ValidateSetMemBinding().
static void SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type, const char *apiName) {
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // For image objects, make sure default memory state is correctly set
            // TODO : What's the best/correct way to handle this?
            if (kVulkanObjectTypeImage == type) {
                auto const image_state = GetImageState(dev_data, VkImage(handle));
                if (image_state) {
                    VkImageCreateInfo ici = image_state->createInfo;
                    if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                        // TODO::  More memory state transition stuff.
                    }
                }
            }
            mem_binding->binding.mem = mem;
        }
    }
}
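
// Editorial sketch of the resulting state, assuming a successful vkBindImageMemory() call:
//   mem_info->obj_bindings now contains {image_handle, kVulkanObjectTypeImage}, and
//   image_state->binding.mem == mem.
// Because non-sparse bindings are immutable, this link persists until the image or the
// memory object is destroyed.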

// Valid usage checks for a call to SetMemBinding().
// Cases checked (the NULL mem case is a no-op here):
//  If the object was created with sparse binding flags, output a validation error
//  If a previous binding existed, output a validation error
//  If the previously bound memory has since been freed, output a validation error
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
static bool ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
                                  const char *apiName) {
    bool skip = false;
    // It's an error to bind an object to NULL memory
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        if (mem_binding->sparse) {
            UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_1740082a;
            const char *handle_type = "IMAGE";
            if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
                error_code = VALIDATION_ERROR_1700080c;
                handle_type = "BUFFER";
            } else {
                assert(strcmp(apiName, "vkBindImageMemory()") == 0);
            }
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem), __LINE__, error_code, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which was created with sparse memory flags (VK_%s_CREATE_SPARSE_*_BIT). %s",
                            apiName, HandleToUint64(mem), handle, handle_type, validation_error_map[error_code]);
        }
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = GetMemObjInfo(dev_data, mem_binding->binding.mem);
            if (prev_binding) {
                UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_17400828;
                if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
                    error_code = VALIDATION_ERROR_1700080a;
                } else {
                    assert(strcmp(apiName, "vkBindImageMemory()") == 0);
                }
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(mem), __LINE__, error_code, "MEM",
                                "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                ") which has already been bound to mem object 0x%" PRIxLEAST64 ". %s",
                                apiName, HandleToUint64(mem), handle, HandleToUint64(prev_binding->mem),
                                validation_error_map[error_code]);
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
                                "Vulkan so this attempt to bind to new memory is not allowed.",
                                apiName, HandleToUint64(mem), handle);
            }
        }
    }
    return skip;
}

// For the NULL mem case, clear any previous binding; else...
// Make sure the given object is in its object map
//  If a previous binding existed, update the binding
//  Add a reference from the objectInfo to the memoryInfo
//  Add a reference off of the object's binding info
// Return the skip value (currently always false)
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
    bool skip = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        assert(mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, binding.mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            mem_binding->sparse_bindings.insert(binding);
        }
    }
    return skip;
}
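
// Editorial note: unlike the single binding.mem slot maintained by SetMemBinding(), a
// sparse resource accumulates entries in sparse_bindings, so one buffer or image may
// reference several VkDeviceMemory objects at once (e.g. one per range bound through
// vkQueueBindSparse).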

// Check object status for selected flag state
static bool validate_status(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
                            const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    if (!(pNode->status & status_mask)) {
        char const *const message = validation_error_map[msg_code];
        return log_msg(dev_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(pNode->commandBuffer), __LINE__, msg_code, "DS", "command buffer object 0x%p: %s. %s.",
                       pNode->commandBuffer, fail_msg, message);
    }
    return false;
}

// Retrieve pipeline node ptr for given pipeline object
static PIPELINE_STATE *getPipelineState(layer_data const *dev_data, VkPipeline pipeline) {
    auto it = dev_data->pipelineMap.find(pipeline);
    if (it == dev_data->pipelineMap.end()) {
        return nullptr;
    }
    return it->second;
}

RENDER_PASS_STATE *GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass) {
    auto it = dev_data->renderPassMap.find(renderpass);
    if (it == dev_data->renderPassMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

FRAMEBUFFER_STATE *GetFramebufferState(const layer_data *dev_data, VkFramebuffer framebuffer) {
    auto it = dev_data->frameBufferMap.find(framebuffer);
    if (it == dev_data->frameBufferMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

cvdescriptorset::DescriptorSetLayout const *GetDescriptorSetLayout(layer_data const *dev_data, VkDescriptorSetLayout dsLayout) {
    auto it = dev_data->descriptorSetLayoutMap.find(dsLayout);
    if (it == dev_data->descriptorSetLayoutMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
    auto it = dev_data->pipelineLayoutMap.find(pipeLayout);
    if (it == dev_data->pipelineLayoutMap.end()) {
        return nullptr;
    }
    return &it->second;
}

shader_module const *GetShaderModuleState(layer_data const *dev_data, VkShaderModule module) {
    auto it = dev_data->shaderModuleMap.find(module);
    if (it == dev_data->shaderModuleMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

// Return true if for a given PSO, the given state enum is dynamic, else return false
static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
        }
    }
    return false;
}
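
// Editorial example: isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT) returns true only
// when VK_DYNAMIC_STATE_VIEWPORT appears in pDynamicState->pDynamicStates, i.e. when the
// viewport is supplied by vkCmdSetViewport() rather than baked into the PSO.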

// Validate state stored as flags at time of draw call
static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
                                      UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    bool result = false;
    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic line width state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pRasterizationState &&
        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic depth bias state not set for this command buffer", msg_code);
    }
    if (pPipe->blendConstantsEnabled) {
        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic blend constants state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic depth bounds state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil read mask state not set for this command buffer", msg_code);
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil write mask state not set for this command buffer", msg_code);
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil reference state not set for this command buffer", msg_code);
    }
    if (indexed) {
        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
    }

    return result;
}

// Verify attachment reference compatibility according to spec
//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & other array must match this
//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
//   to make sure that format and sample counts match.
//  If not, they are not compatible.
static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
                                             const VkAttachmentDescription *pSecondaryAttachments) {
    // Check potential NULL cases first to avoid nullptr issues later
    if (pPrimary == nullptr) {
        if (pSecondary == nullptr) {
            return true;
        }
        return false;
    } else if (pSecondary == nullptr) {
        return false;
    }
    if (index >= primaryCount) {  // Check secondary as if primary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment) return true;
    } else if (index >= secondaryCount) {  // Check primary as if secondary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment) return true;
    } else {  // Format and sample count must match
        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return true;
        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return false;
        }
        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
             pSecondaryAttachments[pSecondary[index].attachment].format) &&
            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
             pSecondaryAttachments[pSecondary[index].attachment].samples))
            return true;
    }
    // Format and sample counts didn't match
    return false;
}
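
// Editorial worked example: with primaryCount == 2, secondaryCount == 4, and index == 3,
// the primary side is treated as VK_ATTACHMENT_UNUSED, so the references are compatible
// only if pSecondary[3].attachment == VK_ATTACHMENT_UNUSED.
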
// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
// For given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
static bool verify_renderpass_compatibility(const layer_data *dev_data, const VkRenderPassCreateInfo *primaryRPCI,
                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
        stringstream errorStr;
        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
        errorMsg = errorStr.str();
        return false;
    }
    uint32_t spIndex = 0;
    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         primaryColorCount, primaryRPCI->pAttachments,
                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }

        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, 1,
                                              primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, secondaryRPCI->pAttachments)) {
            stringstream errorStr;
            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
            errorMsg = errorStr.str();
            return false;
        }

        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
        for (uint32_t i = 0; i < inputMax; ++i) {
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
    }
    return true;
}

// Return Set node ptr for specified set or else NULL
cvdescriptorset::DescriptorSet *GetSetNode(const layer_data *dev_data, VkDescriptorSet set) {
    auto set_it = dev_data->setMap.find(set);
    if (set_it == dev_data->setMap.end()) {
        return NULL;
    }
    return set_it->second;
}

// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}

static void list_bits(std::ostream &s, uint32_t bits) {
    for (int i = 0; i < 32 && bits; i++) {
        if (bits & (1 << i)) {
            s << i;
            bits &= ~(1 << i);
            if (bits) {
                s << ",";
            }
        }
    }
}
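
// Editorial example: list_bits(ss, 0x0000000B) appends "0,1,3" to ss, since bits 0, 1,
// and 3 are set in 0x0B; ValidatePipelineDrawtimeState() below uses this to report which
// dynamic viewports/scissors were never set.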

// Validate draw-time state related to the PSO
static bool ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
                                          PIPELINE_STATE const *pPipeline) {
    bool skip = false;

    // Verify vertex binding
    if (pPipeline->vertexBindingDescriptions.size() > 0) {
        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                            "The Pipeline State Object (0x%" PRIxLEAST64
                            ") expects that this Command Buffer's vertex binding Index %u "
                            "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
                            "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
                            HandleToUint64(state.pipeline_state->pipeline), vertex_binding, i, vertex_binding);
            }
        }
    } else {
        if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), __LINE__,
                            DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                            "Vertex buffers are bound to command buffer (0x%p"
                            ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
                            pCB->commandBuffer, HandleToUint64(state.pipeline_state->pipeline));
        }
    }
    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
    // Skip check if rasterization is disabled or there is no viewport.
    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
        pPipeline->graphicsPipelineCI.pViewportState) {
        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);

        if (dynViewport) {
            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
            if (missingViewportMask) {
                std::stringstream ss;
                ss << "Dynamic viewport(s) ";
                list_bits(ss, missingViewportMask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
            }
        }

        if (dynScissor) {
            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
            if (missingScissorMask) {
                std::stringstream ss;
                ss << "Dynamic scissor(s) ";
                list_bits(ss, missingScissorMask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
            }
        }
    }

    // Verify that any MSAA request in PSO matches sample# in bound FB
    // Skip the check if rasterization is disabled.
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
        if (pCB->activeRenderPass) {
            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
            uint32_t i;
            unsigned subpass_num_samples = 0;

            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
                auto attachment = subpass_desc->pColorAttachments[i].attachment;
                if (attachment != VK_ATTACHMENT_UNUSED)
                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_desc->pDepthStencilAttachment &&
                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                                "Num samples mismatch! At draw-time in Pipeline (0x%" PRIxLEAST64
                                ") with %u samples while current RenderPass (0x%" PRIxLEAST64 ") w/ %u samples!",
                                HandleToUint64(pPipeline->pipeline), pso_num_samples,
                                HandleToUint64(pCB->activeRenderPass->renderPass), subpass_num_samples);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                            "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
                            HandleToUint64(pPipeline->pipeline));
        }
    }
    // Verify that PSO creation renderPass is compatible with active renderPass
    if (pCB->activeRenderPass) {
        std::string err_string;
        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
            !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
                                             err_string)) {
            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                            "At Draw time the active render pass (0x%" PRIxLEAST64
                            ") is incompatible w/ gfx pipeline "
                            "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                            HandleToUint64(pCB->activeRenderPass->renderPass), HandleToUint64(pPipeline->pipeline),
                            HandleToUint64(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
        }

        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                            "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
                            pCB->activeSubpass);
        }
    }
    // TODO : Add more checks here

    return skip;
}

// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool verify_set_layout_compatibility(const cvdescriptorset::DescriptorSet *descriptor_set,
                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
                                            string &errorMsg) {
    auto num_sets = pipeline_layout->set_layouts.size();
    if (layoutIndex >= num_sets) {
        stringstream errorStr;
        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
                 << layoutIndex;
        errorMsg = errorStr.str();
        return false;
    }
    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
    return descriptor_set->IsCompatible(layout_node, &errorMsg);
}
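
// Editorial worked example: binding a set at layoutIndex == 2 against a pipeline layout
// whose set_layouts has size 2 fails above with "only contains 2 setLayouts corresponding
// to sets 0-1", before any per-binding compatibility check runs.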
1066
1067// Validate overall state at the time of a draw call
1068static bool ValidateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const bool indexed,
1069                              const VkPipelineBindPoint bind_point, const char *function,
1070                              UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
1071    bool result = false;
1072    auto const &state = cb_node->lastBound[bind_point];
1073    PIPELINE_STATE *pPipe = state.pipeline_state;
1074    if (nullptr == pPipe) {
1075        result |= log_msg(
1076            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1077            HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
1078            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
1079        // Early return unconditionally, as any further checks below would dereference the missing pipeline
1080        return result;
1081    }
1082    // First check flag states
1083    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
1084        result = validate_draw_state_flags(dev_data, cb_node, pPipe, indexed, msg_code);
1085
1086    // Now complete other state checks
1087    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
1088        string errorString;
1089        auto pipeline_layout = pPipe->pipeline_layout;
1090
1091        for (const auto &set_binding_pair : pPipe->active_slots) {
1092            uint32_t setIndex = set_binding_pair.first;
1093            // If a valid set is not bound, flag an error
1094            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
1095                result |=
1096                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1097                            HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
1098                            "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.",
1099                            HandleToUint64(pPipe->pipeline), setIndex);
1100            } else if (!verify_set_layout_compatibility(state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
1101                                                        errorString)) {
1102                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
1103                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
1104                result |=
1105                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1106                            HandleToUint64(setHandle), __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
1107                            "VkDescriptorSet (0x%" PRIxLEAST64
1108                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
1109                            HandleToUint64(setHandle), setIndex, HandleToUint64(pipeline_layout.layout), errorString.c_str());
1110            } else {  // Valid set is bound and layout compatible, validate that it's updated
1111                // Pull the set node
1112                cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1113                // Validate the draw-time state for this descriptor set
1114                std::string err_str;
1115                if (!descriptor_set->ValidateDrawState(set_binding_pair.second, state.dynamicOffsets[setIndex], cb_node, function,
1116                                                       &err_str)) {
1117                    auto set = descriptor_set->GetSet();
1118                    result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1119                                      VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(set), __LINE__,
1120                                      DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
1121                                      "Descriptor set 0x%" PRIxLEAST64 " encountered the following validation error at %s time: %s",
1122                                      HandleToUint64(set), function, err_str.c_str());
1123                }
1124            }
1125        }
1126    }
1127
1128    // Check general pipeline state that needs to be validated at drawtime
1129    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) result |= ValidatePipelineDrawtimeState(dev_data, state, cb_node, pPipe);
1130
1131    return result;
1132}
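
// Illustrative call site (intercept name and error code assumed): a vkCmdDraw hook would
// typically invoke
//   skip |= ValidateDrawState(dev_data, cb_node, false /*indexed*/, VK_PIPELINE_BIND_POINT_GRAPHICS,
//                             "vkCmdDraw()", <draw-specific UNIQUE_VALIDATION_ERROR_CODE>);
// and suppress the down-chain vkCmdDraw when this returns true.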
1133
1134static void UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
1135    auto const &state = cb_state->lastBound[bind_point];
1136    PIPELINE_STATE *pPipe = state.pipeline_state;
1137    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
1138        for (const auto &set_binding_pair : pPipe->active_slots) {
1139            uint32_t setIndex = set_binding_pair.first;
1140            // Pull the set node
1141            cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1142            // Bind this set and its active descriptor resources to the command buffer
1143            descriptor_set->BindCommandBuffer(cb_state, set_binding_pair.second);
1144            // For given active slots record updated images & buffers
1145            descriptor_set->GetStorageUpdates(set_binding_pair.second, &cb_state->updateBuffers, &cb_state->updateImages);
1146        }
1147    }
1148    if (pPipe->vertexBindingDescriptions.size() > 0) {
1149        cb_state->vertex_buffer_used = true;
1150    }
1151}
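
// Note: UpdateDrawState is the state-recording companion to ValidateDrawState above; the typical
// pattern is to validate first and only record descriptor and vertex-buffer usage once the draw
// is known to proceed.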
1152
1153// Validate HW line width capabilities prior to setting requested line width.
1154static bool verifyLineWidth(layer_data *dev_data, DRAW_STATE_ERROR dsError, VulkanObjectType object_type, const uint64_t &target,
1155                            float lineWidth) {
1156    bool skip = false;
1157
1158    // First check to see if the physical device supports wide lines.
1159    if ((VK_FALSE == dev_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
1160        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object_type], target, __LINE__,
1161                        dsError, "DS",
1162                        "Attempt to set lineWidth to %f but the physical device wideLines feature "
1163                        "is not enabled, so lineWidth must be 1.0f!",
1164                        lineWidth);
1165    } else {
1166        // Otherwise, make sure the width falls in the valid range.
1167        if ((dev_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
1168            (dev_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
1169            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object_type], target,
1170                            __LINE__, dsError, "DS",
1171                            "Attempt to set lineWidth to %f but the physical device limits line width "
1172                            "to the range [%f, %f]!",
1173                            lineWidth, dev_data->phys_dev_properties.properties.limits.lineWidthRange[0],
1174                            dev_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
1175        }
1176    }
1177
1178    return skip;
1179}
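
// Illustrative outcomes (device limits assumed): with wideLines disabled, lineWidth = 2.0f is
// flagged by the first check; with wideLines enabled and a reported lineWidthRange of
// [1.0, 8.0], lineWidth = 10.0f is flagged as out of range by the second.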
1180
1181// Verify that create state for a pipeline is valid
1182static bool verifyPipelineCreateState(layer_data *dev_data, std::vector<PIPELINE_STATE *> pPipelines, int pipelineIndex) {
1183    bool skip = false;
1184
1185    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];
1186
1187    // If create derivative bit is set, check that we've specified a base
1188    // pipeline correctly, and that the base pipeline was created to allow
1189    // derivatives.
1190    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
1191        PIPELINE_STATE *pBasePipeline = nullptr;
1192        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
1193              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
1194            // This check is a superset of VALIDATION_ERROR_096005a8 and VALIDATION_ERROR_096005aa
1195            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1196                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
1197                            "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
1198        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
1199            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
1200                skip |=
1201                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1202                            HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_208005a0, "DS",
1203                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline. %s",
1204                            validation_error_map[VALIDATION_ERROR_208005a0]);
1205            } else {
1206                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
1207            }
1208        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
1209            pBasePipeline = getPipelineState(dev_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
1210        }
1211
1212        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
1213            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1214                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
1215                            "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
1216        }
1217    }
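
    // Illustrative createInfo (values assumed) that passes the derivative checks above:
    //   flags              = VK_PIPELINE_CREATE_DERIVATIVE_BIT
    //   basePipelineIndex  = 0   (an earlier element of the same pCreateInfos array)
    //   basePipelineHandle = VK_NULL_HANDLE
    // where pCreateInfos[0].flags includes VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT.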
1218
1219    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
1220        const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
1221        auto const render_pass_info = GetRenderPassState(dev_data, pPipeline->graphicsPipelineCI.renderPass)->createInfo.ptr();
1222        const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pPipeline->graphicsPipelineCI.subpass];
1223        if (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) {
1224            skip |= log_msg(
1225                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1226                HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005d4, "DS",
1227                "vkCreateGraphicsPipelines(): Render pass (0x%" PRIxLEAST64
1228                ") subpass %u has colorAttachmentCount of %u which doesn't match the pColorBlendState->attachmentCount of %u. %s",
1229                HandleToUint64(pPipeline->graphicsPipelineCI.renderPass), pPipeline->graphicsPipelineCI.subpass,
1230                subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount,
1231                validation_error_map[VALIDATION_ERROR_096005d4]);
1232        }
1233        if (!dev_data->enabled_features.independentBlend) {
1234            if (pPipeline->attachments.size() > 1) {
1235                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
1236                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
1237                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
1238                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
1239                    // only attachment state, so memcmp is best suited for the comparison
1240                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
1241                               sizeof(pAttachments[0]))) {
1242                        skip |=
1243                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1244                                    HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_0f4004ba, "DS",
1245                                    "Invalid Pipeline CreateInfo: If the independent blend feature is not "
1246                                    "enabled, all elements of pAttachments must be identical. %s",
1247                                    validation_error_map[VALIDATION_ERROR_0f4004ba]);
1248                        break;
1249                    }
1250                }
1251            }
1252        }
1253        if (!dev_data->enabled_features.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
1254            skip |=
1255                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1256                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_0f4004bc, "DS",
1257                        "Invalid Pipeline CreateInfo: If the logic operations feature is not enabled, logicOpEnable must be VK_FALSE. %s",
1258                        validation_error_map[VALIDATION_ERROR_0f4004bc]);
1259        }
1260    }
1261
1262    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
1263    // produces nonsense errors that confuse users. Other layers should already
1264    // emit errors for renderpass being invalid.
1265    auto renderPass = GetRenderPassState(dev_data, pPipeline->graphicsPipelineCI.renderPass);
1266    if (renderPass && pPipeline->graphicsPipelineCI.subpass >= renderPass->createInfo.subpassCount) {
1267        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1268                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005ee, "DS",
1269                        "Invalid Pipeline CreateInfo State: Subpass index %u "
1270                        "is out of range for this renderpass (0..%u). %s",
1271                        pPipeline->graphicsPipelineCI.subpass, renderPass->createInfo.subpassCount - 1,
1272                        validation_error_map[VALIDATION_ERROR_096005ee]);
1273    }
1274
1275    if (validate_and_capture_pipeline_shader_state(dev_data, pPipeline)) {
1276        skip = true;
1277    }
1278    // Each shader's stage must be unique
1279    if (pPipeline->duplicate_shaders) {
1280        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
1281            if (pPipeline->duplicate_shaders & stage) {
1282                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1283                                HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
1284                                "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
1285                                string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
1286            }
1287        }
1288    }
1289    // VS is required
1290    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
1291        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1292                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005ae, "DS",
1293                        "Invalid Pipeline CreateInfo State: Vertex Shader required. %s",
1294                        validation_error_map[VALIDATION_ERROR_096005ae]);
1295    }
1296    // Either both or neither TC/TE shaders should be defined
1297    bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
1298    bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
1299    if (has_control && !has_eval) {
1300        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1301                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b2, "DS",
1302                        "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
1303                        validation_error_map[VALIDATION_ERROR_096005b2]);
1304    }
1305    if (!has_control && has_eval) {
1306        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1307                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b4, "DS",
1308                        "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
1309                        validation_error_map[VALIDATION_ERROR_096005b4]);
1310    }
1311    // Compute shaders should be specified independent of Gfx shaders
1312    if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
1313        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1314                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b0, "DS",
1315                        "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline. %s",
1316                        validation_error_map[VALIDATION_ERROR_096005b0]);
1317    }
1318    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
1319    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
1320    if (has_control && has_eval &&
1321        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
1322         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
1323        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1324                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005c0, "DS",
1325                        "Invalid Pipeline CreateInfo State: "
1326                        "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
1327                        "topology for tessellation pipelines. %s",
1328                        validation_error_map[VALIDATION_ERROR_096005c0]);
1329    }
1330    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
1331        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
1332        if (!has_control || !has_eval) {
1333            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1334                            HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005c2, "DS",
1335                            "Invalid Pipeline CreateInfo State: "
1336                            "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
1337                            "topology is only valid for tessellation pipelines. %s",
1338                            validation_error_map[VALIDATION_ERROR_096005c2]);
1339        }
1340    }
1341
1342    // If a rasterization state is provided...
1343    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
1344        // Make sure that the line width conforms to the HW.
1345        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
1346            skip |=
1347                verifyLineWidth(dev_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, kVulkanObjectTypePipeline,
1348                                HandleToUint64(pPipeline->pipeline), pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
1349        }
1350
1351        if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
1352            (!dev_data->enabled_features.depthClamp)) {
1353            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1354                            HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_1020061c, "DS",
1355                            "vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable "
1356                            "member of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE. %s",
1357                            validation_error_map[VALIDATION_ERROR_1020061c]);
1358        }
1359
1360        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
1361            (pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) &&
1362            (!dev_data->enabled_features.depthBiasClamp)) {
1363            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1364                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
1365                            "vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp "
1366                            "member of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
1367                            "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled");
1368        }
1369
1370        // If rasterization is enabled...
1371        if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
1372            auto subpass_desc = renderPass ? &renderPass->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;
1373
1374            if ((pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) &&
1375                (!dev_data->enabled_features.alphaToOne)) {
1376                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1377                                HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_10000622, "DS",
1378                                "vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable "
1379                                "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE. %s",
1380                                validation_error_map[VALIDATION_ERROR_10000622]);
1381            }
1382
1383            // If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
1384            if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
1385                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1386                if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
1387                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1388                                    HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005e0, "DS",
1389                                    "Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is "
1390                                    "enabled and subpass uses a depth/stencil attachment. %s",
1391                                    validation_error_map[VALIDATION_ERROR_096005e0]);
1392
1393                } else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
1394                           (!dev_data->enabled_features.depthBounds)) {
1395                    skip |= log_msg(
1396                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1397                        HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
1398                        "vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the depthBoundsTestEnable "
1399                        "member of the VkPipelineDepthStencilStateCreateInfo structure must be set to VK_FALSE.");
1400                }
1401            }
1402
1403            // If subpass uses color attachments, pColorBlendState must be a valid pointer
1404            if (subpass_desc) {
1405                uint32_t color_attachment_count = 0;
1406                for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
1407                    if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1408                        ++color_attachment_count;
1409                    }
1410                }
1411                if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
1412                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1413                                    HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005e2, "DS",
1414                                    "Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is "
1415                                    "enabled and subpass uses color attachments. %s",
1416                                    validation_error_map[VALIDATION_ERROR_096005e2]);
1417                }
1418            }
1419        }
1420    }
1421
1422    return skip;
1423}
1424
1425// Free the Pipeline nodes
1426static void deletePipelines(layer_data *dev_data) {
1427    if (dev_data->pipelineMap.empty()) return;
1428    for (auto &pipe_map_pair : dev_data->pipelineMap) {
1429        delete pipe_map_pair.second;
1430    }
1431    dev_data->pipelineMap.clear();
1432}
1433
1434    // The block of code that follows is specifically for managing/tracking descriptor sets (DSs)
1435
1436// Return Pool node ptr for specified pool or else NULL
1437DESCRIPTOR_POOL_STATE *GetDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
1438    auto pool_it = dev_data->descriptorPoolMap.find(pool);
1439    if (pool_it == dev_data->descriptorPoolMap.end()) {
1440        return NULL;
1441    }
1442    return pool_it->second;
1443}
1444
1445// Validate that the given set exists and is not in use by an in-flight CmdBuffer
1446// func_str is the name of the calling function
1447// Return false if no errors occur
1448// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
1449static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
1450    if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
1451    bool skip = false;
1452    auto set_node = dev_data->setMap.find(set);
1453    if (set_node == dev_data->setMap.end()) {
1454        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1455                        HandleToUint64(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
1456                        "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
1457                        HandleToUint64(set));
1458    } else {
1459        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
1460        if (set_node->second->in_use.load()) {
1461            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1462                            HandleToUint64(set), __LINE__, VALIDATION_ERROR_2860026a, "DS",
1463                            "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
1464                            func_str.c_str(), HandleToUint64(set), validation_error_map[VALIDATION_ERROR_2860026a]);
1465        }
1466    }
1467    return skip;
1468}
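
// Illustrative trigger (call sequence assumed): vkFreeDescriptorSets() on a set still referenced
// by a submitted-but-incomplete command buffer lands in the in_use branch above and reports
// VALIDATION_ERROR_2860026a.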
1469
1470// Remove set from setMap and delete the set
1471static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
1472    dev_data->setMap.erase(descriptor_set->GetSet());
1473    delete descriptor_set;
1474}
1475// Free all DS Pools including their Sets & related sub-structs
1476// NOTE : Calls to this function should be wrapped in mutex
1477static void deletePools(layer_data *dev_data) {
1478    for (auto ii = dev_data->descriptorPoolMap.begin(); ii != dev_data->descriptorPoolMap.end();) {
1479        // Remove this pool's sets from setMap and delete them
1480        for (auto ds : ii->second->sets) {
1481            freeDescriptorSet(dev_data, ds);
1482        }
1483        ii->second->sets.clear();
1484        delete ii->second;
1485        ii = dev_data->descriptorPoolMap.erase(ii);
1486    }
1487}
1488
1489static void clearDescriptorPool(layer_data *dev_data, const VkDevice device, const VkDescriptorPool pool,
1490                                VkDescriptorPoolResetFlags flags) {
1491    DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, pool);
1492    // TODO: validate flags
1493    // For every set allocated from this pool, remove it from setMap and free the cvdescriptorset::DescriptorSet
1494    for (auto ds : pPool->sets) {
1495        freeDescriptorSet(dev_data, ds);
1496    }
1497    pPool->sets.clear();
1498    // Reset available count for each type and available sets for this pool
1499    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
1500        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
1501    }
1502    pPool->availableSets = pPool->maxSets;
1503}
1504
1505// For given CB object, fetch associated CB Node from map
1506GLOBAL_CB_NODE *GetCBNode(layer_data const *dev_data, const VkCommandBuffer cb) {
1507    auto it = dev_data->commandBufferMap.find(cb);
1508    if (it == dev_data->commandBufferMap.end()) {
1509        return NULL;
1510    }
1511    return it->second;
1512}
1513
1514// If a renderpass is active, verify that the given command type is appropriate for current subpass state
1515bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
1516    if (!pCB->activeRenderPass) return false;
1517    bool skip = false;
1518    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
1519        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
1520        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1521                        HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
1522                        "Commands cannot be called in a subpass using secondary command buffers.");
1523    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
1524        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1525                        HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
1526                        "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
1527    }
1528    return skip;
1529}
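
// Illustrative example: after vkCmdBeginRenderPass(..., VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS),
// recording a draw directly into the primary command buffer trips the first check above, while
// calling vkCmdExecuteCommands() inside a VK_SUBPASS_CONTENTS_INLINE subpass trips the second.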
1530
1531bool ValidateCmdQueueFlags(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const char *caller_name, VkQueueFlags required_flags,
1532                           UNIQUE_VALIDATION_ERROR_CODE error_code) {
1533    auto pool = GetCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
1534    if (pool) {
1535        VkQueueFlags queue_flags = dev_data->phys_dev_properties.queue_family_properties[pool->queueFamilyIndex].queueFlags;
1536        if (!(required_flags & queue_flags)) {
1537            string required_flags_string;
1538            for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
1539                if (flag & required_flags) {
1540                    if (required_flags_string.size()) {
1541                        required_flags_string += " or ";
1542                    }
1543                    required_flags_string += string_VkQueueFlagBits(flag);
1544                }
1545            }
1546            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1547                           HandleToUint64(cb_node->commandBuffer), __LINE__, error_code, "DS",
1548                           "Cannot call %s on a command buffer allocated from a pool without %s capabilities. %s.", caller_name,
1549                           required_flags_string.c_str(), validation_error_map[error_code]);
1550        }
1551    }
1552    return false;
1553}
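
// Illustrative example (caller and pool setup assumed): validating vkCmdDispatch with
// required_flags = VK_QUEUE_COMPUTE_BIT against a command buffer whose pool was created on a
// transfer-only queue family yields a message of the form "Cannot call vkCmdDispatch() on a
// command buffer allocated from a pool without VK_QUEUE_COMPUTE_BIT capabilities."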
1554
1555static char const *GetCauseStr(VK_OBJECT obj) {
1556    if (obj.type == kVulkanObjectTypeDescriptorSet)
1557        return "destroyed or updated";
1558    if (obj.type == kVulkanObjectTypeCommandBuffer)
1559        return "destroyed or rerecorded";
1560    return "destroyed";
1561}
1562
1563static bool ReportInvalidCommandBuffer(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source) {
1564    bool skip = false;
1565    for (auto obj : cb_state->broken_bindings) {
1566        const char *type_str = object_string[obj.type];
1567        const char *cause_str = GetCauseStr(obj);
1568        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1569                        HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
1570                        "You are adding %s to command buffer 0x%p that is invalid because bound %s 0x%" PRIxLEAST64 " was %s.",
1571                        call_source, cb_state->commandBuffer, type_str, obj.handle, cause_str);
1572    }
1573    return skip;
1574}
1575
1576// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
1577// there's an issue with the Cmd ordering
1578bool ValidateCmd(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
1579    switch (cb_state->state) {
1580        case CB_RECORDING:
1581            return ValidateCmdSubpassState(dev_data, cb_state, cmd);
1582
1583        case CB_INVALID:
1584            return ReportInvalidCommandBuffer(dev_data, cb_state, caller_name);
1585
1586        default:
1587            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1588                           HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
1589                           "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
1590    }
1591}
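
// Note: the three branches above mirror the command buffer lifecycle -- CB_RECORDING accepts
// commands (subject to the subpass rules), CB_INVALID reports whichever binding broke the
// buffer, and any other state means vkBeginCommandBuffer() was never called.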
1592
1593void UpdateCmdBufferLastCmd(GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd) {
1594    if (cb_state->state == CB_RECORDING) {
1595        cb_state->last_cmd = cmd;
1596    }
1597}
1598// For given object struct return a ptr of BASE_NODE type for its wrapping struct
1599BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
1600    BASE_NODE *base_ptr = nullptr;
1601    switch (object_struct.type) {
1602        case kVulkanObjectTypeDescriptorSet: {
1603            base_ptr = GetSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
1604            break;
1605        }
1606        case kVulkanObjectTypeSampler: {
1607            base_ptr = GetSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
1608            break;
1609        }
1610        case kVulkanObjectTypeQueryPool: {
1611            base_ptr = GetQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
1612            break;
1613        }
1614        case kVulkanObjectTypePipeline: {
1615            base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
1616            break;
1617        }
1618        case kVulkanObjectTypeBuffer: {
1619            base_ptr = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
1620            break;
1621        }
1622        case kVulkanObjectTypeBufferView: {
1623            base_ptr = GetBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
1624            break;
1625        }
1626        case kVulkanObjectTypeImage: {
1627            base_ptr = GetImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
1628            break;
1629        }
1630        case kVulkanObjectTypeImageView: {
1631            base_ptr = GetImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
1632            break;
1633        }
1634        case kVulkanObjectTypeEvent: {
1635            base_ptr = GetEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
1636            break;
1637        }
1638        case kVulkanObjectTypeDescriptorPool: {
1639            base_ptr = GetDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
1640            break;
1641        }
1642        case kVulkanObjectTypeCommandPool: {
1643            base_ptr = GetCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
1644            break;
1645        }
1646        case kVulkanObjectTypeFramebuffer: {
1647            base_ptr = GetFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
1648            break;
1649        }
1650        case kVulkanObjectTypeRenderPass: {
1651            base_ptr = GetRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
1652            break;
1653        }
1654        case kVulkanObjectTypeDeviceMemory: {
1655            base_ptr = GetMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
1656            break;
1657        }
1658        default:
1659            // TODO : Any other objects to be handled here?
1660            assert(0);
1661            break;
1662    }
1663    return base_ptr;
1664}
1665
1666// Tie the VK_OBJECT to the cmd buffer which includes:
1667//  Add object_binding to cmd buffer
1668//  Add cb_binding to object
1669static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
1670    cb_bindings->insert(cb_node);
1671    cb_node->object_bindings.insert(obj);
1672}
1673// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
1674static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
1675    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
1676    if (base_obj) base_obj->cb_bindings.erase(cb_node);
1677}
1678// Reset the command buffer state
1679//  Maintain the createInfo and set state to CB_NEW, but clear all other state
1680static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
1681    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
1682    if (pCB) {
1683        pCB->in_use.store(0);
1684        pCB->last_cmd = CMD_NONE;
1685        // Reset CB state (note that createInfo is not cleared)
1686        pCB->commandBuffer = cb;
1687        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1688        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
1689        pCB->hasDrawCmd = false;
1690        pCB->state = CB_NEW;
1691        pCB->submitCount = 0;
1692        pCB->status = 0;
1693        pCB->viewportMask = 0;
1694        pCB->scissorMask = 0;
1695
1696        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
1697            pCB->lastBound[i].reset();
1698        }
1699
1700        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
1701        pCB->activeRenderPass = nullptr;
1702        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
1703        pCB->activeSubpass = 0;
1704        pCB->broken_bindings.clear();
1705        pCB->waitedEvents.clear();
1706        pCB->events.clear();
1707        pCB->writeEventsBeforeWait.clear();
1708        pCB->waitedEventsBeforeQueryReset.clear();
1709        pCB->queryToStateMap.clear();
1710        pCB->activeQueries.clear();
1711        pCB->startedQueries.clear();
1712        pCB->imageLayoutMap.clear();
1713        pCB->eventToStageMap.clear();
1714        pCB->drawData.clear();
1715        pCB->currentDrawData.buffers.clear();
1716        pCB->vertex_buffer_used = false;
1717        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
1718        // If secondary, invalidate any primary command buffer that may call us.
1719        if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
1720            invalidateCommandBuffers(dev_data,
1721                                     pCB->linkedCommandBuffers,
1722                                     {HandleToUint64(cb), kVulkanObjectTypeCommandBuffer});
1723        }
1724
1725        // Remove reverse command buffer links.
1726        for (auto pSubCB : pCB->linkedCommandBuffers) {
1727            pSubCB->linkedCommandBuffers.erase(pCB);
1728        }
1729        pCB->linkedCommandBuffers.clear();
1730        pCB->updateImages.clear();
1731        pCB->updateBuffers.clear();
1732        clear_cmd_buf_and_mem_references(dev_data, pCB);
1733        pCB->eventUpdates.clear();
1734        pCB->queryUpdates.clear();
1735
1736        // Remove object bindings
1737        for (auto obj : pCB->object_bindings) {
1738            removeCommandBufferBinding(dev_data, &obj, pCB);
1739        }
1740        pCB->object_bindings.clear();
1741        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
1742        for (auto framebuffer : pCB->framebuffers) {
1743            auto fb_state = GetFramebufferState(dev_data, framebuffer);
1744            if (fb_state) fb_state->cb_bindings.erase(pCB);
1745        }
1746        pCB->framebuffers.clear();
1747        pCB->activeFramebuffer = VK_NULL_HANDLE;
1748    }
1749}
1750
1751// Set PSO-related status bits for CB, including dynamic state set via PSO
1752static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe) {
1753    // Account for any dynamic state not set via this PSO
1754    if (!pPipe->graphicsPipelineCI.pDynamicState ||
1755        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) {  // All state is static
1756        pCB->status |= CBSTATUS_ALL_STATE_SET;
1757    } else {
1758        // First consider all state on
1759        // Then unset any state that's noted as dynamic in PSO
1760        // Finally OR that into CB statemask
1761        CBStatusFlags psoDynStateMask = CBSTATUS_ALL_STATE_SET;
1762        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
1763            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
1764                case VK_DYNAMIC_STATE_LINE_WIDTH:
1765                    psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
1766                    break;
1767                case VK_DYNAMIC_STATE_DEPTH_BIAS:
1768                    psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
1769                    break;
1770                case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
1771                    psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
1772                    break;
1773                case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
1774                    psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
1775                    break;
1776                case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
1777                    psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
1778                    break;
1779                case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
1780                    psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
1781                    break;
1782                case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
1783                    psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
1784                    break;
1785                default:
1786                    // TODO : Flag error here
1787                    break;
1788            }
1789        }
1790        pCB->status |= psoDynStateMask;
1791    }
1792}
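
// Illustrative example: a pipeline whose pDynamicState lists only VK_DYNAMIC_STATE_LINE_WIDTH
// ORs in CBSTATUS_ALL_STATE_SET minus CBSTATUS_LINE_WIDTH_SET, so the command buffer still
// needs an explicit vkCmdSetLineWidth() before it is fully set up for drawing.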
1793
1794// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
1795// render pass.
1796bool insideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
1797    bool inside = false;
1798    if (pCB->activeRenderPass) {
1799        inside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1800                         HandleToUint64(pCB->commandBuffer), __LINE__, msgCode, "DS",
1801                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 "). %s", apiName,
1802                         HandleToUint64(pCB->activeRenderPass->renderPass), validation_error_map[msgCode]);
1803    }
1804    return inside;
1805}
1806
1807// Flags validation error if the associated call is made outside a render pass. The apiName
1808// routine should ONLY be called inside a render pass.
1809bool outsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
1810    bool outside = false;
1811    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
1812        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
1813         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
1814        outside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1815                          HandleToUint64(pCB->commandBuffer), __LINE__, msgCode, "DS",
1816                          "%s: This call must be issued inside an active render pass. %s", apiName, validation_error_map[msgCode]);
1817    }
1818    return outside;
1819}
1820
1821static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
1822    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
1823}
1824
1825// For the given ValidationCheck enum, set all relevant instance disabled flags to true
1826void SetDisabledFlags(instance_layer_data *instance_data, VkValidationFlagsEXT *val_flags_struct) {
1827    for (uint32_t i = 0; i < val_flags_struct->disabledValidationCheckCount; ++i) {
1828        switch (val_flags_struct->pDisabledValidationChecks[i]) {
1829            case VK_VALIDATION_CHECK_SHADERS_EXT:
1830                instance_data->disabled.shader_validation = true;
1831                break;
1832            case VK_VALIDATION_CHECK_ALL_EXT:
1833                // Set all disabled flags to true
1834                instance_data->disabled.SetAll(true);
1835                break;
1836            default:
1837                break;
1838        }
1839    }
1840}
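
// Illustrative application-side usage (values assumed): chaining
//   VkValidationCheckEXT check = VK_VALIDATION_CHECK_SHADERS_EXT;
//   VkValidationFlagsEXT val_flags = {VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT, nullptr, 1, &check};
// into VkInstanceCreateInfo::pNext disables shader validation in this layer (see the pNext walk
// in CreateInstance below).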
1841
1842VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
1843                                              VkInstance *pInstance) {
1844    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
1845
1846    assert(chain_info->u.pLayerInfo);
1847    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
1848    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
1849    if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
1850
1851    // Advance the link info for the next element on the chain
1852    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
1853
1854    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
1855    if (result != VK_SUCCESS) return result;
1856
1857    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
1858    instance_data->instance = *pInstance;
1859    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
1860    instance_data->report_data = debug_report_create_instance(
1861        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
1862    instance_data->extensions.InitFromInstanceCreateInfo(pCreateInfo);
1863    init_core_validation(instance_data, pAllocator);
1864
1865    ValidateLayerOrdering(*pCreateInfo);
1866    // Parse any pNext chains
1867    if (pCreateInfo->pNext) {
1868        GENERIC_HEADER *struct_header = (GENERIC_HEADER *)pCreateInfo->pNext;
1869        while (struct_header) {
1870            // Check for VkValidationFlagsExt
1871            // Check for VkValidationFlagsEXT
1872                SetDisabledFlags(instance_data, (VkValidationFlagsEXT *)struct_header);
1873            }
1874            struct_header = (GENERIC_HEADER *)struct_header->pNext;
1875        }
1876    }
1877
1878    return result;
1879}
1880
1881// Hook DestroyInstance to remove tableInstanceMap entry
1882VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
1883    // TODOSC : Shouldn't need any customization here
1884    dispatch_key key = get_dispatch_key(instance);
1885    // TBD: Is any locking needed this early, in case this function is called at the
1886    // same time by more than one thread?
1887    instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
1888    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
1889
1890    std::lock_guard<std::mutex> lock(global_lock);
1891    // Clean up logging callback, if any
1892    while (instance_data->logging_callback.size() > 0) {
1893        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
1894        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
1895        instance_data->logging_callback.pop_back();
1896    }
1897
1898    layer_debug_report_destroy_instance(instance_data->report_data);
1899    FreeLayerDataPtr(key, instance_layer_data_map);
1900}
1901
1902static bool ValidatePhysicalDeviceQueueFamily(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
1903                                              uint32_t requested_queue_family, int32_t err_code, const char *cmd_name,
1904                                              const char *queue_family_var_name, const char *vu_note = nullptr) {
1905    bool skip = false;
1906
1907    if (!vu_note) vu_note = validation_error_map[err_code];
1908
1909    const char *conditional_ext_cmd =
1910        instance_data->extensions.vk_khr_get_physical_device_properties_2 ? " or vkGetPhysicalDeviceQueueFamilyProperties2KHR" : "";
1911
1912    std::string count_note = (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
1913                                 ? "the pQueueFamilyPropertyCount was never obtained"
1914                                 : "i.e. is not less than " + std::to_string(pd_state->queue_family_count);
1915
1916    if (requested_queue_family >= pd_state->queue_family_count) {
1917        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
1918                        HandleToUint64(pd_state->phys_device), __LINE__, err_code, "DL",
1919                        "%s: %s (= %" PRIu32
1920                        ") is not less than any previously obtained pQueueFamilyPropertyCount from "
1921                        "vkGetPhysicalDeviceQueueFamilyProperties%s (%s). %s",
1922                        cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, count_note.c_str(), vu_note);
1923    }
1924    return skip;
1925}
1926
1927// Verify VkDeviceQueueCreateInfos
1928static bool ValidateDeviceQueueCreateInfos(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
1929                                           uint32_t info_count, const VkDeviceQueueCreateInfo *infos) {
1930    bool skip = false;
1931
1932    for (uint32_t i = 0; i < info_count; ++i) {
1933        const auto requested_queue_family = infos[i].queueFamilyIndex;
1934
1935        // Verify that requested queue family is known to be valid at this point in time
1936        std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
1937        skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, requested_queue_family, VALIDATION_ERROR_06c002fa,
1938                                                  "vkCreateDevice", queue_family_var_name.c_str());
1939
1940        // Verify that the requested queue count for the queue family is known to be valid at this point in time
1941        if (requested_queue_family < pd_state->queue_family_count) {
1942            const auto requested_queue_count = infos[i].queueCount;
1943            const auto queue_family_props_count = pd_state->queue_family_properties.size();
1944            const bool queue_family_has_props = requested_queue_family < queue_family_props_count;
1945            const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
1946                                                  ? " or vkGetPhysicalDeviceQueueFamilyProperties2KHR"
1947                                                  : "";
1948            std::string count_note =
1949                !queue_family_has_props
1950                    ? "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained"
1951                    : "i.e. is not less than or equal to " +
1952                          std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount);
1953
1954            if (!queue_family_has_props ||
1955                requested_queue_count > pd_state->queue_family_properties[requested_queue_family].queueCount) {
1956                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1957                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(pd_state->phys_device), __LINE__,
1958                                VALIDATION_ERROR_06c002fc, "DL",
1959                                "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
1960                                ") is not "
1961                                "less than or equal to available queue count for this "
1962                                "pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueFamilyIndex (=%" PRIu32
1963                                ") obtained previously "
1964                                "from vkGetPhysicalDeviceQueueFamilyProperties%s (%s). %s",
1965                                i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str(),
1966                                validation_error_map[VALIDATION_ERROR_06c002fc]);
1967            }
1968        }
1969    }
1970
1971    return skip;
1972}
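
// Illustrative failures (device properties assumed): on a device reporting 2 queue families where
// family 0 advertises queueCount = 1, a VkDeviceQueueCreateInfo with queueFamilyIndex = 2 trips
// VALIDATION_ERROR_06c002fa above, and {queueFamilyIndex = 0, queueCount = 4} trips
// VALIDATION_ERROR_06c002fc.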
1973
1974// Verify that features have been queried and that they are available
1975static bool ValidateRequestedFeatures(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
1976                                      const VkPhysicalDeviceFeatures *requested_features) {
1977    bool skip = false;
1978
1979    const VkBool32 *actual = reinterpret_cast<const VkBool32 *>(&pd_state->features);
1980    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
1981    // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
1982    //  Need to provide the struct member name with the issue. To do that seems like we'll
1983    //  have to loop through each struct member which should be done w/ codegen to keep in synch.
1984    uint32_t errors = 0;
1985    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1986    for (uint32_t i = 0; i < total_bools; i++) {
1987        if (requested[i] > actual[i]) {
1988            // TODO: Add index to struct member name helper to be able to include a feature name
1989            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1990                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
1991                            "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
1992                            "which is not available on this device.",
1993                            i);
1994            errors++;
1995        }
1996    }
1997    if (errors && (UNCALLED == pd_state->vkGetPhysicalDeviceFeaturesState)) {
1998        // If user didn't request features, notify them that they should
1999        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
2000        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2001                        0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
2002                        "You requested features that are unavailable on this device. You should first query feature "
2003                        "availability by calling vkGetPhysicalDeviceFeatures().");
2004    }
2005    return skip;
2006}
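// Illustrative application-side sketch of the pattern the check above enforces (hypothetical
// names; not part of this layer): query features first, then enable only what was reported.
//     VkPhysicalDeviceFeatures supported = {};
//     vkGetPhysicalDeviceFeatures(gpu, &supported);
//     VkPhysicalDeviceFeatures enabled = {};
//     if (supported.geometryShader) enabled.geometryShader = VK_TRUE;  // enable only supported features
//     device_create_info.pEnabledFeatures = &enabled;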
2007
2008VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
2009                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
2010    bool skip = false;
2011    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
2012
2013    std::unique_lock<std::mutex> lock(global_lock);
2014    auto pd_state = GetPhysicalDeviceState(instance_data, gpu);
2015
2016    // TODO: object_tracker should perhaps do this instead
2017    //       and it does not seem to currently work anyway -- the loader just crashes before this point
2018    if (!pd_state) {
2019        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2020                        0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
2021                        "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
2022    }
2023
2024    // Check that any requested features are available
2025    if (pCreateInfo->pEnabledFeatures) {
2026        skip |= ValidateRequestedFeatures(instance_data, pd_state, pCreateInfo->pEnabledFeatures);
2027    }
2028    skip |=
2029        ValidateDeviceQueueCreateInfos(instance_data, pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);
2030
2031    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2032
2033    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
2034
2035    assert(chain_info->u.pLayerInfo);
2036    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
2037    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
2038    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_data->instance, "vkCreateDevice");
2039    if (fpCreateDevice == NULL) {
2040        return VK_ERROR_INITIALIZATION_FAILED;
2041    }
2042
2043    // Advance the link info for the next element on the chain
2044    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
2045
2046    lock.unlock();
2047
2048    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
2049    if (result != VK_SUCCESS) {
2050        return result;
2051    }
2052
2053    lock.lock();
2054    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
2055
2056    device_data->instance_data = instance_data;
2057    // Setup device dispatch table
2058    layer_init_device_dispatch_table(*pDevice, &device_data->dispatch_table, fpGetDeviceProcAddr);
2059    device_data->device = *pDevice;
2060    // Save PhysicalDevice handle
2061    device_data->physical_device = gpu;
2062
2063    device_data->report_data = layer_debug_report_create_device(instance_data->report_data, *pDevice);
2064    device_data->extensions.InitFromDeviceCreateInfo(&instance_data->extensions, pCreateInfo);
2065
2066    // Get physical device limits for this device
2067    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(device_data->phys_dev_properties.properties));
2068    uint32_t count;
2069    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
2070    device_data->phys_dev_properties.queue_family_properties.resize(count);
2071    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
2072        gpu, &count, device_data->phys_dev_properties.queue_family_properties.data());
2073    // TODO: device limits should make sure these are compatible
2074    if (pCreateInfo->pEnabledFeatures) {
2075        device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
2076    } else {
2077        memset(&device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
2078    }
2079    // Store physical device properties and physical device mem limits into device layer_data structs
2080    instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &device_data->phys_dev_mem_props);
2081    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &device_data->phys_dev_props);
2082    lock.unlock();
2083
2084    ValidateLayerOrdering(*pCreateInfo);
2085
2086    return result;
2087}
2088
2090VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
2091    // TODOSC : Shouldn't need any customization here
2092    dispatch_key key = get_dispatch_key(device);
2093    layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);
2094    // Free all the memory
2095    std::unique_lock<std::mutex> lock(global_lock);
2096    deletePipelines(dev_data);
2097    dev_data->renderPassMap.clear();
2098    for (auto ii = dev_data->commandBufferMap.begin(); ii != dev_data->commandBufferMap.end(); ++ii) {
2099        delete (*ii).second;
2100    }
2101    dev_data->commandBufferMap.clear();
2102    // This will also delete all sets in the pool & remove them from setMap
2103    deletePools(dev_data);
2104    // All sets should be removed
2105    assert(dev_data->setMap.empty());
2106    dev_data->descriptorSetLayoutMap.clear();
2107    dev_data->imageViewMap.clear();
2108    dev_data->imageMap.clear();
2109    dev_data->imageSubresourceMap.clear();
2110    dev_data->imageLayoutMap.clear();
2111    dev_data->bufferViewMap.clear();
2112    dev_data->bufferMap.clear();
2113    // Queues persist until device is destroyed
2114    dev_data->queueMap.clear();
2115    // Report any memory leaks
2116    layer_debug_report_destroy_device(device);
2117    lock.unlock();
2118
2119#if DISPATCH_MAP_DEBUG
2120    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
2121#endif
2122
2123    dev_data->dispatch_table.DestroyDevice(device, pAllocator);
2124    FreeLayerDataPtr(key, layer_data_map);
2125}
2126
2127static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
2128
2129// For the given stage mask, if the Geometry shader stage is set w/o the geometryShader feature being enabled, report geo_error_id;
2130//   if the Tessellation Control and/or Evaluation shader stages are set w/o the tessellationShader feature being enabled, report tess_error_id
2131static bool ValidateStageMaskGsTsEnables(layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
2132                                         UNIQUE_VALIDATION_ERROR_CODE geo_error_id, UNIQUE_VALIDATION_ERROR_CODE tess_error_id) {
2133    bool skip = false;
2134    if (!dev_data->enabled_features.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
2135        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
2136                        geo_error_id, "DL",
2137                        "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when "
2138                        "device does not have geometryShader feature enabled. %s",
2139                        caller, validation_error_map[geo_error_id]);
2140    }
2141    if (!dev_data->enabled_features.tessellationShader &&
2142        (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
2143        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
2144                        tess_error_id, "DL",
2145                        "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT "
2146                        "and/or VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device "
2147                        "does not have tessellationShader feature enabled. %s",
2148                        caller, validation_error_map[tess_error_id]);
2149    }
2150    return skip;
2151}
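// Illustrative misuse caught above (hypothetical call; assumes the geometryShader feature was left disabled):
//     vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,  // geometry stage bit w/o the feature
//                          VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr);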
2152
2153// Loop through bound objects and increment their in_use counts.
2154static void IncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2155    for (auto obj : cb_node->object_bindings) {
2156        auto base_obj = GetStateStructPtrFromObject(dev_data, obj);
2157        if (base_obj) {
2158            base_obj->in_use.fetch_add(1);
2159        }
2160    }
2161}
2162// Track which resources are in-flight by atomically incrementing their "in_use" count
2163static void incrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2164    cb_node->submitCount++;
2165    cb_node->in_use.fetch_add(1);
2166
2167    // First, increment in_use for all "generic" objects bound to the cmd buffer, followed by special-case objects below
2168    IncrementBoundObjects(dev_data, cb_node);
2169    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2170    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2171    //  should then be flagged prior to calling this function
2172    for (auto drawDataElement : cb_node->drawData) {
2173        for (auto buffer : drawDataElement.buffers) {
2174            auto buffer_state = GetBufferState(dev_data, buffer);
2175            if (buffer_state) {
2176                buffer_state->in_use.fetch_add(1);
2177            }
2178        }
2179    }
2180    for (auto event : cb_node->writeEventsBeforeWait) {
2181        auto event_state = GetEventNode(dev_data, event);
2182        if (event_state) event_state->write_in_use++;
2183    }
2184}
2185
2186// Note: This function assumes that the global lock is held by the calling thread.
2187// For the given queue, verify the queue state up to the given seq number.
2188// Currently the only check is that, if there are events to be waited on prior to
2189//  a QueryReset, all such events have been signaled.
2190static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *initial_queue, uint64_t initial_seq) {
2191    bool skip = false;
2192
2193    // sequence number we want to validate up to, per queue
2194    std::unordered_map<QUEUE_STATE *, uint64_t> target_seqs { { initial_queue, initial_seq } };
2195    // sequence number we've completed validation for, per queue
2196    std::unordered_map<QUEUE_STATE *, uint64_t> done_seqs;
2197    std::vector<QUEUE_STATE *> worklist { initial_queue };
2198
2199    while (worklist.size()) {
2200        auto queue = worklist.back();
2201        worklist.pop_back();
2202
2203        auto target_seq = target_seqs[queue];
2204        auto seq = std::max(done_seqs[queue], queue->seq);
2205        auto sub_it = queue->submissions.begin() + int(seq - queue->seq);  // seq >= queue->seq
2206
2207        for (; seq < target_seq; ++sub_it, ++seq) {
2208            for (auto &wait : sub_it->waitSemaphores) {
2209                auto other_queue = GetQueueState(dev_data, wait.queue);
2210
2211                if (other_queue == queue)
2212                    continue;   // semaphores /always/ point backwards, so there is nothing new to validate here.
2213
2214                auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
2215                auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);
2216
2217                // if this wait is for another queue, and covers new sequence
2218                // numbers beyond what we've already validated, mark the new
2219                // target seq and (possibly-re)add the queue to the worklist.
2220                if (other_done_seq < other_target_seq) {
2221                    target_seqs[other_queue] = other_target_seq;
2222                    worklist.push_back(other_queue);
2223                }
2224            }
2225
2226            for (auto cb : sub_it->cbs) {
2227                auto cb_node = GetCBNode(dev_data, cb);
2228                if (cb_node) {
2229                    for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
2230                        for (auto event : queryEventsPair.second) {
2231                            if (dev_data->eventMap[event].needsSignaled) {
2232                                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2233                                                VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, HandleToUint64(queryEventsPair.first.pool), __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
2234                                                "Cannot get query results on queryPool 0x%" PRIx64
2235                                                " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
2236                                                HandleToUint64(queryEventsPair.first.pool), queryEventsPair.first.index,
2237                                                HandleToUint64(event));
2238                            }
2239                        }
2240                    }
2241                }
2242            }
2243        }
2244
2245        // finally mark the point we've now validated this queue to.
2246        done_seqs[queue] = seq;
2247    }
2248
2249    return skip;
2250}
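// Worked example (illustrative): if queue A's submission at seq 3 waits on a semaphore signaled by
// queue B at seq 5, and B has only been validated through seq 2, visiting A raises target_seqs[B]
// to 5 and re-adds B to the worklist, so B's submissions 3..5 are also checked before we return.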
2251
2252// When the given fence is retired, verify outstanding queue operations through the point of the fence
2253static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
2254    auto fence_state = GetFenceNode(dev_data, fence);
2255    if (VK_NULL_HANDLE != fence_state->signaler.first) {
2256        return VerifyQueueStateToSeq(dev_data, GetQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
2257    }
2258    return false;
2259}
2260
2261// Decrement in-use count for objects bound to command buffer
2262static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2263    BASE_NODE *base_obj = nullptr;
2264    for (auto obj : cb_node->object_bindings) {
2265        base_obj = GetStateStructPtrFromObject(dev_data, obj);
2266        if (base_obj) {
2267            base_obj->in_use.fetch_sub(1);
2268        }
2269    }
2270}
2271
2272static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
2273    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
2274
2275    // Roll this queue forward, one submission at a time.
2276    while (pQueue->seq < seq) {
2277        auto &submission = pQueue->submissions.front();
2278
2279        for (auto &wait : submission.waitSemaphores) {
2280            auto pSemaphore = GetSemaphoreNode(dev_data, wait.semaphore);
2281            if (pSemaphore) {
2282                pSemaphore->in_use.fetch_sub(1);
2283            }
2284            auto &lastSeq = otherQueueSeqs[wait.queue];
2285            lastSeq = std::max(lastSeq, wait.seq);
2286        }
2287
2288        for (auto &semaphore : submission.signalSemaphores) {
2289            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2290            if (pSemaphore) {
2291                pSemaphore->in_use.fetch_sub(1);
2292            }
2293        }
2294
2295        for (auto cb : submission.cbs) {
2296            auto cb_node = GetCBNode(dev_data, cb);
2297            if (!cb_node) {
2298                continue;
2299            }
2300            // First perform decrement on general case bound objects
2301            DecrementBoundResources(dev_data, cb_node);
2302            for (auto drawDataElement : cb_node->drawData) {
2303                for (auto buffer : drawDataElement.buffers) {
2304                    auto buffer_state = GetBufferState(dev_data, buffer);
2305                    if (buffer_state) {
2306                        buffer_state->in_use.fetch_sub(1);
2307                    }
2308                }
2309            }
2310            for (auto event : cb_node->writeEventsBeforeWait) {
2311                auto eventNode = dev_data->eventMap.find(event);
2312                if (eventNode != dev_data->eventMap.end()) {
2313                    eventNode->second.write_in_use--;
2314                }
2315            }
2316            for (auto queryStatePair : cb_node->queryToStateMap) {
2317                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
2318            }
2319            for (auto eventStagePair : cb_node->eventToStageMap) {
2320                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
2321            }
2322
2323            cb_node->in_use.fetch_sub(1);
2324        }
2325
2326        auto pFence = GetFenceNode(dev_data, submission.fence);
2327        if (pFence) {
2328            pFence->state = FENCE_RETIRED;
2329        }
2330
2331        pQueue->submissions.pop_front();
2332        pQueue->seq++;
2333    }
2334
2335    // Roll other queues forward to the highest seq we saw a wait for
2336    for (auto qs : otherQueueSeqs) {
2337        RetireWorkOnQueue(dev_data, GetQueueState(dev_data, qs.first), qs.second);
2338    }
2339}
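// Worked example (illustrative): retiring queue A through seq 7, where one of those submissions
// waited on (queue B, seq 4), records {B: 4} in otherQueueSeqs and then retires B through seq 4 --
// the satisfied wait is proof that B progressed at least that far.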
2340
2341// Submit a fence to a queue, delimiting previous fences and previously
2342// untracked work by it.
2343static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
2344    pFence->state = FENCE_INFLIGHT;
2345    pFence->signaler.first = pQueue->queue;
2346    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
2347}
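// e.g. (illustrative): with pQueue->seq == 10, two submissions still pending, and submitCount == 3,
// the fence's signaler is recorded as seq 10 + 2 + 3 = 15 on this queue.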
2348
2349static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2350    bool skip = false;
2351    if ((pCB->in_use.load() || current_submit_count > 1) &&
2352        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2353        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2354                        __LINE__, VALIDATION_ERROR_31a0008e, "DS",
2355                        "Command Buffer 0x%p is already in use and is not marked for simultaneous use. %s", pCB->commandBuffer,
2356                        validation_error_map[VALIDATION_ERROR_31a0008e]);
2357    }
2358    return skip;
2359}
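// Illustrative misuse caught above (hypothetical setup): submitting the same command buffer twice
// within one vkQueueSubmit without the simultaneous-use flag:
//     VkCommandBuffer cbs[2] = {cb, cb};     // same CB listed twice -> current_submit_count == 2
//     submit_info.commandBufferCount = 2;
//     submit_info.pCommandBuffers = cbs;     // legal only if begun w/ SIMULTANEOUS_USE_BIT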
2360
2361static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source,
2362                                       int current_submit_count, UNIQUE_VALIDATION_ERROR_CODE vu_id) {
2363    bool skip = false;
2364    if (dev_data->instance_data->disabled.command_buffer_state) return skip;
2365    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
2366    if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
2367        (cb_state->submitCount + current_submit_count > 1)) {
2368        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2369                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
2370                        "Commandbuffer 0x%p was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
2371                        "set, but has been submitted 0x%" PRIxLEAST64 " times.",
2372                        cb_state->commandBuffer, cb_state->submitCount + current_submit_count);
2373    }
2374
2375    // Validate that cmd buffers have been updated
2376    if (CB_RECORDED != cb_state->state) {
2377        if (CB_INVALID == cb_state->state) {
2378            skip |= ReportInvalidCommandBuffer(dev_data, cb_state, call_source);
2379        } else if (CB_NEW == cb_state->state) {
2380            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2381                            HandleToUint64(cb_state->commandBuffer), __LINE__, vu_id, "DS",
2382                            "Command buffer 0x%p used in the call to %s is unrecorded and contains no commands. %s",
2383                            cb_state->commandBuffer, call_source, validation_error_map[vu_id]);
2384        } else {  // Flag error for using CB w/o vkEndCommandBuffer() called
2385            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2386                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
2387                            "You must call vkEndCommandBuffer() on command buffer 0x%p before this call to %s!",
2388                            cb_state->commandBuffer, call_source);
2389        }
2390    }
2391    return skip;
2392}
2393
2394static bool validateResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2395    bool skip = false;
2396
2397    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2398    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2399    //  should then be flagged prior to calling this function
2400    for (auto drawDataElement : cb_node->drawData) {
2401        for (auto buffer : drawDataElement.buffers) {
2402            auto buffer_state = GetBufferState(dev_data, buffer);
2403            if (!buffer_state) {
2404                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
2405                                HandleToUint64(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
2406                                "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", HandleToUint64(buffer));
2407            }
2408        }
2409    }
2410    return skip;
2411}
2412
2413// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
2414bool ValidImageBufferQueue(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue, uint32_t count,
2415                           const uint32_t *indices) {
2416    bool found = false;
2417    bool skip = false;
2418    auto queue_state = GetQueueState(dev_data, queue);
2419    if (queue_state) {
2420        for (uint32_t i = 0; i < count; i++) {
2421            if (indices[i] == queue_state->queueFamilyIndex) {
2422                found = true;
2423                break;
2424            }
2425        }
2426
2427        if (!found) {
2428            skip = log_msg(
2429                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object->type], object->handle, __LINE__,
2430                DRAWSTATE_INVALID_QUEUE_FAMILY, "DS", "vkQueueSubmit: Command buffer 0x%" PRIxLEAST64 " contains %s 0x%" PRIxLEAST64
2431                                                      " which was not created allowing concurrent access to this queue family %d.",
2432                HandleToUint64(cb_node->commandBuffer), object_string[object->type], object->handle, queue_state->queueFamilyIndex);
2433        }
2434    }
2435    return skip;
2436}
2437
2438// Validate that queueFamilyIndices of primary command buffers match this queue
2439// Secondary command buffers were previously validated in vkCmdExecuteCommands().
2440static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
2441    bool skip = false;
2442    auto pPool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
2443    auto queue_state = GetQueueState(dev_data, queue);
2444
2445    if (pPool && queue_state) {
2446        if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
2447            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2448                            HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_31a00094, "DS",
2449                            "vkQueueSubmit: Primary command buffer 0x%p created in queue family %d is being submitted on queue "
2450                            "0x%p from queue family %d. %s",
2451                            pCB->commandBuffer, pPool->queueFamilyIndex, queue, queue_state->queueFamilyIndex,
2452                            validation_error_map[VALIDATION_ERROR_31a00094]);
2453        }
2454
2455        // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
2456        for (auto object : pCB->object_bindings) {
2457            if (object.type == kVulkanObjectTypeImage) {
2458                auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(object.handle));
2459                if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2460                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, image_state->createInfo.queueFamilyIndexCount,
2461                                                  image_state->createInfo.pQueueFamilyIndices);
2462                }
2463            } else if (object.type == kVulkanObjectTypeBuffer) {
2464                auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object.handle));
2465                if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2466                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, buffer_state->createInfo.queueFamilyIndexCount,
2467                                                  buffer_state->createInfo.pQueueFamilyIndices);
2468                }
2469            }
2470        }
2471    }
2472
2473    return skip;
2474}
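// Illustrative application-side sketch of satisfying the concurrent-access check above
// (hypothetical family indices):
//     uint32_t families[2] = {graphics_family, transfer_family};
//     image_info.sharingMode = VK_SHARING_MODE_CONCURRENT;
//     image_info.queueFamilyIndexCount = 2;
//     image_info.pQueueFamilyIndices = families;  // every submitting family must be listed here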
2475
2476static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2477    // Track in-use for resources off of primary and any secondary CBs
2478    bool skip = false;
2479
2480    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
2481    // on device
2482    skip |= validateCommandBufferSimultaneousUse(dev_data, pCB, current_submit_count);
2483
2484    skip |= validateResources(dev_data, pCB);
2485
2486    for (auto pSubCB : pCB->linkedCommandBuffers) {
2487        skip |= validateResources(dev_data, pSubCB);
2488        // TODO: replace with invalidateCommandBuffers() at recording.
2489        if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
2490            !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2491            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2492                            __LINE__, VALIDATION_ERROR_31a00092, "DS",
2493                            "Command buffer 0x%p was submitted with secondary buffer 0x%p but that buffer has subsequently been "
2494                            "bound to primary cmd buffer 0x%p and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set. %s",
2495                            pCB->commandBuffer, pSubCB->commandBuffer, pSubCB->primaryCommandBuffer,
2496                            validation_error_map[VALIDATION_ERROR_31a00092]);
2497        }
2498    }
2499
2500    skip |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()", current_submit_count, VALIDATION_ERROR_31a00090);
2501
2502    return skip;
2503}
2504
2505static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
2506    bool skip = false;
2507
2508    if (pFence) {
2509        if (pFence->state == FENCE_INFLIGHT) {
2510            // TODO: opportunities for VALIDATION_ERROR_31a00080, VALIDATION_ERROR_316008b4, VALIDATION_ERROR_16400a0e
2511            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2512                            HandleToUint64(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
2513                            "Fence 0x%" PRIx64 " is already in use by another submission.", HandleToUint64(pFence->fence));
2514        }
2515
2516        else if (pFence->state == FENCE_RETIRED) {
2517            // TODO: opportunities for VALIDATION_ERROR_31a0007e, VALIDATION_ERROR_316008b2, VALIDATION_ERROR_16400a0e
2518            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2519                            HandleToUint64(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
2520                            "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
2521                            HandleToUint64(pFence->fence));
2522        }
2523    }
2524
2525    return skip;
2526}
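// Illustrative correct fence reuse, avoiding both errors above (hypothetical handles):
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);  // fence leaves FENCE_INFLIGHT
//     vkResetFences(device, 1, &fence);                         // fence leaves the signaled/retired state
//     vkQueueSubmit(queue, 1, &submit_info, fence);             // safe to submit again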
2527
2528static void PostCallRecordQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2529                                      VkFence fence) {
2530    auto pQueue = GetQueueState(dev_data, queue);
2531    auto pFence = GetFenceNode(dev_data, fence);
2532
2533    // Mark the fence in-use.
2534    if (pFence) {
2535        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
2536    }
2537
2538    // Now process each individual submit
2539    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2540        std::vector<VkCommandBuffer> cbs;
2541        const VkSubmitInfo *submit = &pSubmits[submit_idx];
2542        vector<SEMAPHORE_WAIT> semaphore_waits;
2543        vector<VkSemaphore> semaphore_signals;
2544        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2545            VkSemaphore semaphore = submit->pWaitSemaphores[i];
2546            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2547            if (pSemaphore) {
2548                if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
2549                    semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
2550                    pSemaphore->in_use.fetch_add(1);
2551                }
2552                pSemaphore->signaler.first = VK_NULL_HANDLE;
2553                pSemaphore->signaled = false;
2554            }
2555        }
2556        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2557            VkSemaphore semaphore = submit->pSignalSemaphores[i];
2558            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2559            if (pSemaphore) {
2560                pSemaphore->signaler.first = queue;
2561                pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
2562                pSemaphore->signaled = true;
2563                pSemaphore->in_use.fetch_add(1);
2564                semaphore_signals.push_back(semaphore);
2565            }
2566        }
2567        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2568            auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2569            if (cb_node) {
2570                cbs.push_back(submit->pCommandBuffers[i]);
2571                for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
2572                    cbs.push_back(secondaryCmdBuffer->commandBuffer);
2573                }
2574                UpdateCmdBufImageLayouts(dev_data, cb_node);
2575                incrementResources(dev_data, cb_node);
2576                for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
2577                    incrementResources(dev_data, secondaryCmdBuffer);
2578                }
2579            }
2580        }
2581        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
2582                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
2583    }
2584
2585    if (pFence && !submitCount) {
2586        // If no submissions, but just dropping a fence on the end of the queue,
2587        // record an empty submission with just the fence, so we can determine
2588        // its completion.
2589        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(),
2590                                         fence);
2591    }
2592}
2593
2594static bool PreCallValidateQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2595                                       VkFence fence) {
2596    auto pFence = GetFenceNode(dev_data, fence);
2597    bool skip = ValidateFenceForSubmit(dev_data, pFence);
2598    if (skip) {
2599        return true;
2600    }
2601
2602    unordered_set<VkSemaphore> signaled_semaphores;
2603    unordered_set<VkSemaphore> unsignaled_semaphores;
2604    vector<VkCommandBuffer> current_cmds;
2605    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> localImageLayoutMap = dev_data->imageLayoutMap;
2606    // Now verify each individual submit
2607    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2608        const VkSubmitInfo *submit = &pSubmits[submit_idx];
2609        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2610            skip |= ValidateStageMaskGsTsEnables(dev_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()",
2611                                                 VALIDATION_ERROR_13c00098, VALIDATION_ERROR_13c0009a);
2612            VkSemaphore semaphore = submit->pWaitSemaphores[i];
2613            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2614            if (pSemaphore) {
2615                if (unsignaled_semaphores.count(semaphore) ||
2616                    (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
2617                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2618                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
2619                                    "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
2620                                    HandleToUint64(semaphore));
2621                } else {
2622                    signaled_semaphores.erase(semaphore);
2623                    unsignaled_semaphores.insert(semaphore);
2624                }
2625            }
2626        }
2627        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2628            VkSemaphore semaphore = submit->pSignalSemaphores[i];
2629            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2630            if (pSemaphore) {
2631                if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
2632                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2633                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
2634                                    "Queue 0x%p is signaling semaphore 0x%" PRIx64
2635                                    " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
2636                                    queue, HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
2637                } else {
2638                    unsignaled_semaphores.erase(semaphore);
2639                    signaled_semaphores.insert(semaphore);
2640                }
2641            }
2642        }
2643        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2644            auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2645            if (cb_node) {
2646                skip |= ValidateCmdBufImageLayouts(dev_data, cb_node, localImageLayoutMap);
2647                current_cmds.push_back(submit->pCommandBuffers[i]);
2648                skip |= validatePrimaryCommandBufferState(
2649                    dev_data, cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]));
2650                skip |= validateQueueFamilyIndices(dev_data, cb_node, queue);
2651
2652                // Potential early exit here as bad object state may crash in delayed function calls
2653                if (skip) {
2654                    return true;
2655                }
2656
2657                // Call submit-time functions to validate/update state
2658                for (auto &function : cb_node->validate_functions) {
2659                    skip |= function();
2660                }
2661                for (auto &function : cb_node->eventUpdates) {
2662                    skip |= function(queue);
2663                }
2664                for (auto &function : cb_node->queryUpdates) {
2665                    skip |= function(queue);
2666                }
2667            }
2668        }
2669    }
2670    return skip;
2671}
2672
2673VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
2674    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
2675    std::unique_lock<std::mutex> lock(global_lock);
2676
2677    bool skip = PreCallValidateQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
2678    lock.unlock();
2679
2680    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2681
2682    VkResult result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
2683
2684    lock.lock();
2685    PostCallRecordQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
2686    lock.unlock();
2687    return result;
2688}
2689
2690static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
2691    bool skip = false;
2692    if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
2693        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2694                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_16c004f8, "MEM",
2695                        "Number of currently valid memory objects is not less than the maximum allowed (%u). %s",
2696                        dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount,
2697                        validation_error_map[VALIDATION_ERROR_16c004f8]);
2698    }
2699    return skip;
2700}
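// Note (illustrative): the spec's required minimum for maxMemoryAllocationCount is only 4096, so
// apps typically sub-allocate many resources from a few large allocations rather than making one
// vkAllocateMemory call per resource; the limit can be read back via:
//     VkPhysicalDeviceProperties props;
//     vkGetPhysicalDeviceProperties(physical_device, &props);
//     uint32_t max_allocs = props.limits.maxMemoryAllocationCount;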
2701
2702static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
2703    add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
2704    return;
2705}
2706
2707VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
2708                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
2709    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
2710    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2711    std::unique_lock<std::mutex> lock(global_lock);
2712    bool skip = PreCallValidateAllocateMemory(dev_data);
2713    if (!skip) {
2714        lock.unlock();
2715        result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
2716        lock.lock();
2717        if (VK_SUCCESS == result) {
2718            PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
2719        }
2720    }
2721    return result;
2722}
2723
2724// For the given object node, if it is in use, flag a validation error and return the callback result, else return false
2725bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
2726                            UNIQUE_VALIDATION_ERROR_CODE error_code) {
2727    if (dev_data->instance_data->disabled.object_in_use) return false;
2728    bool skip = false;
2729    if (obj_node->in_use.load()) {
2730        skip |=
2731            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle,
2732                    __LINE__, error_code, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer. %s",
2733                    object_string[obj_struct.type], obj_struct.handle, validation_error_map[error_code]);
2734    }
2735    return skip;
2736}
2737
2738static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
2739    *mem_info = GetMemObjInfo(dev_data, mem);
2740    *obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory};
2741    if (dev_data->instance_data->disabled.free_memory) return false;
2742    bool skip = false;
2743    if (*mem_info) {
2744        skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, VALIDATION_ERROR_2880054a);
2745    }
2746    return skip;
2747}
2748
2749static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
2750    // Clear mem binding for any bound objects
2751    for (auto obj : mem_info->obj_bindings) {
2752        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle, __LINE__,
2753                MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
2754                obj.handle, HandleToUint64(mem_info->mem));
2755        switch (obj.type) {
2756            case kVulkanObjectTypeImage: {
2757                auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
2758                assert(image_state);  // Any destroyed images should already be removed from bindings
2759                image_state->binding.mem = MEMORY_UNBOUND;
2760                break;
2761            }
2762            case kVulkanObjectTypeBuffer: {
2763                auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
2764                assert(buffer_state);  // Any destroyed buffers should already be removed from bindings
2765                buffer_state->binding.mem = MEMORY_UNBOUND;
2766                break;
2767            }
2768            default:
2769                // Should only have buffer or image objects bound to memory
2770                assert(0);
2771        }
2772    }
2773    // Any bound cmd buffers are now invalid
2774    invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
2775    dev_data->memObjMap.erase(mem);
2776}
2777
2778VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
2779    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2780    DEVICE_MEM_INFO *mem_info = nullptr;
2781    VK_OBJECT obj_struct;
2782    std::unique_lock<std::mutex> lock(global_lock);
2783    bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
2784    if (!skip) {
2785        lock.unlock();
2786        dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
2787        lock.lock();
2788        if (mem != VK_NULL_HANDLE && mem_info) {
2789            PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
2790        }
2791    }
2792}
2793
2794// Validate that given Map memory range is valid. This means that the memory should not already be mapped,
2795//  and that the size of the map range should be:
2796//  1. Not zero
2797//  2. Within the size of the memory allocation
2798static bool ValidateMapMemRange(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
2799    bool skip = false;
2800
2801    if (size == 0) {
2802        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2803                       HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
2804                       "VkMapMemory: Attempting to map memory range of size zero");
2805    }
2806
2807    auto mem_element = dev_data->memObjMap.find(mem);
2808    if (mem_element != dev_data->memObjMap.end()) {
2809        auto mem_info = mem_element->second.get();
2810        // It is an application error to call VkMapMemory on an object that is already mapped
2811        if (mem_info->mem_range.size != 0) {
2812            skip =
2813                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2814                        HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
2815                        "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, HandleToUint64(mem));
2816        }
2817
2818        // Validate that offset + size is within object's allocationSize
2819        if (size == VK_WHOLE_SIZE) {
2820            if (offset >= mem_info->alloc_info.allocationSize) {
2821                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2822                               HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
2823                               "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
2824                               " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
2825                               offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
2826            }
2827        } else {
2828            if ((offset + size) > mem_info->alloc_info.allocationSize) {
2829                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2830                               HandleToUint64(mem), __LINE__, VALIDATION_ERROR_31200552, "MEM",
2831                               "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ". %s",
2832                               offset, size + offset, mem_info->alloc_info.allocationSize,
2833                               validation_error_map[VALIDATION_ERROR_31200552]);
2834            }
2835        }
2836    }
2837    return skip;
2838}
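// Illustrative calls against an allocation with allocationSize == 256 (hypothetical handles):
//     vkMapMemory(device, mem, 0, VK_WHOLE_SIZE, 0, &ptr);  // ok: maps the whole allocation
//     vkMapMemory(device, mem, 128, 128, 0, &ptr);          // ok: offset + size == allocationSize
//     vkMapMemory(device, mem, 192, 128, 0, &ptr);          // error: oversteps the allocation by 64 bytes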
2839
2840static void storeMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
2841    auto mem_info = GetMemObjInfo(dev_data, mem);
2842    if (mem_info) {
2843        mem_info->mem_range.offset = offset;
2844        mem_info->mem_range.size = size;
2845    }
2846}
2847
2848static bool deleteMemRanges(layer_data *dev_data, VkDeviceMemory mem) {
2849    bool skip = false;
2850    auto mem_info = GetMemObjInfo(dev_data, mem);
2851    if (mem_info) {
2852        if (!mem_info->mem_range.size) {
2853            // Valid Usage: memory must currently be mapped
2854            skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2855                           HandleToUint64(mem), __LINE__, VALIDATION_ERROR_33600562, "MEM",
2856                           "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64 ". %s", HandleToUint64(mem),
2857                           validation_error_map[VALIDATION_ERROR_33600562]);
2858        }
2859        mem_info->mem_range.size = 0;
2860        if (mem_info->shadow_copy) {
2861            free(mem_info->shadow_copy_base);
2862            mem_info->shadow_copy_base = 0;
2863            mem_info->shadow_copy = 0;
2864        }
2865    }
2866    return skip;
2867}
2868
2869// Guard value for pad data
2870static const char NoncoherentMemoryFillValue = 0xb;
2871
2872static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
2873                                     void **ppData) {
2874    auto mem_info = GetMemObjInfo(dev_data, mem);
2875    if (mem_info) {
2876        mem_info->p_driver_data = *ppData;
2877        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
2878        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
2879            mem_info->shadow_copy = 0;
2880        } else {
2881            if (size == VK_WHOLE_SIZE) {
2882                size = mem_info->alloc_info.allocationSize - offset;
2883            }
2884            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
2885            assert(SafeModulo(mem_info->shadow_pad_size,
2886                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
2887            // Ensure start of mapped region reflects hardware alignment constraints
2888            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
2889
2890            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
2891            uint64_t start_offset = offset % map_alignment;
2892            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
2893            mem_info->shadow_copy_base =
2894                malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
2895
2896            mem_info->shadow_copy =
2897                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
2898                                         ~(map_alignment - 1)) +
2899                start_offset;
2900            assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
2901                                  map_alignment) == 0);
2902
2903            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
2904            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
2905        }
2906    }
2907}
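// Layout of the shadow block handed back to the app for non-coherent memory (illustrative):
//   shadow_copy_base -> [alignment slack + start_offset][pad guard][user bytes: size][pad guard]
// *ppData = shadow_copy + shadow_pad_size, i.e. the start of the user-byte region; the whole
// guarded span is pre-filled with NoncoherentMemoryFillValue so over/under-writes can be detected.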
2908
2909// Verify that the state of a fence being waited on is appropriate. That is,
2910//  the fence should have been submitted on a queue or during acquire-next-image;
2911//  otherwise the wait has no way to ever complete (reported as a warning)
2912static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
2913    bool skip = false;
2914
2915    auto pFence = GetFenceNode(dev_data, fence);
2916    if (pFence) {
2917        if (pFence->state == FENCE_UNSIGNALED) {
2918            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2919                            HandleToUint64(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
2920                            "%s called for fence 0x%" PRIxLEAST64
2921                            " which has not been submitted on a Queue or during "
2922                            "acquire next image.",
2923                            apiCall, HandleToUint64(fence));
2924        }
2925    }
2926    return skip;
2927}
2928
2929static void RetireFence(layer_data *dev_data, VkFence fence) {
2930    auto pFence = GetFenceNode(dev_data, fence);
2931    if (pFence->signaler.first != VK_NULL_HANDLE) {
2932        // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
2933        RetireWorkOnQueue(dev_data, GetQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
2934    } else {
2935        // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
2936        // the fence as retired.
2937        pFence->state = FENCE_RETIRED;
2938    }
2939}
2940
2941static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
2942    if (dev_data->instance_data->disabled.wait_for_fences) return false;
2943    bool skip = false;
2944    for (uint32_t i = 0; i < fence_count; i++) {
2945        skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
2946        skip |= VerifyQueueStateToFence(dev_data, fences[i]);
2947    }
2948    return skip;
2949}
2950
2951static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
2952    // When we know that all fences are complete we can clean/remove their CBs
2953    if ((VK_TRUE == wait_all) || (1 == fence_count)) {
2954        for (uint32_t i = 0; i < fence_count; i++) {
2955            RetireFence(dev_data, fences[i]);
2956        }
2957    }
2958    // NOTE : The alternate case, where only some fences have completed, is not handled here. In
2959    //  that case, for the app to guarantee which fences completed, it will have to call
2960    //  vkGetFenceStatus(), at which point we'll clean/remove their CBs if complete.
2961}
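// Illustrative consequence of the NOTE above (hypothetical handles): an any-of wait proves nothing
// about individual fences, so nothing is retired until the app pins one down:
//     vkWaitForFences(device, 2, fences, VK_FALSE, timeout);  // VK_FALSE: returns when ANY fence signals
//     vkGetFenceStatus(device, fences[0]);                    // VK_SUCCESS here lets fences[0] be retired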
2962
2963VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
2964                                             uint64_t timeout) {
2965    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2966    // Verify fence status of submitted fences
2967    std::unique_lock<std::mutex> lock(global_lock);
2968    bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
2969    lock.unlock();
2970    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2971
2972    VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
2973
2974    if (result == VK_SUCCESS) {
2975        lock.lock();
2976        PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
2977        lock.unlock();
2978    }
2979    return result;
2980}
2981
2982static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
2983    if (dev_data->instance_data->disabled.get_fence_state) return false;
2984    return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
2985}
2986
2987static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }
2988
2989VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
2990    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2991    std::unique_lock<std::mutex> lock(global_lock);
2992    bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
2993    lock.unlock();
2994    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2995
2996    VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
2997    if (result == VK_SUCCESS) {
2998        lock.lock();
2999        PostCallRecordGetFenceStatus(dev_data, fence);
3000        lock.unlock();
3001    }
3002    return result;
3003}
3004
3005static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
3006    // Add queue to tracking set only if it is new
3007    auto result = dev_data->queues.emplace(queue);
3008    if (result.second) {
3009        QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
3010        queue_state->queue = queue;
3011        queue_state->queueFamilyIndex = q_family_index;
3012        queue_state->seq = 0;
3013    }
3014}
3015
3016VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
3017    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3018    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
3019    std::lock_guard<std::mutex> lock(global_lock);
3020
3021    PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
3022}
3023
3024static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
3025    *queue_state = GetQueueState(dev_data, queue);
3026    if (dev_data->instance_data->disabled.queue_wait_idle) return false;
3027    return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
3028}
3029
3030static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
3031    RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
3032}
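
// NOTE (editor's sketch of the sequence arithmetic): queue_state->seq counts submissions already
// retired on this queue, while queue_state->submissions holds the ones still pending. Thus
// (seq + submissions.size()) is the sequence number reached once everything currently queued has
// retired, i.e. "retire all work submitted so far" -- the target used both here and in the
// VerifyQueueStateToSeq() call above.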
3033
3034VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
3035    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
3036    QUEUE_STATE *queue_state = nullptr;
3037    std::unique_lock<std::mutex> lock(global_lock);
3038    bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
3039    lock.unlock();
3040    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3041    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
3042    if (VK_SUCCESS == result) {
3043        lock.lock();
3044        PostCallRecordQueueWaitIdle(dev_data, queue_state);
3045        lock.unlock();
3046    }
3047    return result;
3048}
3049
3050static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
3051    if (dev_data->instance_data->disabled.device_wait_idle) return false;
3052    bool skip = false;
3053    for (auto &queue : dev_data->queueMap) {
3054        skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
3055    }
3056    return skip;
3057}
3058
3059static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
3060    for (auto &queue : dev_data->queueMap) {
3061        RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
3062    }
3063}
3064
3065VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
3066    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3067    std::unique_lock<std::mutex> lock(global_lock);
3068    bool skip = PreCallValidateDeviceWaitIdle(dev_data);
3069    lock.unlock();
3070    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3071    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
3072    if (VK_SUCCESS == result) {
3073        lock.lock();
3074        PostCallRecordDeviceWaitIdle(dev_data);
3075        lock.unlock();
3076    }
3077    return result;
3078}
3079
3080static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
3081    *fence_node = GetFenceNode(dev_data, fence);
3082    *obj_struct = {HandleToUint64(fence), kVulkanObjectTypeFence};
3083    if (dev_data->instance_data->disabled.destroy_fence) return false;
3084    bool skip = false;
3085    if (*fence_node) {
3086        if ((*fence_node)->state == FENCE_INFLIGHT) {
3087            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
3088                            HandleToUint64(fence), __LINE__, VALIDATION_ERROR_24e008c0, "DS", "Fence 0x%" PRIx64 " is in use. %s",
3089                            HandleToUint64(fence), validation_error_map[VALIDATION_ERROR_24e008c0]);
3090        }
3091    }
3092    return skip;
3093}
3094
3095static void PostCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }
3096
3097VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
3098    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3099    // Common data objects used pre & post call
3100    FENCE_NODE *fence_node = nullptr;
3101    VK_OBJECT obj_struct;
3102    std::unique_lock<std::mutex> lock(global_lock);
3103    bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);
3104
3105    if (!skip) {
3106        lock.unlock();
3107        dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
3108        lock.lock();
3109        PostCallRecordDestroyFence(dev_data, fence);
3110    }
3111}
3112
3113static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
3114                                            VK_OBJECT *obj_struct) {
3115    *sema_node = GetSemaphoreNode(dev_data, semaphore);
3116    *obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
3117    if (dev_data->instance_data->disabled.destroy_semaphore) return false;
3118    bool skip = false;
3119    if (*sema_node) {
3120        skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, VALIDATION_ERROR_268008e2);
3121    }
3122    return skip;
3123}
3124
3125static void PostCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }
3126
3127VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
3128    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3129    SEMAPHORE_NODE *sema_node;
3130    VK_OBJECT obj_struct;
3131    std::unique_lock<std::mutex> lock(global_lock);
3132    bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
3133    if (!skip) {
3134        lock.unlock();
3135        dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
3136        lock.lock();
3137        PostCallRecordDestroySemaphore(dev_data, semaphore);
3138    }
3139}
3140
3141static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
3142    *event_state = GetEventNode(dev_data, event);
3143    *obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent};
3144    if (dev_data->instance_data->disabled.destroy_event) return false;
3145    bool skip = false;
3146    if (*event_state) {
3147        skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, VALIDATION_ERROR_24c008f2);
3148    }
3149    return skip;
3150}
3151
3152static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
3153    invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
3154    dev_data->eventMap.erase(event);
3155}
3156
3157VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
3158    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3159    EVENT_STATE *event_state = nullptr;
3160    VK_OBJECT obj_struct;
3161    std::unique_lock<std::mutex> lock(global_lock);
3162    bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
3163    if (!skip) {
3164        lock.unlock();
3165        dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
3166        lock.lock();
3167        if (event != VK_NULL_HANDLE) {
3168            PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
3169        }
3170    }
3171}
3172
3173static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
3174                                            VK_OBJECT *obj_struct) {
3175    *qp_state = GetQueryPoolNode(dev_data, query_pool);
3176    *obj_struct = {HandleToUint64(query_pool), kVulkanObjectTypeQueryPool};
3177    if (dev_data->instance_data->disabled.destroy_query_pool) return false;
3178    bool skip = false;
3179    if (*qp_state) {
3180        skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, VALIDATION_ERROR_26200632);
3181    }
3182    return skip;
3183}
3184
3185static void PostCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
3186                                           VK_OBJECT obj_struct) {
3187    invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
3188    dev_data->queryPoolMap.erase(query_pool);
3189}
3190
3191VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
3192    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3193    QUERY_POOL_NODE *qp_state = nullptr;
3194    VK_OBJECT obj_struct;
3195    std::unique_lock<std::mutex> lock(global_lock);
3196    bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
3197    if (!skip) {
3198        lock.unlock();
3199        dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
3200        lock.lock();
3201        if (queryPool != VK_NULL_HANDLE) {
3202            PostCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
3203        }
3204    }
3205}

3206static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
3207                                               uint32_t query_count, VkQueryResultFlags flags,
3208                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
3209    // TODO: clean this up -- rebuilding the full queries-in-flight map on every call is insanely wasteful.
3210    for (auto cmd_buffer : dev_data->commandBufferMap) {
3211        if (cmd_buffer.second->in_use.load()) {
3212            for (auto query_state_pair : cmd_buffer.second->queryToStateMap) {
3213                (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer.first);
3215            }
3216        }
3217    }
3218    if (dev_data->instance_data->disabled.get_query_pool_results) return false;
3219    bool skip = false;
3220    for (uint32_t i = 0; i < query_count; ++i) {
3221        QueryObject query = {query_pool, first_query + i};
3222        auto qif_pair = queries_in_flight->find(query);
3223        auto query_state_pair = dev_data->queryToStateMap.find(query);
3224        if (query_state_pair != dev_data->queryToStateMap.end()) {
3225            // Available and in flight
3226            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
3228                for (auto cmd_buffer : qif_pair->second) {
3229                    auto cb = GetCBNode(dev_data, cmd_buffer);
3230                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
3231                    if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
3232                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3233                                        VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
3234                                        "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
3235                                        HandleToUint64(query_pool), first_query + i);
3236                    }
3237                }
3238                // Unavailable and in flight
3239            } else if (qif_pair != queries_in_flight->end() && !query_state_pair->second) {
3241                // TODO : Can there be the same query in use by multiple command buffers in flight?
3242                bool make_available = false;
3243                for (auto cmd_buffer : qif_pair->second) {
3244                    auto cb = GetCBNode(dev_data, cmd_buffer);
3245                    make_available |= cb->queryToStateMap[query];
3246                }
3247                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
3248                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3249                                    VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
3250                                    "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
3251                                    HandleToUint64(query_pool), first_query + i);
3252                }
3253                // Unavailable
3254            } else if (!query_state_pair->second) {
3255                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3256                                __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
3257                                "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
3258                                HandleToUint64(query_pool), first_query + i);
3259            }
3260            // Uninitialized : no state has ever been recorded for this query
3261        } else {
3262            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3263                            __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
3264                            "Cannot get query results on queryPool 0x%" PRIx64
3265                            " with index %d as data has not been collected for this index.",
3266                            HandleToUint64(query_pool), first_query + i);
3267        }
3268    }
3269    return skip;
3270}
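
// Summary of the per-query outcomes above (editor's note, derived from the branch structure):
//   state recorded | available | in flight | outcome
//   ---------------+-----------+-----------+--------------------------------------------------------------
//   yes            | yes       | yes       | error unless the CB waited on events before the query reset
//   yes            | no        | yes       | error unless WAIT or PARTIAL is set and a CB makes it available
//   yes            | no        | no        | error: query result is unavailable
//   no             | -         | -         | error: no data has been collected for this index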
3271
3272static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
3273                                              uint32_t query_count,
3274                                              unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
3275    for (uint32_t i = 0; i < query_count; ++i) {
3276        QueryObject query = {query_pool, first_query + i};
3277        auto qif_pair = queries_in_flight->find(query);
3278        auto query_state_pair = dev_data->queryToStateMap.find(query);
3279        if (query_state_pair != dev_data->queryToStateMap.end()) {
3280            // Available and in flight
3281            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
3283                for (auto cmd_buffer : qif_pair->second) {
3284                    auto cb = GetCBNode(dev_data, cmd_buffer);
3285                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
3286                    if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
3287                        for (auto event : query_event_pair->second) {
3288                            dev_data->eventMap[event].needsSignaled = true;
3289                        }
3290                    }
3291                }
3292            }
3293        }
3294    }
3295}
3296
3297VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
3298                                                   size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
3299    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3300    unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
3301    std::unique_lock<std::mutex> lock(global_lock);
3302    bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
3303    lock.unlock();
3304    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3305    VkResult result =
3306        dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
3307    lock.lock();
3308    PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
3309    lock.unlock();
3310    return result;
3311}
3312
3313// Return true if the given ranges intersect, else false
3314// Prereq : For both ranges, range->end - range->start > 0. A non-positive extent would already have
3315//  triggered an error elsewhere, so it is not re-checked here.
3316// When one range is linear and the other is non-linear, the comparison pads both ranges out to
3317//  bufferImageGranularity. In that padded case, if an alias is encountered a validation warning is
3318//  reported and *skip may be set by the report callback, so callers that can hit the padded case
3319//  should merge in the returned skip value.
3320// The aliasing warning can be suppressed by passing skip_checks=true, for call sites outside the validation path.
3320static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip,
3321                            bool skip_checks) {
3322    *skip = false;
3323    auto r1_start = range1->start;
3324    auto r1_end = range1->end;
3325    auto r2_start = range2->start;
3326    auto r2_end = range2->end;
3327    VkDeviceSize pad_align = 1;
3328    if (range1->linear != range2->linear) {
3329        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
3330    }
3331    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
3332    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;
3333
3334    if (!skip_checks && (range1->linear != range2->linear)) {
3335        // In linear vs. non-linear case, warn of aliasing
3336        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
3337        const char *r1_type_str = range1->image ? "image" : "buffer";
3338        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
3339        const char *r2_type_str = range2->image ? "image" : "buffer";
3340        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
3341        *skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, 0,
3342                         MEMTRACK_INVALID_ALIASING, "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
3343                                                           " which may indicate a bug. For further info refer to the "
3344                                                           "Buffer-Image Granularity section of the Vulkan specification. "
3345                                                           "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/"
3346                                                           "xhtml/vkspec.html#resources-bufferimagegranularity)",
3347                         r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
3348    }
3349    // Ranges intersect
3350    return true;
3351}
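
// Worked example of the padding math above (editor's sketch; values are assumed):
//   Let bufferImageGranularity = 0x400, so pad_align = 0x400 when linearity differs.
//   range1 (linear buffer):    start = 0x0000, end = 0x03FF
//   range2 (optimal image):    start = 0x0400, end = 0x07FF
//   r1_end & ~0x3FF = 0x0000 and r2_start & ~0x3FF = 0x0400, so the first early-out fires: no intersection.
//   If range2 instead started at 0x03FF, both aligned values land in the same 0x400 block, neither
//   early-out fires, and the ranges are treated as intersecting (the aliasing-warning path above).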
3352// Simplified rangesIntersect that wraps [offset, end] in a temporary MEMORY_RANGE and checks it against range1
3353bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
3354    // Create a local MEMORY_RANGE struct to wrap offset/size
3355    MEMORY_RANGE range_wrap;
3356    // Match linear with range1 to avoid padding and the potential validation-warning path
3357    range_wrap.linear = range1->linear;
3358    range_wrap.start = offset;
3359    range_wrap.end = end;
3360    bool tmp_bool;
3361    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool, true);
3362}
3363// For the given mem_info, mark as valid every bound range that intersects the [offset, end] range
3364// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
3365static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
3366    bool tmp_bool = false;
3367    MEMORY_RANGE map_range = {};
3368    map_range.linear = true;
3369    map_range.start = offset;
3370    map_range.end = end;
3371    for (auto &handle_range_pair : mem_info->bound_ranges) {
3372        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool, false)) {
3373            // TODO : WARN here if tmp_bool true?
3374            handle_range_pair.second.valid = true;
3375        }
3376    }
3377}
3378
3379static bool ValidateInsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
3380                                      VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image,
3381                                      bool is_linear, const char *api_name) {
3382    bool skip = false;
3383
3384    MEMORY_RANGE range;
3385    range.image = is_image;
3386    range.handle = handle;
3387    range.linear = is_linear;
3388    range.valid = mem_info->global_valid;
3389    range.memory = mem_info->mem;
3390    range.start = memoryOffset;
3391    range.size = memRequirements.size;
3392    range.end = memoryOffset + memRequirements.size - 1;
3393    range.aliases.clear();
3394
3395    // Check for aliasing problems.
3396    for (auto &obj_range_pair : mem_info->bound_ranges) {
3397        auto check_range = &obj_range_pair.second;
3398        bool intersection_error = false;
3399        if (rangesIntersect(dev_data, &range, check_range, &intersection_error, false)) {
3400            skip |= intersection_error;
3401            range.aliases.insert(check_range);
3402        }
3403    }
3404
3405    if (memoryOffset >= mem_info->alloc_info.allocationSize) {
3406        UNIQUE_VALIDATION_ERROR_CODE error_code = is_image ? VALIDATION_ERROR_1740082c : VALIDATION_ERROR_1700080e;
3407        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3408                       HandleToUint64(mem_info->mem), __LINE__, error_code, "MEM",
3409                       "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
3410                       "), memoryOffset=0x%" PRIxLEAST64 " must be less than the memory allocation size 0x%" PRIxLEAST64 ". %s",
3411                       api_name, HandleToUint64(mem_info->mem), handle, memoryOffset, mem_info->alloc_info.allocationSize,
3412                       validation_error_map[error_code]);
3413    }
3414
3415    return skip;
3416}
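
// Worked example of the range math above (editor's sketch; values are assumed):
//   memoryOffset = 0x1000, memRequirements.size = 0x800 -> range = [0x1000, 0x17FF]; end is inclusive,
//   hence the "- 1". With alloc_info.allocationSize = 0x1000, memoryOffset (0x1000) >= allocationSize,
//   so the error above fires: the offset must leave at least one byte of the allocation for the resource.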
3417
3418// Object with given handle is being bound to memory w/ given mem_info struct.
3419//  Track the newly bound memory range with given memoryOffset
3420//  Also scan all previously bound ranges and record mutual aliases between them and the new range.
3421//  (Overlap errors are reported by ValidateInsertMemoryRange() before this record step runs, so the
3422//  intersection checks here pass skip_checks=true and this function returns nothing.)
3423// is_image indicates an image object, otherwise handle is for a buffer
3424// is_linear indicates a buffer or linear image
3425static void InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
3426                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
3427    MEMORY_RANGE range;
3428
3429    range.image = is_image;
3430    range.handle = handle;
3431    range.linear = is_linear;
3432    range.valid = mem_info->global_valid;
3433    range.memory = mem_info->mem;
3434    range.start = memoryOffset;
3435    range.size = memRequirements.size;
3436    range.end = memoryOffset + memRequirements.size - 1;
3437    range.aliases.clear();
3438    // Update Memory aliasing
3439    // Save the aliased ranges so we can copy them into the final map entry below. This can't be done inside the loop
3440    // because the final pointer doesn't exist yet; inserting into the map before the loop would make the loop compare the new range against itself.
3441    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
3442    for (auto &obj_range_pair : mem_info->bound_ranges) {
3443        auto check_range = &obj_range_pair.second;
3444        bool intersection_error = false;
3445        if (rangesIntersect(dev_data, &range, check_range, &intersection_error, true)) {
3446            range.aliases.insert(check_range);
3447            tmp_alias_ranges.insert(check_range);
3448        }
3449    }
3450    mem_info->bound_ranges[handle] = std::move(range);
3451    for (auto tmp_range : tmp_alias_ranges) {
3452        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
3453    }
3454    if (is_image)
3455        mem_info->bound_images.insert(handle);
3456    else
3457        mem_info->bound_buffers.insert(handle);
3458}
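
// Editor's sketch of the resulting bookkeeping for two overlapping ranges, A (existing) and B (new):
//   after InsertMemoryRange() for B:
//     bound_ranges[B.handle].aliases contains &bound_ranges[A.handle]  (recorded in the loop above)
//     bound_ranges[A.handle].aliases contains &bound_ranges[B.handle]  (patched in via tmp_alias_ranges)
//   This symmetry is what allows RemoveMemoryRange() below to unlink a range from all of its aliases.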
3459
3460static bool ValidateInsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
3461                                           VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
3462                                           const char *api_name) {
3463    return ValidateInsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear, api_name);
3464}
3465static void InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
3466                                   VkMemoryRequirements mem_reqs, bool is_linear) {
3467    InsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear);
3468}
3469
3470static bool ValidateInsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
3471                                            VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) {
3472    return ValidateInsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true, api_name);
3473}
3474static void InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
3475                                    VkMemoryRequirements mem_reqs) {
3476    InsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true);
3477}
3478
3479// Remove the MEMORY_RANGE struct for the given handle from bound_ranges of mem_info
3480//  is_image indicates if handle is for image or buffer
3481//  This function also removes the handle from the appropriate bound_images/bound_buffers set
3482//  and cleans up the alias links recorded against the range being removed.
3483static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
3484    auto erase_range = &mem_info->bound_ranges[handle];
3485    for (auto alias_range : erase_range->aliases) {
3486        alias_range->aliases.erase(erase_range);
3487    }
3488    erase_range->aliases.clear();
3489    mem_info->bound_ranges.erase(handle);
3490    if (is_image) {
3491        mem_info->bound_images.erase(handle);
3492    } else {
3493        mem_info->bound_buffers.erase(handle);
3494    }
3495}
3496
3497void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
3498
3499void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
3500
3501VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
3502    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3503    BUFFER_STATE *buffer_state = nullptr;
3504    VK_OBJECT obj_struct;
3505    std::unique_lock<std::mutex> lock(global_lock);
3506    bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
3507    if (!skip) {
3508        lock.unlock();
3509        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
3510        lock.lock();
3511        if (buffer != VK_NULL_HANDLE) {
3512            PostCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
3513        }
3514    }
3515}
3516
3517VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
3518    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3519    // Common data objects used pre & post call
3520    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
3521    VK_OBJECT obj_struct;
3522    std::unique_lock<std::mutex> lock(global_lock);
3523    // Validate state and gather common data before calling down the chain
3524    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
3525    if (!skip) {
3526        lock.unlock();
3527        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
3528        lock.lock();
3529        if (bufferView != VK_NULL_HANDLE) {
3530            PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
3531        }
3532    }
3533}
3534
3535VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
3536    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3537    IMAGE_STATE *image_state = nullptr;
3538    VK_OBJECT obj_struct;
3539    std::unique_lock<std::mutex> lock(global_lock);
3540    bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
3541    if (!skip) {
3542        lock.unlock();
3543        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
3544        lock.lock();
3545        if (image != VK_NULL_HANDLE) {
3546            PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
3547        }
3548    }
3549}
3550
3551static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
3552                                const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
3553    bool skip = false;
3554    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
3555        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3556                       HandleToUint64(mem_info->mem), __LINE__, msgCode, "MT",
3557                       "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
3558                       "type (0x%X) of this memory object 0x%" PRIx64 ". %s",
3559                       funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, HandleToUint64(mem_info->mem),
3560                       validation_error_map[msgCode]);
3561    }
3562    return skip;
3563}
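
// Worked example of the memoryTypeBits test above (editor's sketch; values are assumed):
//   VkMemoryRequirements::memoryTypeBits = 0b0101 -> the resource may live in memory types 0 or 2.
//   Memory allocated with memoryTypeIndex = 1: (1 << 1) & 0b0101 == 0      -> incompatible, error.
//   Memory allocated with memoryTypeIndex = 2: (1 << 2) & 0b0101 == 0b0100 -> compatible, no error.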
3564
3565static bool PreCallValidateBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
3566                                            VkDeviceSize memoryOffset) {
3567    bool skip = false;
3568    if (buffer_state) {
3569        std::unique_lock<std::mutex> lock(global_lock);
3570        // Track objects tied to memory
3571        uint64_t buffer_handle = HandleToUint64(buffer);
3572        skip = ValidateSetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, "vkBindBufferMemory()");
3573        if (!buffer_state->memory_requirements_checked) {
3574            // The spec doesn't explicitly require calling vkGetBufferMemoryRequirements() before vkBindBufferMemory(),
3575            // but it is implied: the memory being bound must conform to the VkMemoryRequirements that
3576            // vkGetBufferMemoryRequirements() reports for the buffer
3577            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3578                            buffer_handle, __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
3579                            "vkBindBufferMemory(): Binding memory to buffer 0x%" PRIxLEAST64
3580                            " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
3581                            buffer_handle);
3582            // Make the call for them so we can verify the state
3583            lock.unlock();
3584            dev_data->dispatch_table.GetBufferMemoryRequirements(dev_data->device, buffer, &buffer_state->requirements);
3585            lock.lock();
3586        }
3587
3588        // Validate bound memory range information
3589        auto mem_info = GetMemObjInfo(dev_data, mem);
3590        if (mem_info) {
3591            skip |= ValidateInsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements,
3592                                                    "vkBindBufferMemory()");
3593            skip |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, "vkBindBufferMemory()",
3594                                        VALIDATION_ERROR_17000816);
3595        }
3596
3597        // Validate memory requirements alignment
3598        if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
3599            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3600                            buffer_handle, __LINE__, VALIDATION_ERROR_17000818, "DS",
3601                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64
3602                            " but must be an integer multiple of the "
3603                            "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
3604                            ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
3605                            memoryOffset, buffer_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_17000818]);
3606        }
3607
3608        // Validate memory requirements size
3609        if (mem_info && (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset))) {
3610            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3611                            buffer_handle, __LINE__, VALIDATION_ERROR_1700081a, "DS",
3612                            "vkBindBufferMemory(): memory size minus memoryOffset is 0x%" PRIxLEAST64
3613                            " but must be at least as large as "
3614                            "VkMemoryRequirements::size value 0x%" PRIxLEAST64
3615                            ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
3616                            mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size,
3617                            validation_error_map[VALIDATION_ERROR_1700081a]);
3618        }
3619
3620        // Validate device limits alignments
3621        static const VkBufferUsageFlagBits usage_list[3] = {
3622            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
3623            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
3624        static const char *memory_type[3] = {"texel", "uniform", "storage"};
3625        static const char *offset_name[3] = {"minTexelBufferOffsetAlignment", "minUniformBufferOffsetAlignment",
3626                                             "minStorageBufferOffsetAlignment"};
3627
3628        // TODO:  vk_validation_stats.py cannot abide braces immediately preceding or following a validation error enum
3629        // clang-format off
3630        static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = { VALIDATION_ERROR_17000810, VALIDATION_ERROR_17000812,
3631            VALIDATION_ERROR_17000814 };
3632        // clang-format on
3633
3634        // Keep this table in sync with usage_list/memory_type/offset_name above
3635        const VkDeviceSize offset_requirement[3] = {
3636            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
3637            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
3638            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment};
3639        VkBufferUsageFlags usage = buffer_state->createInfo.usage;
3640
3641        for (int i = 0; i < 3; i++) {
3642            if (usage & usage_list[i]) {
3643                if (SafeModulo(memoryOffset, offset_requirement[i]) != 0) {
3644                    skip |= log_msg(
3645                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
3646                        __LINE__, msgCode[i], "DS", "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64
3647                                                    " but must be a multiple of "
3648                                                    "device limit %s 0x%" PRIxLEAST64 ". %s",
3649                        memory_type[i], memoryOffset, offset_name[i], offset_requirement[i], validation_error_map[msgCode[i]]);
3650                }
3651            }
3652        }
3653    }
3654    return skip;
3655}
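
// Worked example of the alignment checks above (editor's sketch; limits are assumed):
//   requirements.alignment = 0x100 and memoryOffset = 0x180 -> SafeModulo(0x180, 0x100) = 0x80 != 0,
//   so VALIDATION_ERROR_17000818 fires. For a VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT buffer with
//   minUniformBufferOffsetAlignment = 0x40, SafeModulo(0x180, 0x40) = 0, so the device-limit check
//   passes independently -- each alignment check above reports on its own.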
3656
3657static void PostCallRecordBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
3658                                           VkDeviceSize memoryOffset) {
3659    if (buffer_state) {
3660        std::unique_lock<std::mutex> lock(global_lock);
3661        // Track bound memory range information
3662        auto mem_info = GetMemObjInfo(dev_data, mem);
3663        if (mem_info) {
3664            InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
3665        }
3666
3667        // Track objects tied to memory
3668        uint64_t buffer_handle = HandleToUint64(buffer);
3669        SetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, "vkBindBufferMemory()");
3670
3671        buffer_state->binding.mem = mem;
3672        buffer_state->binding.offset = memoryOffset;
3673        buffer_state->binding.size = buffer_state->requirements.size;
3674    }
3675}
3676
3677VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
3678    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3679    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3680    auto buffer_state = GetBufferState(dev_data, buffer);
3681    bool skip = PreCallValidateBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset);
3682    if (!skip) {
3683        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
3684        if (result == VK_SUCCESS) {
3685            PostCallRecordBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset);
3686        }
3687    }
3688    return result;
3689}
3690
3691VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
3692                                                       VkMemoryRequirements *pMemoryRequirements) {
3693    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3694    dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
3695    auto buffer_state = GetBufferState(dev_data, buffer);
3696    if (buffer_state) {
3697        buffer_state->requirements = *pMemoryRequirements;
3698        buffer_state->memory_requirements_checked = true;
3699    }
3700}
3701
3702VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
3703    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3704    dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
3705    auto image_state = GetImageState(dev_data, image);
3706    if (image_state) {
3707        image_state->requirements = *pMemoryRequirements;
3708        image_state->memory_requirements_checked = true;
3709    }
3710}
3711
3712VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
3713    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3714    // Common data objects used pre & post call
3715    IMAGE_VIEW_STATE *image_view_state = nullptr;
3716    VK_OBJECT obj_struct;
3717    std::unique_lock<std::mutex> lock(global_lock);
3718    bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
3719    if (!skip) {
3720        lock.unlock();
3721        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
3722        lock.lock();
3723        if (imageView != VK_NULL_HANDLE) {
3724            PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
3725        }
3726    }
3727}
3728
3729VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
3730                                               const VkAllocationCallbacks *pAllocator) {
3731    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3732
3733    std::unique_lock<std::mutex> lock(global_lock);
3734    dev_data->shaderModuleMap.erase(shaderModule);
3735    lock.unlock();
3736
3737    dev_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
3738}
3739
3740static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
3741                                           VK_OBJECT *obj_struct) {
3742    *pipeline_state = getPipelineState(dev_data, pipeline);
3743    *obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline};
3744    if (dev_data->instance_data->disabled.destroy_pipeline) return false;
3745    bool skip = false;
3746    if (*pipeline_state) {
3747        skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, VALIDATION_ERROR_25c005fa);
3748    }
3749    return skip;
3750}
3751
3752static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
3753                                          VK_OBJECT obj_struct) {
3754    // Any bound cmd buffers are now invalid
3755    invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
3756    delete pipeline_state;
3757    dev_data->pipelineMap.erase(pipeline);
3758}
3759
3760VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
3761    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3762    PIPELINE_STATE *pipeline_state = nullptr;
3763    VK_OBJECT obj_struct;
3764    std::unique_lock<std::mutex> lock(global_lock);
3765    bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
3766    if (!skip) {
3767        lock.unlock();
3768        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
3769        lock.lock();
3770        if (pipeline != VK_NULL_HANDLE) {
3771            PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
3772        }
3773    }
3774}
3775
3776VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
3777                                                 const VkAllocationCallbacks *pAllocator) {
3778    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3779    std::unique_lock<std::mutex> lock(global_lock);
3780    dev_data->pipelineLayoutMap.erase(pipelineLayout);
3781    lock.unlock();
3782
3783    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
3784}
3785
3786static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
3787                                          VK_OBJECT *obj_struct) {
3788    *sampler_state = GetSamplerState(dev_data, sampler);
3789    *obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler};
3790    if (dev_data->instance_data->disabled.destroy_sampler) return false;
3791    bool skip = false;
3792    if (*sampler_state) {
3793        skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, VALIDATION_ERROR_26600874);
3794    }
3795    return skip;
3796}
3797
3798static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
3799                                         VK_OBJECT obj_struct) {
3800    // Any bound cmd buffers are now invalid
3801    if (sampler_state) invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
3802    dev_data->samplerMap.erase(sampler);
3803}
3804
3805VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
3806    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3807    SAMPLER_STATE *sampler_state = nullptr;
3808    VK_OBJECT obj_struct;
3809    std::unique_lock<std::mutex> lock(global_lock);
3810    bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
3811    if (!skip) {
3812        lock.unlock();
3813        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
3814        lock.lock();
3815        if (sampler != VK_NULL_HANDLE) {
3816            PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
3817        }
3818    }
3819}
3820
3821static void PostCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
3822    dev_data->descriptorSetLayoutMap.erase(ds_layout);
3823}
3824
3825VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
3826                                                      const VkAllocationCallbacks *pAllocator) {
3827    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3828    dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
3829    std::unique_lock<std::mutex> lock(global_lock);
3830    PostCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
3831}
3832
3833static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
3834                                                 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
3835    *desc_pool_state = GetDescriptorPoolState(dev_data, pool);
3836    *obj_struct = {HandleToUint64(pool), kVulkanObjectTypeDescriptorPool};
3837    if (dev_data->instance_data->disabled.destroy_descriptor_pool) return false;
3838    bool skip = false;
3839    if (*desc_pool_state) {
3840        skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, VALIDATION_ERROR_2440025e);
3841    }
3842    return skip;
3843}
3844
3845static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
3846                                                DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
3847    // Any bound cmd buffers are now invalid
3848    invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
3849    // Free sets that were in this pool
3850    for (auto ds : desc_pool_state->sets) {
3851        freeDescriptorSet(dev_data, ds);
3852    }
3853    dev_data->descriptorPoolMap.erase(descriptorPool);
3854    delete desc_pool_state;
3855}
3856
3857VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
3858                                                 const VkAllocationCallbacks *pAllocator) {
3859    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3860    DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
3861    VK_OBJECT obj_struct;
3862    std::unique_lock<std::mutex> lock(global_lock);
3863    bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
3864    if (!skip) {
3865        lock.unlock();
3866        dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
3867        lock.lock();
3868        if (descriptorPool != VK_NULL_HANDLE) {
3869            PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
3870        }
3871    }
3872}
3873// Verify that the command buffer in the given cb_node is not in-flight (in_use), and return the skip result
3874// This function is only valid at a point when cmdBuffer is being reset or freed
3877static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
3878                                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
3879    bool skip = false;
3880    if (cb_node->in_use.load()) {
3881        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3882                        HandleToUint64(cb_node->commandBuffer), __LINE__, error_code, "DS",
3883                        "Attempt to %s command buffer (0x%p) which is in use. %s", action, cb_node->commandBuffer,
3884                        validation_error_map[error_code]);
3885    }
3886    return skip;
3887}
3888
3889// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
3890static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
3891                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
3892    bool skip = false;
3893    for (auto cmd_buffer : pPool->commandBuffers) {
3894        skip |= checkCommandBufferInFlight(dev_data, GetCBNode(dev_data, cmd_buffer), action, error_code);
3895    }
3896    return skip;
3897}
3898
3899VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
3900                                              const VkCommandBuffer *pCommandBuffers) {
3901    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3902    bool skip = false;
3903    std::unique_lock<std::mutex> lock(global_lock);
3904
3905    for (uint32_t i = 0; i < commandBufferCount; i++) {
3906        auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
3907        // Delete CB information structure, and remove from commandBufferMap
3908        if (cb_node) {
3909            skip |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_2840005e);
3910        }
3911    }
3912
3913    if (skip) return;
3914
3915    auto pPool = GetCommandPoolNode(dev_data, commandPool);
3916    for (uint32_t i = 0; i < commandBufferCount; i++) {
3917        auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
3918        // Delete CB information structure, and remove from commandBufferMap
3919        if (cb_node) {
3920            // Reset prior to delete so tracked state and memory references are cleaned up
3921            // TODO: fix this, it's insane.
3922            resetCB(dev_data, cb_node->commandBuffer);
3923            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
3924            delete cb_node;
3925        }
3926
3927        // Remove commandBuffer reference from commandPoolMap
3928        pPool->commandBuffers.remove(pCommandBuffers[i]);
3929    }
3930    lock.unlock();
3931
3932    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
3933}
3934
3935VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
3936                                                 const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
3937    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3938
3939    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
3940
3941    if (VK_SUCCESS == result) {
3942        std::lock_guard<std::mutex> lock(global_lock);
3943        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
3944        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
3945    }
3946    return result;
3947}
3948
3949VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
3950                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
3951    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3952    bool skip = false;
3953    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
3954        if (!dev_data->enabled_features.pipelineStatisticsQuery) {
3955            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3956                            __LINE__, VALIDATION_ERROR_11c0062e, "DS",
3957                            "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device "
3958                            "with VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE. %s",
3959                            validation_error_map[VALIDATION_ERROR_11c0062e]);
3960        }
3961    }
3962
3963    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3964    if (!skip) {
3965        result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
3966    }
3967    if (result == VK_SUCCESS) {
3968        std::lock_guard<std::mutex> lock(global_lock);
3969        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
3970        qp_node->createInfo = *pCreateInfo;
3971    }
3972    return result;
3973}
3974
3975static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE **cp_state) {
3976    *cp_state = GetCommandPoolNode(dev_data, pool);
3977    if (dev_data->instance_data->disabled.destroy_command_pool) return false;
3978    bool skip = false;
3979    if (*cp_state) {
3980        // Verify that command buffers in pool are complete (not in-flight)
3981        skip |= checkCommandBuffersInFlight(dev_data, *cp_state, "destroy command pool with", VALIDATION_ERROR_24000052);
3982    }
3983    return skip;
3984}
3985
3986static void PostCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE *cp_state) {
3987    // Remove the command pool from commandPoolMap only after removing all of its command buffers from the commandBufferMap
3988    for (auto cb : cp_state->commandBuffers) {
3989        auto cb_node = GetCBNode(dev_data, cb);
3990        clear_cmd_buf_and_mem_references(dev_data, cb_node);
3991        // Remove references to this cb_node prior to delete
3992        // TODO : Need better solution here, resetCB?
3993        for (auto obj : cb_node->object_bindings) {
3994            removeCommandBufferBinding(dev_data, &obj, cb_node);
3995        }
3996        for (auto framebuffer : cb_node->framebuffers) {
3997            auto fb_state = GetFramebufferState(dev_data, framebuffer);
3998            if (fb_state) fb_state->cb_bindings.erase(cb_node);
3999        }
4000        dev_data->commandBufferMap.erase(cb);  // Remove this command buffer
4001        delete cb_node;                        // delete CB info structure
4002    }
4003    dev_data->commandPoolMap.erase(pool);
4004}
4005
4006// Destroy commandPool along with all of the commandBuffers allocated from that pool
4007VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
4008    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4009    COMMAND_POOL_NODE *cp_state = nullptr;
4010    std::unique_lock<std::mutex> lock(global_lock);
4011    bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool, &cp_state);
4012    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
        lock.lock();
        if (commandPool != VK_NULL_HANDLE) {
            PostCallRecordDestroyCommandPool(dev_data, commandPool, cp_state);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;

    std::unique_lock<std::mutex> lock(global_lock);
    auto pPool = GetCommandPoolNode(dev_data, commandPool);
    skip |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_32800050);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result) {
        lock.lock();
        for (auto cmdBuffer : pPool->commandBuffers) {
            resetCB(dev_data, cmdBuffer);
        }
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto pFence = GetFenceNode(dev_data, pFences[i]);
        if (pFence && pFence->state == FENCE_INFLIGHT) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        HandleToUint64(pFences[i]), __LINE__, VALIDATION_ERROR_32e008c6, "DS", "Fence 0x%" PRIx64 " is in use. %s",
                        HandleToUint64(pFences[i]), validation_error_map[VALIDATION_ERROR_32e008c6]);
        }
    }
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);

    if (result == VK_SUCCESS) {
        lock.lock();
        for (uint32_t i = 0; i < fenceCount; ++i) {
            auto pFence = GetFenceNode(dev_data, pFences[i]);
            if (pFence) {
                pFence->state = FENCE_UNSIGNALED;
            }
        }
        lock.unlock();
    }

    return result;
}

// For given cb_nodes, invalidate them and track object causing invalidation
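// Note: only secondary command buffers recurse below, and their linkedCommandBuffers are the primaries that
// recorded them (vkCmdExecuteCommands is primary-only), so the propagation is effectively one level deep.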
void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
    for (auto cb_node : cb_nodes) {
        if (cb_node->state == CB_RECORDING) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "Invalidating a command buffer that's currently being recorded: 0x%p.", cb_node->commandBuffer);
        }
        cb_node->state = CB_INVALID;
        cb_node->broken_bindings.push_back(obj);

        // If secondary, then propagate the invalidation to the primaries that will call us.
        if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
            invalidateCommandBuffers(dev_data, cb_node->linkedCommandBuffers, obj);
        }
    }
}

static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
                                              FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
    *framebuffer_state = GetFramebufferState(dev_data, framebuffer);
    *obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer};
    if (dev_data->instance_data->disabled.destroy_framebuffer) return false;
    bool skip = false;
    if (*framebuffer_state) {
        skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, VALIDATION_ERROR_250006f8);
    }
    return skip;
}

static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
                                             VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
    dev_data->frameBufferMap.erase(framebuffer);
}

VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    FRAMEBUFFER_STATE *framebuffer_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
        lock.lock();
        if (framebuffer != VK_NULL_HANDLE) {
            PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
        }
    }
}

static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
                                             VK_OBJECT *obj_struct) {
    *rp_state = GetRenderPassState(dev_data, render_pass);
    *obj_struct = {HandleToUint64(render_pass), kVulkanObjectTypeRenderPass};
    if (dev_data->instance_data->disabled.destroy_renderpass) return false;
    bool skip = false;
    if (*rp_state) {
        skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, VALIDATION_ERROR_264006d2);
    }
    return skip;
}

static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
                                            VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
    dev_data->renderPassMap.erase(render_pass);
}

VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    RENDER_PASS_STATE *rp_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
        lock.lock();
        if (renderPass != VK_NULL_HANDLE) {
            PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCreateBuffer(dev_data, pCreateInfo);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateBuffer(dev_data, pCreateInfo, pBuffer);
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateBufferView(dev_data, pCreateInfo, pView);
        lock.unlock();
    }
    return result;
}

// Access helper functions for external modules
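// NOTE: GetFormatProperties() and GetImageFormatProperties() below allocate a fresh struct with new on every
// call and hand it back as a const pointer that callers are not expected to free, so each call leaks its
// result. TODO: have callers supply the output struct (or cache results per format) instead.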
const VkFormatProperties *GetFormatProperties(core_validation::layer_data *device_data, VkFormat format) {
    VkFormatProperties *format_properties = new VkFormatProperties;
    instance_layer_data *instance_data =
        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
    instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, format_properties);
    return format_properties;
}

const VkImageFormatProperties *GetImageFormatProperties(core_validation::layer_data *device_data, VkFormat format,
                                                        VkImageType image_type, VkImageTiling tiling, VkImageUsageFlags usage,
                                                        VkImageCreateFlags flags) {
    VkImageFormatProperties *image_format_properties = new VkImageFormatProperties;
    instance_layer_data *instance_data =
        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
    instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties(device_data->physical_device, format, image_type, tiling,
                                                                         usage, flags, image_format_properties);
    return image_format_properties;
}

const debug_report_data *GetReportData(const core_validation::layer_data *device_data) { return device_data->report_data; }

const VkPhysicalDeviceProperties *GetPhysicalDeviceProperties(core_validation::layer_data *device_data) {
    return &device_data->phys_dev_props;
}

const CHECK_DISABLED *GetDisables(core_validation::layer_data *device_data) { return &device_data->instance_data->disabled; }

std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *device_data) {
    return &device_data->imageMap;
}

std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(core_validation::layer_data *device_data) {
    return &device_data->imageSubresourceMap;
}

std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *device_data) {
    return &device_data->imageLayoutMap;
}

std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *GetImageLayoutMap(layer_data const *device_data) {
    return &device_data->imageLayoutMap;
}

std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *GetBufferMap(layer_data *device_data) {
    return &device_data->bufferMap;
}

std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *GetBufferViewMap(layer_data *device_data) {
    return &device_data->bufferViewMap;
}

std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *GetImageViewMap(layer_data *device_data) {
    return &device_data->imageViewMap;
}

const PHYS_DEV_PROPERTIES_NODE *GetPhysDevProperties(const layer_data *device_data) {
    return &device_data->phys_dev_properties;
}

const VkPhysicalDeviceFeatures *GetEnabledFeatures(const layer_data *device_data) {
    return &device_data->enabled_features;
}

const DeviceExtensions *GetDeviceExtensions(const layer_data *device_data) { return &device_data->extensions; }

VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = PreCallValidateCreateImage(dev_data, pCreateInfo, pAllocator, pImage);
    if (!skip) {
        result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
    }
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        PostCallRecordCreateImage(dev_data, pCreateInfo, pImage);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
        lock.unlock();
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        auto &fence_node = dev_data->fenceMap[*pFence];
        fence_node.fence = *pFence;
        fence_node.createInfo = *pCreateInfo;
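        // A fence created already-signaled has no prior submission to wait on, so it's tracked as
        // FENCE_RETIRED (signaled/complete) rather than as in-flight.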
        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
    }
    return result;
}

// TODO handle pipeline caches
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
                                                const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize,
                                                    void *pData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
                                                   const VkPipelineCache *pSrcCaches) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
    return result;
}

// utility function to set collective state for pipeline
void set_pipeline_state(PIPELINE_STATE *pPipe) {
    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
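    // The constant-color blend factors occupy a contiguous range of the VkBlendFactor enum,
    // [VK_BLEND_FACTOR_CONSTANT_COLOR, VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA], which is what the
    // range comparisons below rely on.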
    if (pPipe->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
                    pPipe->blendConstantsEnabled = true;
                }
            }
        }
    }
}

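// Dual-source blend factors (the VK_BLEND_FACTOR_*SRC1* values) require the dualSrcBlend device feature and
// may appear in any of an attachment's four blend factors, so all four are checked below.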
bool validate_dual_src_blend_feature(layer_data *device_data, PIPELINE_STATE *pipe_state) {
    bool skip = false;
    if (pipe_state->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pipe_state->attachments.size(); ++i) {
            if (!device_data->enabled_features.dualSrcBlend) {
                if ((pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
                    (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                    (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
                    (pipe_state->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                    (pipe_state->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
                    (pipe_state->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                    (pipe_state->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
                    skip |=
                        log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                HandleToUint64(pipe_state->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
                                "CmdBindPipeline: vkPipeline (0x%" PRIxLEAST64 ") attachment[" PRINTF_SIZE_T_SPECIFIER
                                "] has a dual-source blend factor but this device feature is not enabled.",
                                HandleToUint64(pipe_state->pipeline), i);
                }
            }
        }
    }
    return skip;
}

static bool PreCallCreateGraphicsPipelines(layer_data *device_data, uint32_t count,
                                           const VkGraphicsPipelineCreateInfo *create_infos, vector<PIPELINE_STATE *> &pipe_state) {
    bool skip = false;
    instance_layer_data *instance_data =
        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);

    for (uint32_t i = 0; i < count; i++) {
        skip |= verifyPipelineCreateState(device_data, pipe_state, i);
        if (create_infos[i].pVertexInputState != NULL) {
            for (uint32_t j = 0; j < create_infos[i].pVertexInputState->vertexAttributeDescriptionCount; j++) {
                VkFormat format = create_infos[i].pVertexInputState->pVertexAttributeDescriptions[j].format;
                // Internal call to get format info.  Still goes through layers, could potentially go directly to ICD.
                VkFormatProperties properties;
                instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &properties);
                if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
                    skip |= log_msg(
                        device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        __LINE__, VALIDATION_ERROR_14a004de, "IMAGE",
                        "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
                        "(%s) is not a supported vertex buffer format. %s",
                        i, j, string_VkFormat(format), validation_error_map[VALIDATION_ERROR_14a004de]);
                }
            }
        }
    }
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    // TODO What to do with pipelineCache?
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    bool skip = false;
    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_STATE *> pipe_state(count);
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    std::unique_lock<std::mutex> lock(global_lock);

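    // Shadow each create info into a PIPELINE_STATE before validating. Note that the renderPass handle is
    // dereferenced via GetRenderPassState() without a null check, so an invalid handle in pCreateInfos
    // would crash here rather than be diagnosed.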
    for (i = 0; i < count; i++) {
        pipe_state[i] = new PIPELINE_STATE;
        pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i]);
        pipe_state[i]->render_pass_ci.initialize(GetRenderPassState(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
        pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
    }
    skip |= PreCallCreateGraphicsPipelines(dev_data, count, pCreateInfos, pipe_state);

    if (skip) {
        for (i = 0; i < count; i++) {
            delete pipe_state[i];
            pPipelines[i] = VK_NULL_HANDLE;
        }
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    lock.unlock();
    auto result =
        dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    for (i = 0; i < count; i++) {
        if (pPipelines[i] == VK_NULL_HANDLE) {
            delete pipe_state[i];
        } else {
            pipe_state[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pipe_state[i]->pipeline] = pipe_state[i];
        }
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                      const VkComputePipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    bool skip = false;

    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_STATE *> pPipeState(count);
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    std::unique_lock<std::mutex> lock(global_lock);
    for (i = 0; i < count; i++) {
        // TODO: Verify compute stage bits

        // Create and initialize internal tracking data structure
        pPipeState[i] = new PIPELINE_STATE;
        pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
        pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);

        // TODO: Add Compute Pipeline Verification
        skip |= validate_compute_pipeline(dev_data, pPipeState[i]);
        // skip |= verifyPipelineCreateState(dev_data, pPipeState[i]);
    }

    if (skip) {
        for (i = 0; i < count; i++) {
            // Clean up any locally allocated data structures
            delete pPipeState[i];
            pPipelines[i] = VK_NULL_HANDLE;
        }
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    lock.unlock();
    auto result =
        dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    for (i = 0; i < count; i++) {
        if (pPipelines[i] == VK_NULL_HANDLE) {
            delete pPipeState[i];
        } else {
            pPipeState[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
        }
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
    }
    return result;
}

static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
    if (dev_data->instance_data->disabled.create_descriptor_set_layout) return false;
    return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info);
}

static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
                                                    VkDescriptorSetLayout set_layout) {
    dev_data->descriptorSetLayoutMap[set_layout] = std::unique_ptr<cvdescriptorset::DescriptorSetLayout>(
        new cvdescriptorset::DescriptorSetLayout(create_info, set_layout));
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                                         const VkAllocationCallbacks *pAllocator,
                                                         VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
    if (!skip) {
        lock.unlock();
        result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
        if (VK_SUCCESS == result) {
            lock.lock();
            PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
        }
    }
    return result;
}

// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                      const char *caller_name, uint32_t index = 0) {
    if (dev_data->instance_data->disabled.push_constant_range) return false;
    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
    bool skip = false;
    // Check that offset + size don't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
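    // e.g. with maxPushConstantsSize == 128, offset == 64 and size == 0xFFFFFFC0 would wrap around and
    // incorrectly pass a naive (offset + size) <= maxPushConstantsSize test.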
    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (offset >= maxPushConstantsSize) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_11a0024c, "DS",
                                "%s call has push constants index %u with offset %u that "
                                "exceeds this device's maxPushConstantsSize of %u. %s",
                                caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_11a0024c]);
            }
            if (size > maxPushConstantsSize - offset) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_11a00254, "DS",
                                "%s call has push constants index %u with offset %u and size %u that "
                                "exceeds this device's maxPushConstantsSize of %u. %s",
                                caller_name, index, offset, size, maxPushConstantsSize,
                                validation_error_map[VALIDATION_ERROR_11a00254]);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (offset >= maxPushConstantsSize) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_1bc002e4, "DS",
                                "%s call has push constants index %u with offset %u that "
                                "exceeds this device's maxPushConstantsSize of %u. %s",
                                caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_1bc002e4]);
            }
            if (size > maxPushConstantsSize - offset) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_1bc002e6, "DS",
                                "%s call has push constants index %u with offset %u and size %u that "
                                "exceeds this device's maxPushConstantsSize of %u. %s",
                                caller_name, index, offset, size, maxPushConstantsSize,
                                validation_error_map[VALIDATION_ERROR_1bc002e6]);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // size needs to be non-zero and a multiple of 4.
    if ((size == 0) || ((size & 0x3) != 0)) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (size == 0) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_11a00250, "DS",
                                "%s call has push constants index %u with "
                                "size %u. Size must be greater than zero. %s",
                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_11a00250]);
            }
            if (size & 0x3) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_11a00252, "DS",
                                "%s call has push constants index %u with "
                                "size %u. Size must be a multiple of 4. %s",
                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_11a00252]);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (size == 0) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_1bc2c21b, "DS",
                                "%s call has push constants index %u with "
                                "size %u. Size must be greater than zero. %s",
                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_1bc2c21b]);
            }
            if (size & 0x3) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_1bc002e2, "DS",
                                "%s call has push constants index %u with "
                                "size %u. Size must be a multiple of 4. %s",
                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_1bc002e2]);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // offset needs to be a multiple of 4.
    if ((offset & 0x3) != 0) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_11a0024e, "DS",
                            "%s call has push constants index %u with "
                            "offset %u. Offset must be a multiple of 4. %s",
                            caller_name, index, offset, validation_error_map[VALIDATION_ERROR_11a0024e]);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_1bc002e0, "DS",
                            "%s call has push constants with "
                            "offset %u. Offset must be a multiple of 4. %s",
                            caller_name, offset, validation_error_map[VALIDATION_ERROR_1bc002e0]);
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // TODO : Add checks for VALIDATION_ERRORS 865-870
    // Push Constant Range checks
    uint32_t i, j;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skip |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
                                          pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_11a2dc03, "DS", "vkCreatePipelineLayout() call has no stageFlags set. %s",
                            validation_error_map[VALIDATION_ERROR_11a2dc03]);
        }
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    // As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
            if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_0fe00248, "DS",
                                "vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d. %s", i, j,
                                validation_error_map[VALIDATION_ERROR_0fe00248]);
            }
        }
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
        plNode.layout = *pPipelineLayout;
        plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
            plNode.set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
        }
        plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
            plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
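        // NOTE: a plain (non-nothrow) new throws std::bad_alloc rather than returning NULL, so the check
        // below can never fire; an allocation failure would actually surface as an exception.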
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        HandleToUint64(*pDescriptorPool), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            std::lock_guard<std::mutex> lock(global_lock);
            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
        }
    } else {
        // TODO: Anything to clean up if pool creation fails?
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                   VkDescriptorPoolResetFlags flags) {
    // TODO : Add checks for VALIDATION_ERROR_32a00272
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
    }
    return result;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills common_data with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // Always update common data
    cvdescriptorset::UpdateAllocateDescriptorSetsData(dev_data, pAllocateInfo, common_data);
    if (dev_data->instance_data->disabled.allocate_descriptor_sets) return false;
    // All state checks for AllocateDescriptorSets are done in a single function
    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data, pAllocateInfo, common_data);
}
// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                 VkDescriptorSet *pDescriptorSets,
                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // All the updates are contained in a single cvdescriptorset function
    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
                                                   &dev_data->setMap, dev_data);
}

VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                      VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
    bool skip = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
        lock.unlock();
    }
    return result;
}
// Verify state before freeing DescriptorSets
static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                              const VkDescriptorSet *descriptor_sets) {
    if (dev_data->instance_data->disabled.free_descriptor_sets) return false;
    bool skip = false;
    // First make sure sets being destroyed are not currently in-use
    for (uint32_t i = 0; i < count; ++i) {
        if (descriptor_sets[i] != VK_NULL_HANDLE) {
            skip |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
        }
    }

    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
    if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
        // Can't Free from a NON_FREE pool
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        HandleToUint64(pool), __LINE__, VALIDATION_ERROR_28600270, "DS",
                        "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                        "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
                        validation_error_map[VALIDATION_ERROR_28600270]);
    }
    return skip;
}
// Sets have been removed from the pool so update underlying state
static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                             const VkDescriptorSet *descriptor_sets) {
    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
    // Update available descriptor sets in pool
    pool_state->availableSets += count;

    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
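    // availableDescriptorTypeCount appears to be indexed by the numeric VkDescriptorType value, which is what
    // the static_cast below produces.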
    for (uint32_t i = 0; i < count; ++i) {
        if (descriptor_sets[i] != VK_NULL_HANDLE) {
            auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
            uint32_t type_index = 0, descriptor_count = 0;
            for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
                type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
                descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
                pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
            }
            freeDescriptorSet(dev_data, descriptor_set);
            pool_state->sets.erase(descriptor_set);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                                  const VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
        lock.unlock();
    }
    return result;
}
// TODO : This is a Proof-of-concept for core validation architecture
//  Really we'll want to break out these functions to separate files but
//  keeping it all together here to prove out design
// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    if (dev_data->instance_data->disabled.update_descriptor_sets) return false;
    // NOTE: UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets, so a
    //  single up-front map look-up isn't possible; the look-ups are done individually in the functions below.

    // Make the call(s) that validate state, but don't perform any state updates in this function.
    // There is no single DescriptorSet instance to dispatch to here, so a helper function in the
    //  cvdescriptorset namespace parses the params and makes calls into the specific class instances.
    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
                                                         descriptorCopyCount, pDescriptorCopies);
}
// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                               const VkCopyDescriptorSet *pDescriptorCopies) {
    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                 pDescriptorCopies);
}

VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    // Only map look-up at top level is for device-level layer_data
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                    pDescriptorCopies);
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                      pDescriptorCopies);
        lock.lock();
        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                           pDescriptorCopies);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
                                                      VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        std::unique_lock<std::mutex> lock(global_lock);
        auto pPool = GetCommandPoolNode(dev_data, pCreateInfo->commandPool);

        if (pPool) {
            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                pPool->commandBuffers.push_back(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                resetCB(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pCreateInfo;
                pCB->device = device;
            }
        }
        lock.unlock();
    }
    return result;
}

// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
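// The framebuffer itself and its render pass are each bound once; the loop then binds every attachment's
// image view to the command buffer.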
4910static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
4911    addCommandBufferBinding(&fb_state->cb_bindings, {HandleToUint64(fb_state->framebuffer), kVulkanObjectTypeFramebuffer},
4912                            cb_state);
4913    for (auto attachment : fb_state->attachments) {
4914        auto view_state = attachment.view_state;
4915        if (view_state) {
4916            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
4917        }
4918        auto rp_state = GetRenderPassState(dev_data, fb_state->createInfo.renderPass);
4919        if (rp_state) {
4920            addCommandBufferBinding(&rp_state->cb_bindings, {HandleToUint64(rp_state->renderPass), kVulkanObjectTypeRenderPass},
4921                                    cb_state);
4922        }
4923    }
4924}
4925
4926VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
4927    bool skip = false;
4928    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
4929    std::unique_lock<std::mutex> lock(global_lock);
4930    // Validate command buffer level
4931    GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
4932    if (cb_node) {
4933        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
4934        if (cb_node->in_use.load()) {
4935            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4936                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00062, "MEM",
4937                            "Calling vkBeginCommandBuffer() on active command buffer %p before it has completed. "
4938                            "You must check command buffer fence before this call. %s",
4939                            commandBuffer, validation_error_map[VALIDATION_ERROR_16e00062]);
4940        }
4941        clear_cmd_buf_and_mem_references(dev_data, cb_node);
4942        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
4943            // Secondary Command Buffer
4944            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
4945            if (!pInfo) {
4946                skip |=
4947                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4948                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00066, "DS",
4949                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info. %s", commandBuffer,
4950                            validation_error_map[VALIDATION_ERROR_16e00066]);
4951            } else {
4952                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
4953                    assert(pInfo->renderPass);
4954                    string errorString = "";
4955                    auto framebuffer = GetFramebufferState(dev_data, pInfo->framebuffer);
4956                    if (framebuffer) {
4957                        if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
4958                            !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
4959                                                             GetRenderPassState(dev_data, pInfo->renderPass)->createInfo.ptr(),
4960                                                             errorString)) {
4961                            // renderPass that framebuffer was created with must be compatible with local renderPass
4962                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4963                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
4964                                            VALIDATION_ERROR_0280006e, "DS",
4965                                            "vkBeginCommandBuffer(): Secondary Command "
4966                                            "Buffer (0x%p) renderPass (0x%" PRIxLEAST64
4967                                            ") is incompatible w/ framebuffer "
4968                                            "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s. %s",
4969                                            commandBuffer, HandleToUint64(pInfo->renderPass), HandleToUint64(pInfo->framebuffer),
4970                                            HandleToUint64(framebuffer->createInfo.renderPass), errorString.c_str(),
4971                                            validation_error_map[VALIDATION_ERROR_0280006e]);
4972                        }
4973                        // Connect this framebuffer and its children to this cmdBuffer
4974                        AddFramebufferBinding(dev_data, cb_node, framebuffer);
4975                    }
4976                }
4977                if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
4978                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
4979                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4980                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
4981                                    VALIDATION_ERROR_16e00068, "DS",
4982                                    "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
                                    "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQueryEnable is disabled or the device does not "
4984                                    "support precise occlusion queries. %s",
4985                                    commandBuffer, validation_error_map[VALIDATION_ERROR_16e00068]);
4986                }
4987            }
4988            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
4989                auto renderPass = GetRenderPassState(dev_data, pInfo->renderPass);
4990                if (renderPass) {
4991                    if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
4992                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4993                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
4994                                        VALIDATION_ERROR_0280006c, "DS",
                                        "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have a subpass index (%d) "
4996                                        "that is less than the number of subpasses (%d). %s",
4997                                        commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount,
4998                                        validation_error_map[VALIDATION_ERROR_0280006c]);
4999                    }
5000                }
5001            }
5002        }
5003        if (CB_RECORDING == cb_node->state) {
5004            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5005                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00062, "DS",
5006                            "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%p"
5007                            ") in the RECORDING state. Must first call vkEndCommandBuffer(). %s",
5008                            commandBuffer, validation_error_map[VALIDATION_ERROR_16e00062]);
5009        } else if (CB_RECORDED == cb_node->state || (CB_INVALID == cb_node->state && CMD_END == cb_node->last_cmd)) {
5010            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
5011            auto pPool = GetCommandPoolNode(dev_data, cmdPool);
5012            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
5013                skip |=
5014                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5015                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00064, "DS",
5016                            "Call to vkBeginCommandBuffer() on command buffer (0x%p"
5017                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT set. %s",
5019                            commandBuffer, HandleToUint64(cmdPool), validation_error_map[VALIDATION_ERROR_16e00064]);
5020            }
5021            resetCB(dev_data, commandBuffer);
5022        }
5023        // Set updated state here in case implicit reset occurs above
5024        cb_node->state = CB_RECORDING;
5025        cb_node->beginInfo = *pBeginInfo;
5026        if (cb_node->beginInfo.pInheritanceInfo) {
5027            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
5028            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
            // If this is a secondary command buffer that inherits render pass state, update the items we should inherit
5030            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
5031                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
5032                cb_node->activeRenderPass = GetRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
5033                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
5034                cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
5035                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
5036            }
5037        }
5038    }
5039    lock.unlock();
5040    if (skip) {
5041        return VK_ERROR_VALIDATION_FAILED_EXT;
5042    }
    return dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
5046}
5047
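// Validation for vkEndCommandBuffer(): recording must not end inside a render pass (for a primary, or a
// secondary not continuing a render pass), and every query begun in this command buffer must have been
// ended; on success the tracked state becomes CB_RECORDED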
5048VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
5049    bool skip = false;
5050    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5051    std::unique_lock<std::mutex> lock(global_lock);
5052    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5053    if (pCB) {
5054        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
5055            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
5056            // This needs spec clarification to update valid usage, see comments in PR:
5057            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
5058            skip |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_27400078);
5059        }
5060        skip |= ValidateCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
5061        UpdateCmdBufferLastCmd(pCB, CMD_END);
5062        for (auto query : pCB->activeQueries) {
5063            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5064                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_2740007a, "DS",
5065                            "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d. %s",
5066                            HandleToUint64(query.pool), query.index, validation_error_map[VALIDATION_ERROR_2740007a]);
5067        }
5068    }
5069    if (!skip) {
5070        lock.unlock();
5071        auto result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
5072        lock.lock();
5073        if (VK_SUCCESS == result) {
5074            pCB->state = CB_RECORDED;
5075        }
5076        return result;
5077    } else {
5078        return VK_ERROR_VALIDATION_FAILED_EXT;
5079    }
5080}
5081
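// Validation for vkResetCommandBuffer(): the owning pool must have been created with
// VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT and the command buffer must not be in flight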
5082VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
5083    bool skip = false;
5084    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5085    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    assert(pCB);  // pCB is dereferenced unconditionally below
5087    VkCommandPool cmdPool = pCB->createInfo.commandPool;
5088    auto pPool = GetCommandPoolNode(dev_data, cmdPool);
5089    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
5090        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5091                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_3260005c, "DS",
5092                        "Attempt to reset command buffer (0x%p) created from command pool (0x%" PRIxLEAST64
                        ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT set. %s",
5094                        commandBuffer, HandleToUint64(cmdPool), validation_error_map[VALIDATION_ERROR_3260005c]);
5095    }
5096    skip |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_3260005a);
5097    lock.unlock();
5098    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5099    VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
5100    if (VK_SUCCESS == result) {
5101        lock.lock();
5102        resetCB(dev_data, commandBuffer);
5103        lock.unlock();
5104    }
5105    return result;
5106}
5107
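// Validation/state tracking for vkCmdBindPipeline(): a compute pipeline may not be bound while a render
// pass is active, and the pipeline (plus the render pass a graphics pipeline was created against) is
// bound to this command buffer for invalidation tracking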
5108VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
5109                                           VkPipeline pipeline) {
5110    bool skip = false;
5111    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5112    std::unique_lock<std::mutex> lock(global_lock);
5113    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
5114    if (cb_state) {
5115        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
5116                                      VALIDATION_ERROR_18002415);
5117        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
5118        UpdateCmdBufferLastCmd(cb_state, CMD_BINDPIPELINE);
5119        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (cb_state->activeRenderPass)) {
5120            skip |=
5121                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
5122                        HandleToUint64(pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
5123                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
5124                        HandleToUint64(pipeline), HandleToUint64(cb_state->activeRenderPass->renderPass));
5125        }
5126        // TODO: VALIDATION_ERROR_18000612 VALIDATION_ERROR_18000616
5127
        PIPELINE_STATE *pipe_state = getPipelineState(dev_data, pipeline);
        if (pipe_state) {
            cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
            set_cb_pso_status(cb_state, pipe_state);
            set_pipeline_state(pipe_state);
            skip |= validate_dual_src_blend_feature(dev_data, pipe_state);
            // Only record bindings when the pipeline actually exists; pipe_state is dereferenced here
            addCommandBufferBinding(&pipe_state->cb_bindings, {HandleToUint64(pipeline), kVulkanObjectTypePipeline}, cb_state);
            if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
                // Add binding for child renderpass
                auto rp_state = GetRenderPassState(dev_data, pipe_state->graphicsPipelineCI.renderPass);
                if (rp_state) {
                    addCommandBufferBinding(&rp_state->cb_bindings,
                                            {HandleToUint64(rp_state->renderPass), kVulkanObjectTypeRenderPass}, cb_state);
                }
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pipeline), __LINE__, VALIDATION_ERROR_18027e01, "DS",
                            "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist! %s", HandleToUint64(pipeline),
                            validation_error_map[VALIDATION_ERROR_18027e01]);
        }
5149    }
5150    lock.unlock();
5151    if (!skip) dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
5152}
5153
5154VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
5155                                          const VkViewport *pViewports) {
5156    bool skip = false;
5157    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5158    std::unique_lock<std::mutex> lock(global_lock);
5159    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5160    if (pCB) {
5161        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1e002415);
5162        skip |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
5163        UpdateCmdBufferLastCmd(pCB, CMD_SETVIEWPORTSTATE);
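        // Record which viewports now have state: bits [firstViewport, firstViewport + viewportCount)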
5164        pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
5165    }
5166    lock.unlock();
5167    if (!skip) dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
5168}
5169
5170VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
5171                                         const VkRect2D *pScissors) {
5172    bool skip = false;
5173    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5174    std::unique_lock<std::mutex> lock(global_lock);
5175    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5176    if (pCB) {
5177        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d802415);
5178        skip |= ValidateCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
5179        UpdateCmdBufferLastCmd(pCB, CMD_SETSCISSORSTATE);
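        // Record which scissors now have state: bits [firstScissor, firstScissor + scissorCount)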
5180        pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
5181    }
5182    lock.unlock();
5183    if (!skip) dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
5184}
5185
5186VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
5187    bool skip = false;
5188    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5189    std::unique_lock<std::mutex> lock(global_lock);
5190    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5191    if (pCB) {
5192        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d602415);
5193        skip |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
5194        UpdateCmdBufferLastCmd(pCB, CMD_SETLINEWIDTHSTATE);
5195        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
5196
5197        PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
5198        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
5199            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5200                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1d600626, "DS",
5201                            "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
                            "flag. This is undefined behavior and the value set here may be ignored. %s",
5203                            validation_error_map[VALIDATION_ERROR_1d600626]);
5204        } else {
5205            skip |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, kVulkanObjectTypeCommandBuffer, HandleToUint64(commandBuffer),
5206                                    lineWidth);
5207        }
5208    }
5209    lock.unlock();
5210    if (!skip) dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
5211}
5212
5213VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
5214                                           float depthBiasSlopeFactor) {
5215    bool skip = false;
5216    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5217    std::unique_lock<std::mutex> lock(global_lock);
5218    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5219    if (pCB) {
5220        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1cc02415);
5221        skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
5222        if ((depthBiasClamp != 0.0) && (!dev_data->enabled_features.depthBiasClamp)) {
5223            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5224                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1cc0062c, "DS",
5225                            "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp "
5226                            "parameter must be set to 0.0. %s",
5227                            validation_error_map[VALIDATION_ERROR_1cc0062c]);
5228        }
5229        if (!skip) {
5230            UpdateCmdBufferLastCmd(pCB, CMD_SETDEPTHBIASSTATE);
5231            pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
5232        }
5233    }
5234    lock.unlock();
5235    if (!skip)
5236        dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
5237}
5238
5239VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
5240    bool skip = false;
5241    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5242    std::unique_lock<std::mutex> lock(global_lock);
5243    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5244    if (pCB) {
5245        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ca02415);
5246        skip |= ValidateCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
5247        UpdateCmdBufferLastCmd(pCB, CMD_SETBLENDSTATE);
5248        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
5249    }
5250    lock.unlock();
5251    if (!skip) dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
5252}
5253
5254VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
5255    bool skip = false;
5256    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5257    std::unique_lock<std::mutex> lock(global_lock);
5258    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5259    if (pCB) {
5260        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ce02415);
5261        skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
5262        UpdateCmdBufferLastCmd(pCB, CMD_SETDEPTHBOUNDSSTATE);
5263        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
5264    }
5265    lock.unlock();
5266    if (!skip) dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
5267}
5268
5269VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
5270                                                    uint32_t compareMask) {
5271    bool skip = false;
5272    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5273    std::unique_lock<std::mutex> lock(global_lock);
5274    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5275    if (pCB) {
5276        skip |=
5277            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1da02415);
5278        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
5279        UpdateCmdBufferLastCmd(pCB, CMD_SETSTENCILREADMASKSTATE);
5280        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
5281    }
5282    lock.unlock();
5283    if (!skip) dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
5284}
5285
5286VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
5287    bool skip = false;
5288    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5289    std::unique_lock<std::mutex> lock(global_lock);
5290    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5291    if (pCB) {
5292        skip |=
5293            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1de02415);
5294        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
5295        UpdateCmdBufferLastCmd(pCB, CMD_SETSTENCILWRITEMASKSTATE);
5296        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
5297    }
5298    lock.unlock();
5299    if (!skip) dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
5300}
5301
5302VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
5303    bool skip = false;
5304    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5305    std::unique_lock<std::mutex> lock(global_lock);
5306    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5307    if (pCB) {
5308        skip |=
5309            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1dc02415);
5310        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
5311        UpdateCmdBufferLastCmd(pCB, CMD_SETSTENCILREFERENCESTATE);
5312        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
5313    }
5314    lock.unlock();
5315    if (!skip) dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
5316}
5317
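// Validation/state tracking for vkCmdBindDescriptorSets(): each set must exist and be layout-compatible
// with the given pipelineLayout at its slot, exactly one dynamic offset must be supplied per dynamic
// descriptor, and previously bound sets disturbed by an incompatible layout are invalidated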
5318VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
5319                                                 VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
5320                                                 const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
5321                                                 const uint32_t *pDynamicOffsets) {
5322    bool skip = false;
5323    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5324    std::unique_lock<std::mutex> lock(global_lock);
5325    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
5326    if (cb_state) {
5327        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
5328                                      VALIDATION_ERROR_17c02415);
5329        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
5330        // Track total count of dynamic descriptor types to make sure we have an offset for each one
5331        uint32_t total_dynamic_descriptors = 0;
5332        string error_string = "";
5333        uint32_t last_set_index = firstSet + setCount - 1;
5334        if (last_set_index >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
5335            cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
5336            cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(last_set_index + 1);
5337        }
5338        auto old_final_bound_set = cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[last_set_index];
5339        auto pipeline_layout = getPipelineLayout(dev_data, layout);
5340        for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
5341            cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(dev_data, pDescriptorSets[set_idx]);
5342            if (descriptor_set) {
5343                cb_state->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
5344                cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[set_idx + firstSet] = descriptor_set;
5345                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
5346                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]), __LINE__,
                                DRAWSTATE_NONE, "DS", "Descriptor Set 0x%" PRIxLEAST64 " bound at pipeline bind point %s",
5348                                HandleToUint64(pDescriptorSets[set_idx]), string_VkPipelineBindPoint(pipelineBindPoint));
5349                if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
5350                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
5351                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
5352                                    __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
5353                                    "Descriptor Set 0x%" PRIxLEAST64
5354                                    " bound but it was never updated. You may want to either update it or not bind it.",
5355                                    HandleToUint64(pDescriptorSets[set_idx]));
5356                }
5357                // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
5358                if (!verify_set_layout_compatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
5359                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5360                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
5361                                    __LINE__, VALIDATION_ERROR_17c002cc, "DS",
5362                                    "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
5363                                    "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s. %s",
5364                                    set_idx, set_idx + firstSet, HandleToUint64(layout), error_string.c_str(),
5365                                    validation_error_map[VALIDATION_ERROR_17c002cc]);
5366                }
5367
5368                auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
5369
5370                cb_state->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx].clear();
5371
5372                if (set_dynamic_descriptor_count) {
5373                    // First make sure we won't overstep bounds of pDynamicOffsets array
5374                    if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
5375                        skip |= log_msg(
5376                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
5377                            HandleToUint64(pDescriptorSets[set_idx]), __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
5378                            "descriptorSet #%u (0x%" PRIxLEAST64
5379                            ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
5380                            "array. There must be one dynamic offset for each dynamic descriptor being bound.",
5381                            set_idx, HandleToUint64(pDescriptorSets[set_idx]), descriptor_set->GetDynamicDescriptorCount(),
5382                            (dynamicOffsetCount - total_dynamic_descriptors));
5383                    } else {  // Validate and store dynamic offsets with the set
5384                        // Validate Dynamic Offset Minimums
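                        // Each dynamic uniform/storage buffer descriptor consumes one pDynamicOffsets entry in
                        // binding order, and each offset must honor the matching min*BufferOffsetAlignment limit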
5385                        uint32_t cur_dyn_offset = total_dynamic_descriptors;
5386                        for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
5387                            if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
5388                                if (SafeModulo(
5389                                        pDynamicOffsets[cur_dyn_offset],
5390                                        dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
5391                                    skip |=
5392                                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5393                                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
5394                                                VALIDATION_ERROR_17c002d4, "DS",
5395                                                "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
5396                                                "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
5397                                                cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
5398                                                dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
5399                                                validation_error_map[VALIDATION_ERROR_17c002d4]);
5400                                }
5401                                cur_dyn_offset++;
5402                            } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
5403                                if (SafeModulo(
5404                                        pDynamicOffsets[cur_dyn_offset],
5405                                        dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
5406                                    skip |=
5407                                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5408                                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
5409                                                VALIDATION_ERROR_17c002d4, "DS",
5410                                                "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
5411                                                "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
5412                                                cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
5413                                                dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment,
5414                                                validation_error_map[VALIDATION_ERROR_17c002d4]);
5415                                }
5416                                cur_dyn_offset++;
5417                            }
5418                        }
5419
5420                        cb_state->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx] =
5421                            std::vector<uint32_t>(pDynamicOffsets + total_dynamic_descriptors,
5422                                                  pDynamicOffsets + total_dynamic_descriptors + set_dynamic_descriptor_count);
5423                        // Keep running total of dynamic descriptor count to verify at the end
5424                        total_dynamic_descriptors += set_dynamic_descriptor_count;
5425                    }
5426                }
5427            } else {
5428                skip |=
5429                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
5430                            HandleToUint64(pDescriptorSets[set_idx]), __LINE__, DRAWSTATE_INVALID_SET, "DS",
5431                            "Attempt to bind descriptor set 0x%" PRIxLEAST64 " that doesn't exist!",
5432                            HandleToUint64(pDescriptorSets[set_idx]));
5433            }
5434            UpdateCmdBufferLastCmd(cb_state, CMD_BINDDESCRIPTORSETS);
5435            // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
5436            if (firstSet > 0) {  // Check set #s below the first bound set
5437                for (uint32_t i = 0; i < firstSet; ++i) {
5438                    if (cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
5439                        !verify_set_layout_compatibility(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i],
5440                                                         pipeline_layout, i, error_string)) {
5441                        skip |= log_msg(
5442                            dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
5443                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
5444                            HandleToUint64(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i]), __LINE__, DRAWSTATE_NONE,
5445                            "DS", "DescriptorSet 0x%" PRIxLEAST64
5446                                  " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
5447                            HandleToUint64(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i]), i,
5448                            HandleToUint64(layout));
5449                        cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
5450                    }
5451                }
5452            }
5453            // Check if newly last bound set invalidates any remaining bound sets
5454            if ((cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (last_set_index)) {
5455                if (old_final_bound_set &&
5456                    !verify_set_layout_compatibility(old_final_bound_set, pipeline_layout, last_set_index, error_string)) {
5457                    auto old_set = old_final_bound_set->GetSet();
5458                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
5459                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(old_set), __LINE__,
5460                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
5461                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
5462                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
5463                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
5464                                    HandleToUint64(old_set), last_set_index,
5465                                    HandleToUint64(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[last_set_index]),
5466                                    last_set_index, last_set_index + 1, HandleToUint64(layout));
5467                    cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
5468                }
5469            }
5470        }
5471        //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
5472        if (total_dynamic_descriptors != dynamicOffsetCount) {
5473            skip |=
5474                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5475                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_17c002ce, "DS",
5476                        "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
5477                        "is %u. It should exactly match the number of dynamic descriptors. %s",
5478                        setCount, total_dynamic_descriptors, dynamicOffsetCount, validation_error_map[VALIDATION_ERROR_17c002ce]);
5479        }
5480    }
5481    lock.unlock();
5482    if (!skip)
5483        dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
5484                                                       pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
5485}
5486
5487VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5488                                              VkIndexType indexType) {
5489    bool skip = false;
5490    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5491    // TODO : Somewhere need to verify that IBs have correct usage state flagged
5492    std::unique_lock<std::mutex> lock(global_lock);
5493
5494    auto buffer_state = GetBufferState(dev_data, buffer);
5495    auto cb_node = GetCBNode(dev_data, commandBuffer);
5496    if (cb_node && buffer_state) {
5497        skip |=
5498            ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_17e02415);
5499        skip |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
5500        skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_17e00364);
5501        std::function<bool()> function = [=]() {
5502            return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
5503        };
5504        cb_node->validate_functions.push_back(function);
5505        UpdateCmdBufferLastCmd(cb_node, CMD_BINDINDEXBUFFER);
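        // The index buffer offset must be a multiple of the index size: 2 bytes for UINT16, 4 for UINT32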
5506        VkDeviceSize offset_align = 0;
5507        switch (indexType) {
5508            case VK_INDEX_TYPE_UINT16:
5509                offset_align = 2;
5510                break;
5511            case VK_INDEX_TYPE_UINT32:
5512                offset_align = 4;
5513                break;
5514            default:
5515                // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
5516                break;
5517        }
5518        if (!offset_align || (offset % offset_align)) {
5519            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5520                            HandleToUint64(commandBuffer), __LINE__, DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
5521                            "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
5522                            string_VkIndexType(indexType));
5523        }
5524        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
5525    } else {
5526        assert(0);
5527    }
5528    lock.unlock();
5529    if (!skip) dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
5530}
5531
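// Record the vertex buffers bound at [firstBinding, firstBinding + bindingCount) so that subsequent draws
// can snapshot them for validation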
5532void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
5533    uint32_t end = firstBinding + bindingCount;
5534    if (pCB->currentDrawData.buffers.size() < end) {
5535        pCB->currentDrawData.buffers.resize(end);
5536    }
5537    for (uint32_t i = 0; i < bindingCount; ++i) {
5538        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
5539    }
5540}
5541
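// Snapshot the vertex buffers currently bound to this command buffer for the draw being recorded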
5542static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
5543
5544VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
5545                                                const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
5546    bool skip = false;
5547    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5548    // TODO : Somewhere need to verify that VBs have correct usage state flagged
5549    std::unique_lock<std::mutex> lock(global_lock);
5550
5551    auto cb_node = GetCBNode(dev_data, commandBuffer);
5552    if (cb_node) {
5553        skip |=
5554            ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_18202415);
5555        skip |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
5556        for (uint32_t i = 0; i < bindingCount; ++i) {
5557            auto buffer_state = GetBufferState(dev_data, pBuffers[i]);
5558            assert(buffer_state);
5559            skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_182004e8);
5560            std::function<bool()> function = [=]() {
5561                return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
5562            };
5563            cb_node->validate_functions.push_back(function);
5564            if (pOffsets[i] >= buffer_state->createInfo.size) {
5565                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5566                                HandleToUint64(buffer_state->buffer), __LINE__, VALIDATION_ERROR_182004e4, "DS",
5567                                "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer. %s",
5568                                pOffsets[i], validation_error_map[VALIDATION_ERROR_182004e4]);
5569            }
5570        }
5571        UpdateCmdBufferLastCmd(cb_node, CMD_BINDVERTEXBUFFER);
5572        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
5573    } else {
5574        assert(0);
5575    }
5576    lock.unlock();
5577    if (!skip) dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
5578}
5579
5580// Expects global_lock to be held by caller
5581static void MarkStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5582    for (auto imageView : pCB->updateImages) {
5583        auto view_state = GetImageViewState(dev_data, imageView);
5584        if (!view_state) continue;
5585
5586        auto image_state = GetImageState(dev_data, view_state->create_info.image);
5587        assert(image_state);
5588        std::function<bool()> function = [=]() {
5589            SetImageMemoryValid(dev_data, image_state, true);
5590            return false;
5591        };
5592        pCB->validate_functions.push_back(function);
5593    }
5594    for (auto buffer : pCB->updateBuffers) {
5595        auto buffer_state = GetBufferState(dev_data, buffer);
5596        assert(buffer_state);
5597        std::function<bool()> function = [=]() {
5598            SetBufferMemoryValid(dev_data, buffer_state, true);
5599            return false;
5600        };
5601        pCB->validate_functions.push_back(function);
5602    }
5603}
5604
5605// Generic function to handle validation for all CmdDraw* type functions
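// Note: graphics draws must be recorded inside a render pass, while compute dispatches must be outside one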
5606static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
5607                                CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller, VkQueueFlags queue_flags,
5608                                UNIQUE_VALIDATION_ERROR_CODE queue_flag_code, UNIQUE_VALIDATION_ERROR_CODE msg_code,
5609                                UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
5610    bool skip = false;
5611    *cb_state = GetCBNode(dev_data, cmd_buffer);
5612    if (*cb_state) {
5613        skip |= ValidateCmdQueueFlags(dev_data, *cb_state, caller, queue_flags, queue_flag_code);
5614        skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
5615        skip |= ValidateDrawState(dev_data, *cb_state, indexed, bind_point, caller, dynamic_state_msg_code);
5616        skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
5617                                                                : insideRenderPass(dev_data, *cb_state, caller, msg_code);
5618    }
5619    return skip;
5620}
5621
5622// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
5623static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
5624                                           CMD_TYPE cmd_type) {
5625    UpdateDrawState(dev_data, cb_state, bind_point);
5626    MarkStoreImagesAndBuffersAsWritten(dev_data, cb_state);
5627    UpdateCmdBufferLastCmd(cb_state, cmd_type);
5628}
5629
5630// Generic function to handle state update for all CmdDraw* type functions
5631static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
5632                                   CMD_TYPE cmd_type) {
5633    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, cmd_type);
5634    updateResourceTrackingOnDraw(cb_state);
5635    cb_state->hasDrawCmd = true;
5636}
5637
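// Each CmdDraw*/CmdDispatch* entry point below is split into a PreCallValidate step and a PostCallRecord
// step so the global lock can be released around the down-chain call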
5638static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
5639                                   GLOBAL_CB_NODE **cb_state, const char *caller) {
5640    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
5641                               VALIDATION_ERROR_1a202415, VALIDATION_ERROR_1a200017, VALIDATION_ERROR_1a200376);
5642}
5643
5644static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
5645    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAW);
5646}
5647
5648VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
5649                                   uint32_t firstVertex, uint32_t firstInstance) {
5650    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5651    GLOBAL_CB_NODE *cb_state = nullptr;
5652    std::unique_lock<std::mutex> lock(global_lock);
5653    bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
5654    lock.unlock();
5655    if (!skip) {
5656        dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
5657        lock.lock();
5658        PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
5659        lock.unlock();
5660    }
5661}
5662
5663static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
5664                                          VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
5665    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
5666                               VALIDATION_ERROR_1a402415, VALIDATION_ERROR_1a400017, VALIDATION_ERROR_1a40039c);
5667}
5668
5669static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
5670    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXED);
5671}
5672
5673VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
5674                                          uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
5675    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5676    GLOBAL_CB_NODE *cb_state = nullptr;
5677    std::unique_lock<std::mutex> lock(global_lock);
5678    bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
5679                                              "vkCmdDrawIndexed()");
5680    lock.unlock();
5681    if (!skip) {
5682        dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
5683        lock.lock();
5684        PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
5685        lock.unlock();
5686    }
5687}
5688
5689static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
5690                                           VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
5691                                           const char *caller) {
5692    bool skip =
5693        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
5694                            VALIDATION_ERROR_1aa02415, VALIDATION_ERROR_1aa00017, VALIDATION_ERROR_1aa003cc);
5695    *buffer_state = GetBufferState(dev_data, buffer);
5696    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1aa003b4);
5697    // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
5698    // VkDrawIndirectCommand structures accessed by this command must be 0, which will require access to the contents of 'buffer'.
5699    return skip;
5700}
5701
5702static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
5703                                          BUFFER_STATE *buffer_state) {
5704    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDIRECT);
5705    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
5706}
5707
5708VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
5709                                           uint32_t stride) {
5710    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5711    GLOBAL_CB_NODE *cb_state = nullptr;
5712    BUFFER_STATE *buffer_state = nullptr;
5713    std::unique_lock<std::mutex> lock(global_lock);
5714    bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
5715                                               &buffer_state, "vkCmdDrawIndirect()");
5716    lock.unlock();
5717    if (!skip) {
5718        dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
5719        lock.lock();
5720        PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
5721        lock.unlock();
5722    }
5723}
5724
5725static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
5726                                                  VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
5727                                                  BUFFER_STATE **buffer_state, const char *caller) {
5728    bool skip =
5729        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
5730                            VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1a602415, VALIDATION_ERROR_1a600017, VALIDATION_ERROR_1a600434);
5731    *buffer_state = GetBufferState(dev_data, buffer);
5732    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a60041c);
5733    // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
5734    // VkDrawIndexedIndirectCommand structures accessed by this command must be 0, which will require access to the contents of
5735    // 'buffer'.
5736    return skip;
5737}
5738
5739static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
5740                                                 BUFFER_STATE *buffer_state) {
5741    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXEDINDIRECT);
5742    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
5743}
5744
5745VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5746                                                  uint32_t count, uint32_t stride) {
5747    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5748    GLOBAL_CB_NODE *cb_state = nullptr;
5749    BUFFER_STATE *buffer_state = nullptr;
5750    std::unique_lock<std::mutex> lock(global_lock);
5751    bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
5752                                                      &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
5753    lock.unlock();
5754    if (!skip) {
5755        dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
5756        lock.lock();
5757        PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
5758        lock.unlock();
5759    }
5760}
5761
5762static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
5763                                       VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
5764    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
5765                               VALIDATION_ERROR_19c02415, VALIDATION_ERROR_19c00017, VALIDATION_ERROR_UNDEFINED);
5766}
5767
5768static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
5769    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCH);
5770}
5771
5772VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
5773    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5774    GLOBAL_CB_NODE *cb_state = nullptr;
5775    std::unique_lock<std::mutex> lock(global_lock);
5776    bool skip =
5777        PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
5778    lock.unlock();
5779    if (!skip) {
5780        dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
5781        lock.lock();
5782        PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
5783        lock.unlock();
5784    }
5785}
5786
5787static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
5788                                               VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
5789                                               BUFFER_STATE **buffer_state, const char *caller) {
5790    bool skip =
5791        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
5792                            VALIDATION_ERROR_1a002415, VALIDATION_ERROR_1a000017, VALIDATION_ERROR_UNDEFINED);
5793    *buffer_state = GetBufferState(dev_data, buffer);
5794    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a000322);
5795    return skip;
5796}
5797
5798static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
5799                                              BUFFER_STATE *buffer_state) {
5800    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCHINDIRECT);
5801    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
5802}
5803
5804VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
5805    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5806    GLOBAL_CB_NODE *cb_state = nullptr;
5807    BUFFER_STATE *buffer_state = nullptr;
5808    std::unique_lock<std::mutex> lock(global_lock);
5809    bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
5810                                                   &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
5811    lock.unlock();
5812    if (!skip) {
5813        dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
5814        lock.lock();
5815        PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
5816        lock.unlock();
5817    }
5818}
5819
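// The transfer entry points below share a common shape: look up layer state under the global
// lock, run the PreCallValidate* checks, perform the PreCallRecord* state updates, then drop
// the lock before calling down the dispatch table so the driver call is made unlocked.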
5820VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
5821                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
5822    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5823    std::unique_lock<std::mutex> lock(global_lock);
5824
5825    auto cb_node = GetCBNode(device_data, commandBuffer);
5826    auto src_buffer_state = GetBufferState(device_data, srcBuffer);
5827    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
5828
5829    if (cb_node && src_buffer_state && dst_buffer_state) {
5830        bool skip = PreCallValidateCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
5831        if (!skip) {
5832            PreCallRecordCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
5833            lock.unlock();
5834            device_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
5835        }
5836    } else {
5837        lock.unlock();
5838        assert(0);
5839    }
5840}
5841
5842VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5843                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
5844                                        const VkImageCopy *pRegions) {
5845    bool skip = false;
5846    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5847    std::unique_lock<std::mutex> lock(global_lock);
5848
5849    auto cb_node = GetCBNode(device_data, commandBuffer);
5850    auto src_image_state = GetImageState(device_data, srcImage);
5851    auto dst_image_state = GetImageState(device_data, dstImage);
5852    if (cb_node && src_image_state && dst_image_state) {
5853        skip = PreCallValidateCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
5854                                           srcImageLayout, dstImageLayout);
5855        if (!skip) {
5856            PreCallRecordCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
5857                                      dstImageLayout);
5858            lock.unlock();
5859            device_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
5860                                                     pRegions);
5861        }
5862    } else {
5863        lock.unlock();
5864        assert(0);
5865    }
5866}
5867
5868// Validate that an image's sampleCount matches the requirement for a specific API call
5869bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
5870                              const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
5871    bool skip = false;
5872    if (image_state->createInfo.samples != sample_count) {
5873        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
5874                       HandleToUint64(image_state->image), 0, msgCode, "DS",
5875                       "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s. %s", location,
5876                       HandleToUint64(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
5877                       string_VkSampleCountFlagBits(sample_count), validation_error_map[msgCode]);
5878    }
5879    return skip;
5880}
5881
5882VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5883                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
5884                                        const VkImageBlit *pRegions, VkFilter filter) {
5885    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5886    std::unique_lock<std::mutex> lock(global_lock);
5887
5888    auto cb_node = GetCBNode(dev_data, commandBuffer);
5889    auto src_image_state = GetImageState(dev_data, srcImage);
5890    auto dst_image_state = GetImageState(dev_data, dstImage);
5891
5892    bool skip = PreCallValidateCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, filter);
5893
5894    if (!skip) {
5895        PreCallRecordCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state);
5896        lock.unlock();
5897        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
5898                                              pRegions, filter);
5899    }
5900}
5901
5902VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
5903                                                VkImageLayout dstImageLayout, uint32_t regionCount,
5904                                                const VkBufferImageCopy *pRegions) {
5905    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5906    std::unique_lock<std::mutex> lock(global_lock);
5907    bool skip = false;
5908    auto cb_node = GetCBNode(device_data, commandBuffer);
5909    auto src_buffer_state = GetBufferState(device_data, srcBuffer);
5910    auto dst_image_state = GetImageState(device_data, dstImage);
5911    if (cb_node && src_buffer_state && dst_image_state) {
5912        skip = PreCallValidateCmdCopyBufferToImage(device_data, dstImageLayout, cb_node, src_buffer_state, dst_image_state,
5913                                                        regionCount, pRegions, "vkCmdCopyBufferToImage()");
5914    } else {
5915        lock.unlock();
5916        assert(0);  // TODO: report VU01244 here, or put in object tracker?
5917        return;     // State lookup failed; bail out rather than record/dispatch with null state
5918    }
5919    if (!skip) {
5920        PreCallRecordCmdCopyBufferToImage(device_data, cb_node, src_buffer_state, dst_image_state, regionCount, pRegions,
5921                                          dstImageLayout);
5922        lock.unlock();
5923        device_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
5924    }
5925}
5926
5927VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5928                                                VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
5929    bool skip = false;
5930    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5931    std::unique_lock<std::mutex> lock(global_lock);
5932
5933    auto cb_node = GetCBNode(device_data, commandBuffer);
5934    auto src_image_state = GetImageState(device_data, srcImage);
5935    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
5936    if (cb_node && src_image_state && dst_buffer_state) {
5937        skip = PreCallValidateCmdCopyImageToBuffer(device_data, srcImageLayout, cb_node, src_image_state, dst_buffer_state,
5938                                                        regionCount, pRegions, "vkCmdCopyImageToBuffer()");
5939    } else {
5940        lock.unlock();
5941        assert(0);  // TODO: report VU01262 here, or put in object tracker?
5942        return;     // State lookup failed; bail out rather than record/dispatch with null state
5943    }
5944    if (!skip) {
5945        PreCallRecordCmdCopyImageToBuffer(device_data, cb_node, src_image_state, dst_buffer_state, regionCount, pRegions,
5946                                          srcImageLayout);
5947        lock.unlock();
5948        device_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
5949    }
5950}
5951
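// vkCmdUpdateBuffer validation is still written inline rather than factored into
// PreCallValidate/Record helpers: it checks memory binding, TRANSFER_DST usage, supported queue
// flags, and that recording is outside a render pass, and defers marking the destination memory
// valid until queue submission via the lambda pushed onto validate_functions.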
5952VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5953                                           VkDeviceSize dataSize, const uint32_t *pData) {
5954    bool skip = false;
5955    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5956    std::unique_lock<std::mutex> lock(global_lock);
5957
5958    auto cb_node = GetCBNode(dev_data, commandBuffer);
5959    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
5960    if (cb_node && dst_buff_state) {
5961        skip |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400046);
5962        // Update bindings between buffer and cmd buffer
5963        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
5964        // Validate that DST buffer has correct usage flags set
5965        skip |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
5966                                         VALIDATION_ERROR_1e400044, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
5967        std::function<bool()> function = [=]() {
5968            SetBufferMemoryValid(dev_data, dst_buff_state, true);
5969            return false;
5970        };
5971        cb_node->validate_functions.push_back(function);
5972
5973        skip |=
5974            ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdUpdateBuffer()",
5975                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1e402415);
5976        skip |= ValidateCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
5977        UpdateCmdBufferLastCmd(cb_node, CMD_UPDATEBUFFER);
5978        skip |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400017);
5979    } else {
5980        assert(0);
5981    }
5982    lock.unlock();
5983    if (!skip) dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
5984}
5985
5986VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5987                                         VkDeviceSize size, uint32_t data) {
5988    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5989    std::unique_lock<std::mutex> lock(global_lock);
5990    auto cb_node = GetCBNode(device_data, commandBuffer);
5991    auto buffer_state = GetBufferState(device_data, dstBuffer);
5992
5993    if (cb_node && buffer_state) {
5994        bool skip = PreCallValidateCmdFillBuffer(device_data, cb_node, buffer_state);
5995        if (!skip) {
5996            PreCallRecordCmdFillBuffer(device_data, cb_node, buffer_state);
5997            lock.unlock();
5998            device_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
5999        }
6000    } else {
6001        lock.unlock();
6002        assert(0);
6003    }
6004}
6005
6006VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
6007                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
6008                                               const VkClearRect *pRects) {
6009    bool skip = false;
6010    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6011    {
6012        std::lock_guard<std::mutex> lock(global_lock);
6013        skip = PreCallValidateCmdClearAttachments(dev_data, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
6014    }
6015    if (!skip) dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
6016}
6017
6018VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
6019                                              const VkClearColorValue *pColor, uint32_t rangeCount,
6020                                              const VkImageSubresourceRange *pRanges) {
6021    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6022    std::unique_lock<std::mutex> lock(global_lock);
6023
6024    bool skip = PreCallValidateCmdClearColorImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
6025    if (!skip) {
6026        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges, CMD_CLEARCOLORIMAGE);
6027        lock.unlock();
6028        dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
6029    }
6030}
6031
6032VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
6033                                                     const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
6034                                                     const VkImageSubresourceRange *pRanges) {
6035    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6036    std::unique_lock<std::mutex> lock(global_lock);
6037
6038    bool skip = PreCallValidateCmdClearDepthStencilImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
6039    if (!skip) {
6040        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges, CMD_CLEARDEPTHSTENCILIMAGE);
6041        lock.unlock();
6042        dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
6043    }
6044}
6045
6046VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
6047                                           VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
6048                                           const VkImageResolve *pRegions) {
6049    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6050    std::unique_lock<std::mutex> lock(global_lock);
6051
6052    auto cb_node = GetCBNode(dev_data, commandBuffer);
6053    auto src_image_state = GetImageState(dev_data, srcImage);
6054    auto dst_image_state = GetImageState(dev_data, dstImage);
6055
6056    bool skip = PreCallValidateCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions);
6057
6058    if (!skip) {
6059        PreCallRecordCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state);
6060        lock.unlock();
6061        dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
6062                                                 pRegions);
6063    }
6064}
6065
6066VKAPI_ATTR void VKAPI_CALL GetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
6067                                                     VkSubresourceLayout *pLayout) {
6068    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6069
6070    bool skip = PreCallValidateGetImageSubresourceLayout(device_data, image, pSubresource);
6071    if (!skip) {
6072        device_data->dispatch_table.GetImageSubresourceLayout(device, image, pSubresource, pLayout);
6073    }
6074}
6075
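// Submit-time callback: records the stage mask from the most recent set/reset of this event on
// both the command buffer and the executing queue, so later waits can be checked against it.
// Always returns false -- it updates state and never generates a skip by itself.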
6076bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
6077    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6078    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6079    if (pCB) {
6080        pCB->eventToStageMap[event] = stageMask;
6081    }
6082    auto queue_data = dev_data->queueMap.find(queue);
6083    if (queue_data != dev_data->queueMap.end()) {
6084        queue_data->second.eventToStageMap[event] = stageMask;
6085    }
6086    return false;
6087}
6088
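// vkCmdSetEvent performs record-time checks (queue flags, render pass, GS/TS stage enables) and
// defers the stage-mask bookkeeping to submit time by binding the command-buffer and event
// arguments now and the queue later, e.g.:
//     std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
// Each such callback is invoked with the VkQueue during vkQueueSubmit validation.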
6089VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
6090    bool skip = false;
6091    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6092    std::unique_lock<std::mutex> lock(global_lock);
6093    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6094    if (pCB) {
6095        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6096                                      VALIDATION_ERROR_1d402415);
6097        skip |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
6098        UpdateCmdBufferLastCmd(pCB, CMD_SETEVENT);
6099        skip |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_1d400017);
6100        skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()", VALIDATION_ERROR_1d4008fc,
6101                                             VALIDATION_ERROR_1d4008fe);
6102        auto event_state = GetEventNode(dev_data, event);
6103        if (event_state) {
6104            addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
6105            event_state->cb_bindings.insert(pCB);
6106        }
6107        pCB->events.push_back(event);
6108        if (!pCB->waitedEvents.count(event)) {
6109            pCB->writeEventsBeforeWait.push_back(event);
6110        }
6111        std::function<bool(VkQueue)> eventUpdate =
6112            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
6113        pCB->eventUpdates.push_back(eventUpdate);
6114    }
6115    lock.unlock();
6116    if (!skip) dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
6117}
6118
6119VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
6120    bool skip = false;
6121    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6122    std::unique_lock<std::mutex> lock(global_lock);
6123    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6124    if (pCB) {
6125        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6126                                      VALIDATION_ERROR_1c402415);
6127        skip |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
6128        UpdateCmdBufferLastCmd(pCB, CMD_RESETEVENT);
6129        skip |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_1c400017);
6130        skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdResetEvent()", VALIDATION_ERROR_1c400904,
6131                                             VALIDATION_ERROR_1c400906);
6132        auto event_state = GetEventNode(dev_data, event);
6133        if (event_state) {
6134            addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
6135            event_state->cb_bindings.insert(pCB);
6136        }
6137        pCB->events.push_back(event);
6138        if (!pCB->waitedEvents.count(event)) {
6139            pCB->writeEventsBeforeWait.push_back(event);
6140        }
6141        // TODO : Add check for VALIDATION_ERROR_32c008f8
6142        std::function<bool(VkQueue)> eventUpdate =
6143            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
6144        pCB->eventUpdates.push_back(eventUpdate);
6145    }
6146    lock.unlock();
6147    if (!skip) dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
6148}
6149
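// Shared barrier validation for vkCmdWaitEvents and vkCmdPipelineBarrier. Verifies that memory
// barriers inside a render pass only appear in subpasses declaring a self-dependency, that image
// barrier queue family indices follow the sharing-mode rules, that layout transitions are legal
// (and disallowed inside a render pass instance), and that buffer barrier offset/size lie within
// the buffer. Assumes the caller has already confirmed the command buffer has layer state.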
6150static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
6151                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
6152                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
6153                             const VkImageMemoryBarrier *pImageMemBarriers) {
6154    bool skip = false;
6155    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(cmdBuffer), layer_data_map);
6156    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, cmdBuffer);
6157    if (pCB->activeRenderPass && memBarrierCount) {
6158        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
6159            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6160                            HandleToUint64(cmdBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
6161                            "%s: Barriers cannot be set during subpass %d "
6162                            "unless the subpass specifies a self-dependency.",
6163                            funcName, pCB->activeSubpass);
6164        }
6165    }
6166    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
6167        auto mem_barrier = &pImageMemBarriers[i];
6168        auto image_data = GetImageState(dev_data, mem_barrier->image);
6169        if (image_data) {
6170            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
6171            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
6172            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
6173                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
6174                // be VK_QUEUE_FAMILY_IGNORED
6175                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
6176                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6177                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cmdBuffer), __LINE__,
6178                                    DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image Barrier for image 0x%" PRIx64
6179                                                                         " was created with sharingMode of "
6180                                                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
6181                                                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
6182                                    funcName, HandleToUint64(mem_barrier->image));
6183                }
6184            } else {
6185                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
6186                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
6187                // or both be a valid queue family
6188                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
6189                    (src_q_f_index != dst_q_f_index)) {
6190                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6191                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cmdBuffer), __LINE__,
6192                                    DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64
6193                                                                         " was created with sharingMode "
6194                                                                         "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
6195                                                                         "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
6196                                                                         "must be.",
6197                                    funcName, HandleToUint64(mem_barrier->image));
6198                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
6199                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
6200                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
6201                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6202                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cmdBuffer), __LINE__,
6203                                    DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
6204                                    "%s: Image 0x%" PRIx64
6205                                    " was created with sharingMode "
6206                                    "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
6207                                    " or dstQueueFamilyIndex %d is greater than the number of queueFamilies ("
6208                                    PRINTF_SIZE_T_SPECIFIER ") created for this device.",
6209                                    funcName, HandleToUint64(mem_barrier->image), src_q_f_index, dst_q_f_index,
6210                                    dev_data->phys_dev_properties.queue_family_properties.size());
6211                }
6212            }
6213        }
6214
6215        if (mem_barrier->oldLayout != mem_barrier->newLayout) {
6216            if (pCB->activeRenderPass) {
6217                skip |=
6218                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6219                            HandleToUint64(cmdBuffer), __LINE__, VALIDATION_ERROR_1b80093a, "DS",
6220                            "%s: As the Image Barrier for image 0x%" PRIx64
6221                            " is being executed within a render pass instance, oldLayout must equal newLayout yet they are "
6222                            "%s and %s. %s",
6223                            funcName, HandleToUint64(mem_barrier->image), string_VkImageLayout(mem_barrier->oldLayout),
6224                            string_VkImageLayout(mem_barrier->newLayout), validation_error_map[VALIDATION_ERROR_1b80093a]);
6225            }
6226            skip |= ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
6227            skip |= ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
6228        }
6229        if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
6230            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6231                            HandleToUint64(cmdBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
6232                            "%s: Image Layout cannot be transitioned to UNDEFINED or "
6233                            "PREINITIALIZED.",
6234                            funcName);
6235        }
6236        if (image_data) {
6237            auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
6238            skip |= ValidateImageAspectMask(dev_data, image_data->image, image_data->createInfo.format, aspect_mask, funcName);
6239
6240            std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
6241            skip |= ValidateImageSubresourceRange(dev_data, image_data, false, mem_barrier->subresourceRange, funcName,
6242                                                  param_name.c_str());
6243        }
6244    }
6245
6246    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
6247        auto mem_barrier = &pBufferMemBarriers[i];
6248        if (pCB->activeRenderPass) {
6249            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6250                            HandleToUint64(cmdBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
6251                            "%s: Buffer Barriers cannot be used during a render pass.", funcName);
6252        }
6253        if (!mem_barrier) continue;
6254
6255        // Validate buffer barrier queue family indices
6256        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
6257             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
6258            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
6259             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
6260            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6261                            HandleToUint64(cmdBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
6262                            "%s: Buffer Barrier 0x%" PRIx64
6263                            " has QueueFamilyIndex greater "
6264                            "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
6265                            funcName, HandleToUint64(mem_barrier->buffer),
6266                            dev_data->phys_dev_properties.queue_family_properties.size());
6267        }
6268
6269        auto buffer_state = GetBufferState(dev_data, mem_barrier->buffer);
6270        if (buffer_state) {
6271            auto buffer_size = buffer_state->requirements.size;
6272            if (mem_barrier->offset >= buffer_size) {
6273                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6274                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cmdBuffer), __LINE__,
6275                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64
6276                                                                 " which is not less than total size 0x%" PRIx64 ".",
6277                                funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
6278                                HandleToUint64(buffer_size));
6279            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
6280                skip |=
6281                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6282                            HandleToUint64(cmdBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
6283                            "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
6284                            " whose sum is greater than total size 0x%" PRIx64 ".",
6285                            funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
6286                            HandleToUint64(mem_barrier->size), HandleToUint64(buffer_size));
6287            }
6288        }
6289    }
6290    return skip;
6291}
6292
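// Submit-time callback for vkCmdWaitEvents: reconstructs the OR of the stage masks with which
// the waited events were actually set (preferring per-queue state, falling back to global event
// state) and compares it against the srcStageMask recorded with the wait.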
6293bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
6294                            VkPipelineStageFlags sourceStageMask) {
6295    bool skip = false;
6296    VkPipelineStageFlags stageMask = 0;
6297    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
6298    for (uint32_t i = 0; i < eventCount; ++i) {
6299        auto event = pCB->events[firstEventIndex + i];
6300        auto queue_data = dev_data->queueMap.find(queue);
6301        if (queue_data == dev_data->queueMap.end()) return false;
6302        auto event_data = queue_data->second.eventToStageMap.find(event);
6303        if (event_data != queue_data->second.eventToStageMap.end()) {
6304            stageMask |= event_data->second;
6305        } else {
6306            auto global_event_data = GetEventNode(dev_data, event);
6307            if (!global_event_data) {
6308                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
6309                                HandleToUint64(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
6310                                "Event 0x%" PRIx64 " cannot be waited on if it has never been set.", HandleToUint64(event));
6311            } else {
6312                stageMask |= global_event_data->stageMask;
6313            }
6314        }
6315    }
6316    // TODO: Need to validate that host_bit is only set if set event is called
6317    // but set event can be called at any time.
6318    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
6319        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6320                        HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_1e62d401, "DS",
6321                        "Submitting command buffer with call to vkCmdWaitEvents "
6322                        "using srcStageMask 0x%X which must be the bitwise "
6323                        "OR of the stageMask parameters used in calls to "
6324                        "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
6325                        "used with vkSetEvent, but instead is 0x%X. %s",
6326                        sourceStageMask, stageMask, validation_error_map[VALIDATION_ERROR_1e62d401]);
6327    }
6328    return skip;
6329}
6330
6331// Note that we only check bits that HAVE required queueflags -- don't care entries are skipped
6332static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
6333    {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
6334    {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
6335    {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
6336    {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
6337    {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
6338    {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
6339    {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
6340    {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
6341    {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
6342    {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
6343    {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
6344    {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
6345    {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
6346    {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
6347
6348static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
6349                                                            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
6350                                                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
6351                                                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
6352                                                            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
6353                                                            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
6354                                                            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
6355                                                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
6356                                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
6357                                                            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
6358                                                            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
6359                                                            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
6360                                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
6361                                                            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
6362
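// Test each stage bit present in stage_mask against the table above; a bit fails when its
// required queue flags do not intersect queue_flags. For example, a barrier recorded on a
// compute-only queue family with VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT in its mask fails,
// since that stage requires VK_QUEUE_GRAPHICS_BIT.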
6363bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
6364                                      VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
6365                                      UNIQUE_VALIDATION_ERROR_CODE error_code) {
6366    bool skip = false;
6367    // Look up each bit in the stage mask and check for overlap between its required queue flags and queue_flags
6368    for (const auto &item : stage_flag_bit_array) {
6369        if (stage_mask & item) {
6370            if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
6371                skip |=
6372                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6373                            HandleToUint64(command_buffer), __LINE__, error_code, "DL",
6374                            "%s(): %s flag %s is not compatible with the queue family properties of this "
6375                            "command buffer. %s",
6376                            function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)),
6377                            validation_error_map[error_code]);
6378            }
6379        }
6380    }
6381    return skip;
6382}
6383
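// Look up the queue family of the pool this command buffer was allocated from and apply the
// stage-vs-queue-capability check to both stage masks. Masks containing
// VK_PIPELINE_STAGE_ALL_COMMANDS_BIT are exempt, as that stage is valid on every queue.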
6384bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE *cb_state,
6385                                                VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
6386                                                const char *function, UNIQUE_VALIDATION_ERROR_CODE error_code) {
6387    bool skip = false;
6388    uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
6389    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
6390    auto physical_device_state = GetPhysicalDeviceState(instance_data, dev_data->physical_device);
6391
6392    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
6393    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
6394    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
6395
6396    if (queue_family_index < physical_device_state->queue_family_properties.size()) {
6397        VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
6398
6399        if ((source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
6400            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
6401                                                     function, "srcStageMask", error_code);
6402        }
6403        if ((dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
6404            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
6405                                                     function, "dstStageMask", error_code);
6406        }
6407    }
6408    return skip;
6409}
6410
6411VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
6412                                         VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
6413                                         uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
6414                                         uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
6415                                         uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
6416    bool skip = false;
6417    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6418    std::unique_lock<std::mutex> lock(global_lock);
6419    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
6420    if (cb_state) {
6421        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, "vkCmdWaitEvents",
6422                                                           VALIDATION_ERROR_1e600918);
6423        skip |= ValidateStageMaskGsTsEnables(dev_data, sourceStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e60090e,
6424                                             VALIDATION_ERROR_1e600912);
6425        skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e600910,
6426                                             VALIDATION_ERROR_1e600914);
6427        auto first_event_index = cb_state->events.size();
6428        for (uint32_t i = 0; i < eventCount; ++i) {
6429            auto event_state = GetEventNode(dev_data, pEvents[i]);
6430            if (event_state) {
6431                addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(pEvents[i]), kVulkanObjectTypeEvent}, cb_state);
6432                event_state->cb_bindings.insert(cb_state);
6433            }
6434            cb_state->waitedEvents.insert(pEvents[i]);
6435            cb_state->events.push_back(pEvents[i]);
6436        }
6437        std::function<bool(VkQueue)> event_update =
6438            std::bind(validateEventStageMask, std::placeholders::_1, cb_state, eventCount, first_event_index, sourceStageMask);
6439        cb_state->eventUpdates.push_back(event_update);
6440        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6441                                      VALIDATION_ERROR_1e602415);
6442        skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
6443        UpdateCmdBufferLastCmd(cb_state, CMD_WAITEVENTS);
6444        skip |=
6445            ValidateBarriersToImages(dev_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
6446        if (!skip) {
6447            TransitionImageLayouts(dev_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
6448        }
6449
6450        skip |= ValidateBarriers("vkCmdWaitEvents()", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
6451                                 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
6452    }
6453    lock.unlock();
6454    if (!skip)
6455        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
6456                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
6457                                               imageMemoryBarrierCount, pImageMemoryBarriers);
6458}
6459
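// All record-time checks for vkCmdPipelineBarrier, factored out of the entry point: queue
// capability and queue flag checks, GS/TS stage-enable checks on both stage masks, and the
// shared image/buffer/memory barrier validation above.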
6460static bool PreCallValidateCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
6461                                              VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
6462                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
6463                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
6464                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
6465    bool skip = false;
6466    skip |= ValidateStageMasksAgainstQueueCapabilities(device_data, cb_state, srcStageMask, dstStageMask, "vkCmdPipelineBarrier",
6467                                                       VALIDATION_ERROR_1b80093e);
6468    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdPipelineBarrier()",
6469                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b802415);
6470    skip |= ValidateCmd(device_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
6471    skip |= ValidateStageMaskGsTsEnables(device_data, srcStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800920,
6472                                         VALIDATION_ERROR_1b800924);
6473    skip |= ValidateStageMaskGsTsEnables(device_data, dstStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800922,
6474                                         VALIDATION_ERROR_1b800926);
6475    skip |= ValidateBarriersToImages(device_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers,
6476                                     "vkCmdPipelineBarrier()");
6477    skip |= ValidateBarriers("vkCmdPipelineBarrier()", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
6478                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
6479    return skip;
6480}
6481
6482static void PreCallRecordCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
6483                                            uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
6484    UpdateCmdBufferLastCmd(cb_state, CMD_PIPELINEBARRIER);
6485    TransitionImageLayouts(device_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
6486}
6487
6488VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
6489                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
6490                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
6491                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
6492                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
6493    bool skip = false;
6494    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6495    std::unique_lock<std::mutex> lock(global_lock);
6496    GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
6497    if (cb_state) {
6498        skip |= PreCallValidateCmdPipelineBarrier(device_data, cb_state, commandBuffer, srcStageMask, dstStageMask,
6499                                                  memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
6500                                                  pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
6501        if (!skip) {
6502            PreCallRecordCmdPipelineBarrier(device_data, cb_state, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
6503        }
6504    } else {
6505        assert(0);
6506    }
6507    lock.unlock();
6508    if (!skip) {
6509        device_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
6510                                                       pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
6511                                                       imageMemoryBarrierCount, pImageMemoryBarriers);
6512    }
6513}
6514
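// Submit-time callback: marks a query's availability state on both the command buffer and the
// executing queue (true after vkCmdEndQuery, false after a reset). Never skips on its own.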
6515bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
6516    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6517    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6518    if (pCB) {
6519        pCB->queryToStateMap[object] = value;
6520    }
6521    auto queue_data = dev_data->queueMap.find(queue);
6522    if (queue_data != dev_data->queueMap.end()) {
6523        queue_data->second.queryToStateMap[object] = value;
6524    }
6525    return false;
6526}
6527
6528VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
6529    bool skip = false;
6530    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6531    std::unique_lock<std::mutex> lock(global_lock);
6532    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6533    if (pCB) {
6534        QueryObject query = {queryPool, slot};
6535        pCB->activeQueries.insert(query);
6536        if (!pCB->startedQueries.count(query)) {
6537            pCB->startedQueries.insert(query);
6538        }
6539        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdBeginQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6540                                      VALIDATION_ERROR_17802415);
6541        skip |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
6542        UpdateCmdBufferLastCmd(pCB, CMD_BEGINQUERY);
6543        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
6544                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, pCB);
6545    }
6546    lock.unlock();
6547    if (!skip) dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
6548}
6549
6550VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
6551    bool skip = false;
6552    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6553    std::unique_lock<std::mutex> lock(global_lock);
6554    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
6555    if (cb_state) {
6556        QueryObject query = {queryPool, slot};
6557        if (!cb_state->activeQueries.count(query)) {
6558            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6559                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1ae00652, "DS",
6560                            "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d. %s",
6561                            HandleToUint64(queryPool), slot, validation_error_map[VALIDATION_ERROR_1ae00652]);
6562        } else {
6563            cb_state->activeQueries.erase(query);
6564        }
6565        std::function<bool(VkQueue)> query_update = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
6566        cb_state->queryUpdates.push_back(query_update);
6567        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdEndQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6568                                      VALIDATION_ERROR_1ae02415);
6569        skip |= ValidateCmd(dev_data, cb_state, CMD_ENDQUERY, "vkCmdEndQuery()");
6570        UpdateCmdBufferLastCmd(cb_state, CMD_ENDQUERY);
6571        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
6572                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
6573    }
6574    lock.unlock();
6575    if (!skip) dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
6576}
6577
6578VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
6579                                             uint32_t queryCount) {
6580    bool skip = false;
6581    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6582    std::unique_lock<std::mutex> lock(global_lock);
6583    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
6584    skip |= insideRenderPass(dev_data, cb_state, "vkCmdResetQueryPool()", VALIDATION_ERROR_1c600017);
6585    skip |= ValidateCmd(dev_data, cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
6586    skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6587                                  VALIDATION_ERROR_1c602415);
6588    lock.unlock();
6589
6590    if (skip) return;
6591
6592    dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
6593
6594    lock.lock();
6595    for (uint32_t i = 0; i < queryCount; i++) {
6596        QueryObject query = {queryPool, firstQuery + i};
6597        cb_state->waitedEventsBeforeQueryReset[query] = cb_state->waitedEvents;
6598        std::function<bool(VkQueue)> query_update =
6599            std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
6600        cb_state->queryUpdates.push_back(query_update);
6601    }
6602    UpdateCmdBufferLastCmd(cb_state, CMD_RESETQUERYPOOL);
6603    addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
6604                            {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
6605}
6606
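// Submit-time callback for vkCmdCopyQueryPoolResults: each query in the copied range must have
// completed (per-queue state first, then the device-global map); otherwise the copy would read
// unavailable results and an error is logged.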
6607bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
6608    bool skip = false;
6609    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
6610    auto queue_data = dev_data->queueMap.find(queue);
6611    if (queue_data == dev_data->queueMap.end()) return false;
6612    for (uint32_t i = 0; i < queryCount; i++) {
6613        QueryObject query = {queryPool, firstQuery + i};
6614        auto query_data = queue_data->second.queryToStateMap.find(query);
6615        bool fail = false;
6616        if (query_data != queue_data->second.queryToStateMap.end()) {
6617            if (!query_data->second) {
6618                fail = true;
6619            }
6620        } else {
6621            auto global_query_data = dev_data->queryToStateMap.find(query);
6622            if (global_query_data != dev_data->queryToStateMap.end()) {
6623                if (!global_query_data->second) {
6624                    fail = true;
6625                }
6626            } else {
6627                fail = true;
6628            }
6629        }
6630        if (fail) {
6631            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6632                            HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
6633                            "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
6634                            HandleToUint64(queryPool), firstQuery + i);
6635        }
6636    }
6637    return skip;
6638}
6639
VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                   uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                   VkDeviceSize stride, VkQueryResultFlags flags) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = GetCBNode(dev_data, commandBuffer);
    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
    if (cb_node && dst_buff_state) {
        skip |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400674);
        // Update bindings between buffer and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that DST buffer has correct usage flags set
        skip |=
            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_19400672,
                                     "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);
        std::function<bool(VkQueue)> query_update =
            std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
        cb_node->queryUpdates.push_back(query_update);
        skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdCopyQueryPoolResults()",
                                      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_19402415);
        skip |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
        UpdateCmdBufferLastCmd(cb_node, CMD_COPYQUERYPOOLRESULTS);
        skip |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400017);
        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_node);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip)
        dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
                                                         stride, flags);
}

VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
                                            uint32_t offset, uint32_t size, const void *pValues) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1bc02415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
        UpdateCmdBufferLastCmd(cb_state, CMD_PUSHCONSTANTS);
    }
    skip |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
    if (0 == stageFlags) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1bc2dc03, "DS",
                        "vkCmdPushConstants() call has no stageFlags set. %s", validation_error_map[VALIDATION_ERROR_1bc2dc03]);
    }

    // Check if specified push constant range falls within a pipeline-defined range which has matching stageFlags.
    // The spec doesn't seem to disallow having multiple push constant ranges with the
    // same offset and size, but different stageFlags.  So we can't just check the
    // stageFlags in the first range with matching offset and size.
    if (!skip) {
        const auto &ranges = getPipelineLayout(dev_data, layout)->push_constant_ranges;
        bool found_matching_range = false;
        for (const auto &range : ranges) {
            if ((stageFlags == range.stageFlags) && (offset >= range.offset) && (offset + size <= range.offset + range.size)) {
                found_matching_range = true;
                break;
            }
        }
        if (!found_matching_range) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1bc002de, "DS",
                            "vkCmdPushConstants() stageFlags = 0x%" PRIx32
                            " do not match the stageFlags in any of the ranges with"
                            " offset = %d and size = %d in pipeline layout 0x%" PRIx64 ". %s",
                            (uint32_t)stageFlags, offset, size, HandleToUint64(layout),
                            validation_error_map[VALIDATION_ERROR_1bc002de]);
        }
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}

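// Illustrative app-side sketch (hypothetical values): for the range check above to pass, the
// VkPushConstantRange baked into the pipeline layout must cover the update with *identical*
// stageFlags -- a superset of stages does not match.
static void ExamplePushConstantUsage(VkDevice device, VkCommandBuffer cmd) {
    VkPushConstantRange range = {};
    range.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    range.offset = 0;
    range.size = 16;

    VkPipelineLayoutCreateInfo plci = {};
    plci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    plci.pushConstantRangeCount = 1;
    plci.pPushConstantRanges = &range;

    VkPipelineLayout layout = VK_NULL_HANDLE;
    vkCreatePipelineLayout(device, &plci, nullptr, &layout);

    const float data[4] = {0.f, 0.f, 0.f, 1.f};
    // stageFlags, offset, and size all fall within the declared range, so the layer is silent.
    vkCmdPushConstants(cmd, layout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(data), data);
}
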
VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                             VkQueryPool queryPool, uint32_t slot) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        QueryObject query = {queryPool, slot};
        std::function<bool(VkQueue)> query_update = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
        cb_state->queryUpdates.push_back(query_update);
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWriteTimestamp()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1e802415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
        UpdateCmdBufferLastCmd(cb_state, CMD_WRITETIMESTAMP);
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
}

static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;

    for (uint32_t attach = 0; attach < count; attach++) {
        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
            // Attachment counts are verified elsewhere, but prevent an invalid access
            if (attachments[attach].attachment < fbci->attachmentCount) {
                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
                auto view_state = GetImageViewState(dev_data, *image_view);
                if (view_state) {
                    // Look up the image state first so an unknown image is never dereferenced
                    auto image_state = GetImageState(dev_data, view_state->create_info.image);
                    if (image_state != nullptr) {
                        const VkImageCreateInfo *ici = &image_state->createInfo;
                        if ((ici->usage & usage_flag) == 0) {
                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, error_code, "DS",
                                            "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
                                            "IMAGE_USAGE flags (%s). %s",
                                            attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag),
                                            validation_error_map[error_code]);
                        }
                    }
                }
            }
        }
    }
    return skip;
}

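// Illustrative sketch (hypothetical values): MatchUsage() above passes only when the attachment's
// image was created with the usage bit the subpass needs, e.g. an image used as both a color and an
// input attachment must declare both bits at creation time.
static VkImageCreateInfo ExampleColorAttachmentImageInfo() {
    VkImageCreateInfo ici = {};
    ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    ici.imageType = VK_IMAGE_TYPE_2D;
    ici.format = VK_FORMAT_B8G8R8A8_UNORM;
    ici.extent = {1920, 1080, 1};
    ici.mipLevels = 1;
    ici.arrayLayers = 1;
    ici.samples = VK_SAMPLE_COUNT_1_BIT;
    ici.tiling = VK_IMAGE_TILING_OPTIMAL;
    ici.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    return ici;
}
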
// Validate VkFramebufferCreateInfo which includes:
// 1. attachmentCount equals renderPass attachmentCount
// 2. corresponding framebuffer and renderpass attachments have matching formats
// 3. corresponding framebuffer and renderpass attachments have matching sample counts
// 4. fb attachments only have a single mip level
// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
// 8. fb dimensions are within physical device limits
static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    bool skip = false;

    auto rp_state = GetRenderPassState(dev_data, pCreateInfo->renderPass);
    if (rp_state) {
        const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
            skip |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006d8, "DS",
                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer. %s",
                pCreateInfo->attachmentCount, rpci->attachmentCount, HandleToUint64(pCreateInfo->renderPass),
                validation_error_map[VALIDATION_ERROR_094006d8]);
        } else {
            // attachmentCounts match, so make sure corresponding attachment details line up
            const VkImageView *image_views = pCreateInfo->pAttachments;
            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = GetImageViewState(dev_data, image_views[i]);
                auto &ivci = view_state->create_info;
                if (ivci.format != rpci->pAttachments[i].format) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006e0, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a format of %s that does not match "
                        "the format of %s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
                        HandleToUint64(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_094006e0]);
                }
                const VkImageCreateInfo *ici = &GetImageState(dev_data, ivci.image)->createInfo;
                if (ici->samples != rpci->pAttachments[i].samples) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006e2, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
                        "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
                        HandleToUint64(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_094006e2]);
                }
                // Verify that view only has a single mip level
                if (ivci.subresourceRange.levelCount != 1) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, __LINE__, VALIDATION_ERROR_094006e6, "DS",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
                                    "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer. %s",
                                    i, ivci.subresourceRange.levelCount, validation_error_map[VALIDATION_ERROR_094006e6]);
                }
                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
                    (mip_height < pCreateInfo->height)) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006e4, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
                        "than the corresponding framebuffer dimensions. Here are the respective dimensions for attachment #%u, "
                        "framebuffer:\n"
                        "width: %u, %u\n"
                        "height: %u, %u\n"
                        "layerCount: %u, %u\n%s",
                        i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height, pCreateInfo->height,
                        ivci.subresourceRange.layerCount, pCreateInfo->layers, validation_error_map[VALIDATION_ERROR_094006e4]);
                }
                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006e8, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a non-identity swizzle. All framebuffer "
                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
                        "r swizzle = %s\n"
                        "g swizzle = %s\n"
                        "b swizzle = %s\n"
                        "a swizzle = %s\n"
                        "%s",
                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a),
                        validation_error_map[VALIDATION_ERROR_094006e8]);
                }
            }
        }
        // Verify correct attachment usage flags
        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
            // Verify input attachments:
            skip |=
                MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
                           pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_094006de);
            // Verify color attachments:
            skip |=
                MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
                           pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_094006da);
            // Verify depth/stencil attachments:
            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
                skip |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
                                   VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_094006dc);
            }
        }
    }
    // Verify FB dimensions are within physical device limits
    if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006ec, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. "
                        "Requested width: %u, device max: %u\n"
                        "%s",
                        pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
                        validation_error_map[VALIDATION_ERROR_094006ec]);
    }
    if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006f0, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. "
                        "Requested height: %u, device max: %u\n"
                        "%s",
                        pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
                        validation_error_map[VALIDATION_ERROR_094006f0]);
    }
    if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006f4, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. "
                        "Requested layers: %u, device max: %u\n"
                        "%s",
                        pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers,
                        validation_error_map[VALIDATION_ERROR_094006f4]);
    }
    // Verify FB dimensions are greater than zero
    if (pCreateInfo->width == 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006ea, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero. %s",
                        validation_error_map[VALIDATION_ERROR_094006ea]);
    }
    if (pCreateInfo->height == 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006ee, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero. %s",
                        validation_error_map[VALIDATION_ERROR_094006ee]);
    }
    if (pCreateInfo->layers == 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006f2, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero. %s",
                        validation_error_map[VALIDATION_ERROR_094006f2]);
    }
    return skip;
}

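// Illustrative app-side sketch (hypothetical handles and dimensions): a VkFramebufferCreateInfo that
// satisfies the checks above -- one view per renderpass attachment, a single mip level, identity
// swizzle, and dimensions no larger than the attachment's or the device limits.
static VkFramebuffer ExampleCreateFramebuffer(VkDevice device, VkRenderPass rp, VkImageView color_view) {
    VkFramebufferCreateInfo fbci = {};
    fbci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
    fbci.renderPass = rp;             // assumed to have attachmentCount == 1
    fbci.attachmentCount = 1;
    fbci.pAttachments = &color_view;  // format and samples must match rp attachment 0
    fbci.width = 1920;                // <= attachment mip-0 width and maxFramebufferWidth
    fbci.height = 1080;
    fbci.layers = 1;                  // <= the view's layerCount
    VkFramebuffer fb = VK_NULL_HANDLE;
    vkCreateFramebuffer(device, &fbci, nullptr, &fb);
    return fb;
}
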
// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
//  Return true if an error is encountered and callback returns true to skip call down chain
//   false indicates that call down chain should proceed
static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    // TODO : Verify that the renderPass this FB is created with is compatible with the FB
    bool skip = false;
    skip |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
    return skip;
}

// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
    // Shadow create info and store in map
    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
        new FRAMEBUFFER_STATE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));

    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        VkImageView view = pCreateInfo->pAttachments[i];
        auto view_state = GetImageViewState(dev_data, view);
        if (!view_state) {
            continue;
        }
        MT_FB_ATTACHMENT_INFO fb_info;
        fb_info.view_state = view_state;
        fb_info.image = view_state->create_info.image;
        fb_state->attachments.push_back(fb_info);
    }
    dev_data->frameBufferMap[fb] = std::move(fb_state);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
        lock.unlock();
    }
    return result;
}

static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node, we have not found a dependency path, so return false.
    if (processed_nodes.count(index)) return false;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists return true, else recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
        }
    } else {
        return true;
    }
    return false;
}

static bool CheckDependencyExists(const layer_data *dev_data, const uint32_t subpass,
                                  const std::vector<uint32_t> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node,
                                  bool &skip) {
    bool result = true;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (subpass == dependent_subpasses[k]) continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no direct dependency exists, an implicit (transitive) one still might. If not, throw an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                  FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                dependent_subpasses[k]);
                result = false;
            }
        }
    }
    return result;
}

static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment, return true, as subsequent nodes need to preserve the attachment.
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment) return true;
    }
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        if (attachment == subpass.pInputAttachments[j].attachment) return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
    }
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
    if (result && depth > 0) {
        bool has_preserved = false;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = true;
                break;
            }
        }
        if (!has_preserved) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}

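// Illustrative sketch (attachment index 2 is hypothetical): the fix CheckPreserved() above asks for --
// a middle subpass that neither reads nor writes an attachment, but sits between its writer and its
// reader, must list that attachment in pPreserveAttachments.
static void ExampleMarkPreserved(VkSubpassDescription *middle_subpass) {
    static const uint32_t preserved[] = {2};
    middle_subpass->preserveAttachmentCount = 1;
    middle_subpass->pPreserveAttachments = preserved;
}
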
// Return true when the half-open ranges [offset1, offset1 + size1) and [offset2, offset2 + size2)
// share at least one element; this also covers the case where one range fully contains the other.
template <class T>
bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
}

bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}

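// Minimal sanity sketch for the interval test above; assumes the counts are exact values (not
// VK_REMAINING_MIP_LEVELS / VK_REMAINING_ARRAY_LAYERS). Not called by the layer.
static inline void ExampleRangeOverlapChecks() {
    assert(isRangeOverlapping(0u, 4u, 2u, 4u));   // [0,4) and [2,6) share [2,4)
    assert(isRangeOverlapping(1u, 8u, 3u, 2u));   // [3,5) is fully contained in [1,9)
    assert(!isRangeOverlapping(0u, 2u, 2u, 2u));  // [0,2) and [2,4) touch but do not overlap
}
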
static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
                                 RENDER_PASS_STATE const *renderPass) {
    bool skip = false;
    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
    auto const pCreateInfo = renderPass->createInfo.ptr();
    auto const &subpass_to_node = renderPass->subpassToNode;
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_state_i = GetImageViewState(dev_data, viewi);
            auto view_state_j = GetImageViewState(dev_data, viewj);
            if (!view_state_i || !view_state_j) {
                continue;
            }
            auto view_ci_i = view_state_i->create_info;
            auto view_ci_j = view_state_j->create_info;
            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto image_data_i = GetImageState(dev_data, view_ci_i.image);
            auto image_data_j = GetImageState(dev_data, view_ci_j.image);
            if (!image_data_i || !image_data_j) {
                continue;
            }
            if (image_data_i->binding.mem == image_data_j->binding.mem &&
                isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
                                   image_data_j->binding.size)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
                                HandleToUint64(framebuffer->framebuffer), __LINE__, VALIDATION_ERROR_12200682, "DS",
                                "Attachment %d aliases attachment %d but doesn't "
                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
                                attachment, other_attachment, validation_error_map[VALIDATION_ERROR_12200682]);
            }
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
                                HandleToUint64(framebuffer->framebuffer), __LINE__, VALIDATION_ERROR_12200682, "DS",
                                "Attachment %d aliases attachment %d but doesn't "
                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
                                other_attachment, attachment, validation_error_map[VALIDATION_ERROR_12200682]);
            }
        }
    }
    // For each attachment, find the subpasses that use it.
    unordered_set<uint32_t> attachmentIndices;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        attachmentIndices.clear();
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            input_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
            attachmentIndices.insert(attachment);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }

            if (attachmentIndices.count(attachment)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
            }
        }
    }
    // If a dependency is needed, make sure one exists
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input then all subpasses that output must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
        }
        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
        }
    }
    // Loop through implicit dependencies: if this pass reads an attachment, make sure it is preserved in every pass
    // between the one that wrote it and this one.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
        }
    }
    return skip;
}

static bool CreatePassDAG(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo,
                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
    bool skip = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        DAGNode &subpass_node = subpass_to_node[i];
        subpass_node.pass = i;
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            if (dependency.srcSubpass == dependency.dstSubpass) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
            }
        } else if (dependency.srcSubpass > dependency.dstSubpass) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            has_self_dependency[dependency.srcSubpass] = true;
        } else {
            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
        }
    }
    return skip;
}

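// Illustrative sketch (hypothetical stage/access choices): the kind of explicit dependency that
// CheckDependencyExists() looks for and CreatePassDAG() records as an edge -- subpass 1 reads, as an
// input attachment, what subpass 0 wrote as a color attachment.
static VkSubpassDependency ExampleColorToInputDependency() {
    VkSubpassDependency dep = {};
    dep.srcSubpass = 0;
    dep.dstSubpass = 1;  // earlier pass -> later pass, as the DAG construction requires
    dep.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    dep.dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    dep.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    dep.dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
    dep.dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT;
    return dep;
}
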
VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool spirv_valid;

    if (PreCallValidateCreateShaderModule(dev_data, pCreateInfo, &spirv_valid))
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);

    if (res == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        unique_ptr<shader_module> new_shader_module(spirv_valid ? new shader_module(pCreateInfo) : new shader_module());
        dev_data->shaderModuleMap[*pShaderModule] = std::move(new_shader_module);
    }
    return res;
}

static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
    bool skip = false;
    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_12200684, "DS",
                        "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d. %s", type,
                        attachment, attachment_count, validation_error_map[VALIDATION_ERROR_12200684]);
    }
    return skip;
}

static bool IsPowerOfTwo(unsigned x) { return x && !(x & (x - 1)); }

static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
    bool skip = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_14000698, "DS",
                            "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS. %s", i,
                            validation_error_map[VALIDATION_ERROR_14000698]);
        }

        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            uint32_t attachment = subpass.pPreserveAttachments[j];
            if (attachment == VK_ATTACHMENT_UNUSED) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_140006aa, "DS",
                                "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED. %s", j,
                                validation_error_map[VALIDATION_ERROR_140006aa]);
            } else {
                skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");

                bool found = (subpass.pDepthStencilAttachment != NULL && subpass.pDepthStencilAttachment->attachment == attachment);
                for (uint32_t r = 0; !found && r < subpass.inputAttachmentCount; ++r) {
                    found = (subpass.pInputAttachments[r].attachment == attachment);
                }
                for (uint32_t r = 0; !found && r < subpass.colorAttachmentCount; ++r) {
                    found = (subpass.pColorAttachments[r].attachment == attachment) ||
                            (subpass.pResolveAttachments != NULL && subpass.pResolveAttachments[r].attachment == attachment);
                }
                if (found) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_140006ac, "DS",
                        "CreateRenderPass: subpass %u pPreserveAttachments[%u] (%u) must not be used elsewhere in the subpass. %s",
                        i, j, attachment, validation_error_map[VALIDATION_ERROR_140006ac]);
                }
            }
        }

        auto subpass_performs_resolve =
            subpass.pResolveAttachments &&
            std::any_of(subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
                        [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });

        unsigned sample_count = 0;

        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment;
            if (subpass.pResolveAttachments) {
                attachment = subpass.pResolveAttachments[j].attachment;
                skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");

                if (!skip && attachment != VK_ATTACHMENT_UNUSED &&
                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, __LINE__, VALIDATION_ERROR_140006a2, "DS",
                                    "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
                                    "which must have VK_SAMPLE_COUNT_1_BIT but has %s. %s",
                                    i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
                                    validation_error_map[VALIDATION_ERROR_140006a2]);
                }

                if (!skip && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
                    subpass.pColorAttachments[j].attachment == VK_ATTACHMENT_UNUSED) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, __LINE__, VALIDATION_ERROR_1400069e, "DS",
                                    "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
                                    "but the corresponding color attachment is VK_ATTACHMENT_UNUSED. %s",
                                    i, attachment, validation_error_map[VALIDATION_ERROR_1400069e]);
                }
            }
            attachment = subpass.pColorAttachments[j].attachment;
            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");

            if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;

                if (subpass_performs_resolve && pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, __LINE__, VALIDATION_ERROR_140006a0, "DS",
                                    "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
                                    "which has VK_SAMPLE_COUNT_1_BIT. %s",
                                    i, attachment, validation_error_map[VALIDATION_ERROR_140006a0]);
                }

                if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED) {
                    const auto &color_desc = pCreateInfo->pAttachments[attachment];
                    const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
                    if (color_desc.format != resolve_desc.format) {
                        skip |=
                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, __LINE__, VALIDATION_ERROR_140006a4, "DS",
                                    "CreateRenderPass:  Subpass %u pColorAttachments[%u] resolves to an attachment with a "
                                    "different format. "
                                    "color format: %u, resolve format: %u. %s",
                                    i, j, color_desc.format, resolve_desc.format, validation_error_map[VALIDATION_ERROR_140006a4]);
                    }
                }
            }
        }

        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");

            if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
            }
        }

        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
        }

        if (sample_count && !IsPowerOfTwo(sample_count)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_0082b401, "DS",
                            "CreateRenderPass:  Subpass %u attempts to render to "
                            "attachments with inconsistent sample counts. %s",
                            i, validation_error_map[VALIDATION_ERROR_0082b401]);
        }
    }
    return skip;
}

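// Illustrative sketch (attachment indices 0 and 1 are hypothetical): a resolve setup that satisfies
// the checks above -- the color attachment is multisampled, the resolve target has
// VK_SAMPLE_COUNT_1_BIT, and the two attachment descriptions share the same format.
static void ExampleResolveReferences(VkSubpassDescription *subpass) {
    static const VkAttachmentReference color = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};    // e.g. 4x MSAA
    static const VkAttachmentReference resolve = {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};  // 1 sample
    subpass->colorAttachmentCount = 1;
    subpass->pColorAttachments = &color;
    subpass->pResolveAttachments = &resolve;
}
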
static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass, uint32_t index, bool is_read) {
    if (index == VK_ATTACHMENT_UNUSED) return;

    if (!render_pass->attachment_first_read.count(index)) render_pass->attachment_first_read[index] = is_read;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
    //       ValidateLayouts.
    skip |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        skip |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].srcStageMask, "vkCreateRenderPass()",
                                             VALIDATION_ERROR_13e006b8, VALIDATION_ERROR_13e006bc);
        skip |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].dstStageMask, "vkCreateRenderPass()",
                                             VALIDATION_ERROR_13e006ba, VALIDATION_ERROR_13e006be);
    }
    if (!skip) {
        skip |= ValidateLayouts(dev_data, device, pCreateInfo);
    }
    lock.unlock();

    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);

    if (VK_SUCCESS == result) {
        lock.lock();

        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
        skip |= CreatePassDAG(dev_data, pCreateInfo, subpass_to_node, has_self_dependency);

        auto render_pass = unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo));
        render_pass->renderPass = *pRenderPass;
        render_pass->hasSelfDependency = has_self_dependency;
        render_pass->subpassToNode = subpass_to_node;

        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
                MarkAttachmentFirstUse(render_pass.get(), subpass.pColorAttachments[j].attachment, false);

                // resolve attachments are considered to be written
                if (subpass.pResolveAttachments) {
                    MarkAttachmentFirstUse(render_pass.get(), subpass.pResolveAttachments[j].attachment, false);
                }
            }
            if (subpass.pDepthStencilAttachment) {
                MarkAttachmentFirstUse(render_pass.get(), subpass.pDepthStencilAttachment->attachment, false);
            }
            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
                MarkAttachmentFirstUse(render_pass.get(), subpass.pInputAttachments[j].attachment, true);
            }
        }

        dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
    }
    return result;
}

static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, char const *cmd_name,
                                         UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), __LINE__, error_code, "DS",
                        "Cannot execute command %s on a secondary command buffer. %s", cmd_name, validation_error_map[error_code]);
    }
    return skip;
}

static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip = false;
    const safe_VkFramebufferCreateInfo *pFramebufferInfo =
        &GetFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
    if (pRenderPassBegin->renderArea.offset.x < 0 ||
        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
        pRenderPassBegin->renderArea.offset.y < 0 ||
        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
        skip |= static_cast<bool>(log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with renderArea not within the bounds of the "
            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
            "height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
    }
    return skip;
}

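// Illustrative app-side sketch (the 1920x1080 framebuffer size is hypothetical): a renderArea that
// VerifyRenderAreaBounds() above accepts -- nonnegative offset, and offset + extent within the
// framebuffer dimensions.
static VkRenderPassBeginInfo ExampleRenderPassBegin(VkRenderPass rp, VkFramebuffer fb) {
    VkRenderPassBeginInfo begin = {};
    begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    begin.renderPass = rp;
    begin.framebuffer = fb;                  // assumed created at 1920x1080
    begin.renderArea.offset = {0, 0};        // must not be negative
    begin.renderArea.extent = {1920, 1080};  // offset + extent <= framebuffer size
    begin.clearValueCount = 0;               // fine only if no attachment uses VK_ATTACHMENT_LOAD_OP_CLEAR
    return begin;
}
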
// For a stencil-only format only the stencil[Load|Store]Op is relevant; for a color or depth-only attachment only the
// [load|store]Op is; combined depth/stencil formats check both ops.
// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
template <typename T>
static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
    if (color_depth_op != op && stencil_op != op) {
        return false;
    }
    bool check_color_depth_load_op = !FormatIsStencilOnly(format);
    bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;

    return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op)));
}

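// Minimal sanity sketch for the predicate above (format choices are illustrative; not called by the layer):
static inline void ExampleLoadOpMatching() {
    // D24S8 carries both aspects, so a match on either op is enough.
    assert(FormatSpecificLoadAndStoreOpSettings(VK_FORMAT_D24_UNORM_S8_UINT,
                                                VK_ATTACHMENT_LOAD_OP_LOAD,   // depth op
                                                VK_ATTACHMENT_LOAD_OP_CLEAR,  // stencil op
                                                VK_ATTACHMENT_LOAD_OP_CLEAR));
    // S8 is stencil-only: the color/depth op is ignored, so this does not match CLEAR.
    assert(!FormatSpecificLoadAndStoreOpSettings(VK_FORMAT_S8_UINT,
                                                 VK_ATTACHMENT_LOAD_OP_CLEAR,  // ignored for stencil-only
                                                 VK_ATTACHMENT_LOAD_OP_LOAD,   // stencil op
                                                 VK_ATTACHMENT_LOAD_OP_CLEAR));
}
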
VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                              VkSubpassContents contents) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
    auto render_pass_state = pRenderPassBegin ? GetRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
    auto framebuffer = pRenderPassBegin ? GetFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
    if (cb_node) {
        if (render_pass_state) {
            uint32_t clear_op_size = 0;  // Make sure pClearValues is at least as large as the last LOAD_OP_CLEAR
            cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
            for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
                auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
                    clear_op_size = static_cast<uint32_t>(i) + 1;
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
                        return false;
                    };
                    cb_node->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
                                                                pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
                        return false;
                    };
                    cb_node->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
                                                                pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_LOAD)) {
                    std::function<bool()> function = [=]() {
                        return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
                                                          "vkCmdBeginRenderPass()");
                    };
                    cb_node->validate_functions.push_back(function);
                }
                if (render_pass_state->attachment_first_read[i]) {
                    std::function<bool()> function = [=]() {
                        return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
                                                          "vkCmdBeginRenderPass()");
                    };
                    cb_node->validate_functions.push_back(function);
                }
            }
            if (clear_op_size > pRenderPassBegin->clearValueCount) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                    HandleToUint64(render_pass_state->renderPass), __LINE__, VALIDATION_ERROR_1200070c, "DS",
                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u, but there must "
                    "be at least %u entries in the pClearValues array because the highest-indexed attachment in renderPass "
                    "0x%" PRIx64
                    " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is attachment %u. Note that the pClearValues array "
                    "is indexed by attachment number, so even if some pClearValues entries between 0 and %u correspond to "
                    "attachments that aren't cleared, they will be ignored. %s",
                    pRenderPassBegin->clearValueCount, clear_op_size, HandleToUint64(render_pass_state->renderPass),
                    clear_op_size - 1, clear_op_size - 1, validation_error_map[VALIDATION_ERROR_1200070c]);
            }
            skip |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
            skip |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin,
7575                                                          GetFramebufferState(dev_data, pRenderPassBegin->framebuffer));
7576            skip |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00017);
7577            skip |= ValidateDependencies(dev_data, framebuffer, render_pass_state);
7578            skip |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00019);
7579            skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBeginRenderPass()", VK_QUEUE_GRAPHICS_BIT,
7580                                          VALIDATION_ERROR_17a02415);
7581            skip |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
7582            UpdateCmdBufferLastCmd(cb_node, CMD_BEGINRENDERPASS);
7583            cb_node->activeRenderPass = render_pass_state;
7584            // This is a shallow copy as that is all that is needed for now
7585            cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
7586            cb_node->activeSubpass = 0;
7587            cb_node->activeSubpassContents = contents;
7588            cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
7589            // Connect this framebuffer and its children to this cmdBuffer
7590            AddFramebufferBinding(dev_data, cb_node, framebuffer);
7591            // transition attachments to the correct layouts for beginning of renderPass and first subpass
7592            TransitionBeginRenderPassLayouts(dev_data, cb_node, render_pass_state, framebuffer);
7593        }
7594    }
7595    lock.unlock();
7596    if (!skip) {
7597        dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
7598    }
7599}
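
// Illustrative app-side sketch (assumed handles and extents): a vkCmdBeginRenderPass() call
// that satisfies the pClearValues sizing rule checked above.
//
//     VkClearValue clears[2];
//     clears[0].color = {{0.0f, 0.0f, 0.0f, 1.0f}};  // attachment 0 uses LOAD_OP_CLEAR
//     clears[1].depthStencil = {1.0f, 0};            // attachment 1 uses LOAD_OP_CLEAR
//     VkRenderPassBeginInfo rp_begin = {};
//     rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
//     rp_begin.renderPass = render_pass;             // assumed handle
//     rp_begin.framebuffer = framebuffer;            // assumed handle
//     rp_begin.renderArea = {{0, 0}, {fb_width, fb_height}};  // assumed extents
//     rp_begin.clearValueCount = 2;                  // >= highest CLEAR attachment index + 1
//     rp_begin.pClearValues = clears;
//     vkCmdBeginRenderPass(cmd_buf, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);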

VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600019);
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdNextSubpass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b602415);
        skip |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
        UpdateCmdBufferLastCmd(pCB, CMD_NEXTSUBPASS);
        skip |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600017);

        if (pCB->activeRenderPass) {  // Guard against a null activeRenderPass; outsideRenderPass() above flags that case
            auto subpassCount = pCB->activeRenderPass->createInfo.subpassCount;
            if (pCB->activeSubpass == subpassCount - 1) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
                                VALIDATION_ERROR_1b60071a, "DS",
                                "vkCmdNextSubpass(): Attempted to advance beyond final subpass. %s",
                                validation_error_map[VALIDATION_ERROR_1b60071a]);
            }
        }
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);

    if (pCB) {
        lock.lock();
        pCB->activeSubpass++;
        pCB->activeSubpassContents = contents;
        TransitionSubpassLayouts(dev_data, pCB, pCB->activeRenderPass, pCB->activeSubpass,
                                 GetFramebufferState(dev_data, pCB->activeRenderPassBeginInfo.framebuffer));
    }
}

VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto pCB = GetCBNode(dev_data, commandBuffer);
    FRAMEBUFFER_STATE *framebuffer = NULL;
    if (pCB) {
        RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
        framebuffer = GetFramebufferState(dev_data, pCB->activeFramebuffer);
        if (rp_state) {
            if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
                                VALIDATION_ERROR_1b00071c, "DS", "vkCmdEndRenderPass(): Called before reaching final subpass. %s",
                                validation_error_map[VALIDATION_ERROR_1b00071c]);
            }

            for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
                auto pAttachment = &rp_state->createInfo.pAttachments[i];
                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp, pAttachment->stencilStoreOp,
                                                         VK_ATTACHMENT_STORE_OP_STORE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
                                                                pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                }
            }
        }
        skip |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_1b000017);
        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_1b000019);
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdEndRenderPass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b002415);
        skip |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
        UpdateCmdBufferLastCmd(pCB, CMD_ENDRENDERPASS);
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);

    if (pCB) {
        lock.lock();
        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, framebuffer);
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpass = 0;
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}
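
// Example (a sketch of the store-op tracking above): with storeOp = VK_ATTACHMENT_STORE_OP_STORE the
// attachment's memory is marked valid at the end of the pass, so a later read (e.g. a
// vkCmdCopyImageToBuffer of the attachment image) passes validation; with
// VK_ATTACHMENT_STORE_OP_DONT_CARE it is marked invalid, and such a read would be flagged as
// reading undefined content.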

static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
                                        uint32_t secondaryAttach, const char *msg) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   HandleToUint64(secondaryBuffer), __LINE__, VALIDATION_ERROR_1b2000c4, "DS",
                   "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64
                   " which has a render pass "
                   "that is not compatible with the Primary Cmd Buffer's current render pass. "
                   "Attachment %u is not compatible with %u: %s. %s",
                   HandleToUint64(secondaryBuffer), primaryAttach, secondaryAttach, msg,
                   validation_error_map[VALIDATION_ERROR_1b2000c4]);
}

static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
                                            uint32_t secondaryAttach, bool is_multi) {
    bool skip = false;
    if (primaryPassCI->attachmentCount <= primaryAttach) {
        primaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
        secondaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
        return skip;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
        skip |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
                                            "The first is unused while the second is not.");
        return skip;
    }
    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
        skip |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
                                            "The second is unused while the first is not.");
        return skip;
    }
    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
        skip |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
    }
    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
        skip |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
    }
    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
        skip |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
    }
    return skip;
}
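
// Example (a sketch of the checks above): attachments are compatible when format and sample count
// match (and flags, for multi-subpass passes). A primary-pass attachment of
// VK_FORMAT_B8G8R8A8_UNORM / VK_SAMPLE_COUNT_1_BIT is incompatible with a secondary-pass attachment
// of VK_FORMAT_R8G8B8A8_UNORM ("They have different formats.") or one created with
// VK_SAMPLE_COUNT_4_BIT ("They have different samples.").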

static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
    bool skip = false;
    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
                                                secondaryPassCI, secondary_input_attach, is_multi);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
                                                secondaryPassCI, secondary_color_attach, is_multi);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach, secondaryBuffer,
                                                secondaryPassCI, secondary_resolve_attach, is_multi);
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach, secondaryBuffer,
                                            secondaryPassCI, secondary_depthstencil_attach, is_multi);
    return skip;
}

// Verify that the renderPass CreateInfos for the primary and secondary command buffers are compatible.
//  This function deals directly with the CreateInfo; there are overloaded versions below that can take the renderPass handle and
//  will then feed into this function.
static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
                                            VkRenderPassCreateInfo const *secondaryPassCI) {
    bool skip = false;

    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(primaryBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
                        " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
                        " that has a subpassCount of %u.",
                        HandleToUint64(secondaryBuffer), secondaryPassCI->subpassCount, HandleToUint64(primaryBuffer),
                        primaryPassCI->subpassCount);
    } else {
        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
            skip |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
                                                 primaryPassCI->subpassCount > 1);
        }
    }
    return skip;
}
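
// Example (sketch): two render passes with subpassCount 1 vs 2 are immediately incompatible; with
// equal subpassCount, each subpass is compared attachment-by-attachment (inputs, colors, resolves,
// depth/stencil) via validateSubpassCompatibility() above.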

static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
    bool skip = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip;
    }
    VkFramebuffer primary_fb = pCB->activeFramebuffer;
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb != VK_NULL_HANDLE) {
        if (primary_fb != secondary_fb) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(primaryBuffer), __LINE__, VALIDATION_ERROR_1b2000c6, "DS",
                            "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64
                            " which has a framebuffer 0x%" PRIx64
                            " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ". %s",
                            HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb), HandleToUint64(primary_fb),
                            validation_error_map[VALIDATION_ERROR_1b2000c6]);
        }
        auto fb = GetFramebufferState(dev_data, secondary_fb);
        if (!fb) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(primaryBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                            "which has invalid framebuffer 0x%" PRIx64 ".",
                            (void *)secondaryBuffer, HandleToUint64(secondary_fb));
            return skip;
        }
        auto cb_renderpass = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
        if (cb_renderpass->renderPass != fb->createInfo.renderPass) {
            skip |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
                                                    cb_renderpass->createInfo.ptr());
        }
    }
    return skip;
}

static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
    bool skip = false;
    unordered_set<int> activeTypes;
    for (auto queryObject : pCB->activeQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end()) {
            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
                pSubCB->beginInfo.pInheritanceInfo) {
                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_1b2000d0, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64
                        ". Pipeline statistics are being queried, so the command "
                        "buffer must have all bits set on the queryPool. %s",
                        pCB->commandBuffer, HandleToUint64(queryPoolData->first), validation_error_map[VALIDATION_ERROR_1b2000d0]);
                }
            }
            activeTypes.insert(queryPoolData->second.createInfo.queryType);
        }
    }
    for (auto queryObject : pSubCB->startedQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                            "which has invalid active query pool 0x%" PRIx64
                            " of type %d, but a query of that type has been started on "
                            "secondary Cmd Buffer 0x%p.",
                            pCB->commandBuffer, HandleToUint64(queryPoolData->first), queryPoolData->second.createInfo.queryType,
                            pSubCB->commandBuffer);
        }
    }

    auto primary_pool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
    auto secondary_pool = GetCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
        skip |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
                    "vkCmdExecuteCommands(): Primary command buffer 0x%p"
                    " created in queue family %d has secondary command buffer 0x%p created in queue family %d.",
                    pCB->commandBuffer, primary_pool->queueFamilyIndex, pSubCB->commandBuffer, secondary_pool->queueFamilyIndex);
    }

    return skip;
}

VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
                                              const VkCommandBuffer *pCommandBuffers) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        GLOBAL_CB_NODE *pSubCB = NULL;
        for (uint32_t i = 0; i < commandBuffersCount; i++) {
            pSubCB = GetCBNode(dev_data, pCommandBuffers[i]);
            assert(pSubCB);
            if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000b0, "DS",
                            "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
                            "array. All cmd buffers in pCommandBuffers array must be secondary. %s",
                            pCommandBuffers[i], i, validation_error_map[VALIDATION_ERROR_1b2000b0]);
            } else if (pCB->activeRenderPass) {  // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
                if (pSubCB->beginInfo.pInheritanceInfo != nullptr) {
                    auto secondary_rp_state = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
                    if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000c0, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
                            ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT "
                            "set. %s",
                            pCommandBuffers[i], HandleToUint64(pCB->activeRenderPass->renderPass),
                            validation_error_map[VALIDATION_ERROR_1b2000c0]);
                    } else {
                        // Make sure render pass is compatible with parent command buffer pass if has continue
                        if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
                            skip |=
                                validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
                                                                pCommandBuffers[i], secondary_rp_state->createInfo.ptr());
                        }
                        //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
                        skip |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
                    }
                    string errorString = "";
                    // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
                    if ((pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) &&
                        !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
                                                         secondary_rp_state->createInfo.ptr(), errorString)) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
                            ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                            pCommandBuffers[i], HandleToUint64(pSubCB->beginInfo.pInheritanceInfo->renderPass), commandBuffer,
                            HandleToUint64(pCB->activeRenderPass->renderPass), errorString.c_str());
                    }
                }
            }
            // TODO(mlentine): Move more logic into this method
            skip |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
            skip |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()", 0, VALIDATION_ERROR_1b2000b2);
            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                if (pSubCB->in_use.load() || pCB->linkedCommandBuffers.count(pSubCB)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), __LINE__,
                                    VALIDATION_ERROR_1b2000b4, "DS",
                                    "Attempt to simultaneously execute command buffer 0x%p"
                                    " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set! %s",
                                    pCB->commandBuffer, validation_error_map[VALIDATION_ERROR_1b2000b4]);
                }
                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) "
                        "does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
                        "(0x%p) to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set, even though it does.",
                        pCommandBuffers[i], pCB->commandBuffer);
                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
                }
            }
            if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000ca, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer "
                            "(0x%p) cannot be submitted with a query in "
                            "flight and inherited queries not "
                            "supported on this device. %s",
                            pCommandBuffers[i], validation_error_map[VALIDATION_ERROR_1b2000ca]);
            }
            // TODO: separate validate from update! This is very tangled.
            // Propagate layout transitions to the primary cmd buffer
            for (auto ilm_entry : pSubCB->imageLayoutMap) {
                SetLayout(dev_data, pCB, ilm_entry.first, ilm_entry.second);
            }
            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
            pCB->linkedCommandBuffers.insert(pSubCB);
            pSubCB->linkedCommandBuffers.insert(pCB);
            for (auto &function : pSubCB->queryUpdates) {
                pCB->queryUpdates.push_back(function);
            }
        }
        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands()", VALIDATION_ERROR_1b200019);
        skip |=
            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdExecuteCommands()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b202415);
        skip |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
        UpdateCmdBufferLastCmd(pCB, CMD_EXECUTECOMMANDS);
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
}
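
// Illustrative app-side sketch (assumed handles): recording a secondary command buffer that can
// legally be executed inside a render pass by vkCmdExecuteCommands(), per the checks above.
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.renderPass = render_pass;     // must be compatible with the primary's active pass
//     inherit.subpass = 0;
//     inherit.framebuffer = framebuffer;    // VK_NULL_HANDLE is also allowed
//     VkCommandBufferBeginInfo begin = {};
//     begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondary_cb, &begin);
//     // ... record draw commands ...
//     vkEndCommandBuffer(secondary_cb);
//     // Later, inside the primary's render pass (begun with
//     // VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS):
//     vkCmdExecuteCommands(primary_cb, 1, &secondary_cb);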

VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
                                         void **ppData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    bool skip = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        // TODO : This could be more fine-grained to track just the region that is valid
        mem_info->global_valid = true;
        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
        skip |= ValidateMapImageLayouts(dev_data, device, mem_info, offset, end_offset);
        // TODO : Do we need to create new "bound_range" for the mapped range?
        SetMemRangesValid(dev_data, mem_info, offset, end_offset);
        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem), __LINE__, VALIDATION_ERROR_31200554, "MEM",
                            "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64 ". %s",
                            HandleToUint64(mem), validation_error_map[VALIDATION_ERROR_31200554]);
        }
    }
    skip |= ValidateMapMemRange(dev_data, mem, offset, size);
    lock.unlock();

    if (!skip) {
        result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
        if (VK_SUCCESS == result) {
            lock.lock();
            // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
            storeMemRanges(dev_data, mem, offset, size);
            initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
            lock.unlock();
        }
    }
    return result;
}
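
// Illustrative app-side sketch (assumed handles/values): mapping host-visible memory in a way that
// satisfies the checks above.
//
//     void *data = nullptr;
//     // The memory must come from a type with VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
//     // or VALIDATION_ERROR_31200554 fires.
//     VkResult err = vkMapMemory(device, memory, 0 /*offset*/, VK_WHOLE_SIZE, 0 /*flags*/, &data);
//     if (err == VK_SUCCESS) {
//         memcpy(data, src, src_size);   // src/src_size are assumed
//         vkUnmapMemory(device, memory);
//     }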

VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;

    std::unique_lock<std::mutex> lock(global_lock);
    skip |= deleteMemRanges(dev_data, mem);
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.UnmapMemory(device, mem);
    }
}

static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
                                   const VkMappedMemoryRange *pMemRanges) {
    bool skip = false;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_info = GetMemObjInfo(dev_data, pMemRanges[i].memory);
        if (mem_info) {
            if (pMemRanges[i].size == VK_WHOLE_SIZE) {
                if (mem_info->mem_range.offset > pMemRanges[i].offset) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(pMemRanges[i].memory), __LINE__, VALIDATION_ERROR_0c20055c, "MEM",
                                "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
                                ") is less than the Memory Object's offset "
                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
                                funcName, static_cast<size_t>(pMemRanges[i].offset),
                                static_cast<size_t>(mem_info->mem_range.offset), validation_error_map[VALIDATION_ERROR_0c20055c]);
                }
            } else {
                const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
                                              ? mem_info->alloc_info.allocationSize
                                              : (mem_info->mem_range.offset + mem_info->mem_range.size);
                if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
                    (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(pMemRanges[i].memory), __LINE__, VALIDATION_ERROR_0c20055a, "MEM",
                                "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
                                ") exceeds the Memory Object's upper bound "
                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
                                funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
                                static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end),
                                validation_error_map[VALIDATION_ERROR_0c20055a]);
                }
            }
        }
    }
    return skip;
}
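
// Example (a sketch of the range rule above): for a mapping created with
// vkMapMemory(device, mem, 64, 256, ...), a VkMappedMemoryRange with offset 64 and size 256 (or
// VK_WHOLE_SIZE) passes, while offset 32 (below the mapped start) or offset 64 with size 512
// (past the mapped end, since 64 + 256 = 320 < 576) would be rejected.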

static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
                                                     const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
        if (mem_info) {
            if (mem_info->shadow_copy) {
                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
                                        ? mem_info->mem_range.size
                                        : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
                char *data = static_cast<char *>(mem_info->shadow_copy);
                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges[i].memory), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "Memory underflow was detected on mem obj 0x%" PRIxLEAST64, HandleToUint64(mem_ranges[i].memory));
                    }
                }
                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges[i].memory), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "Memory overflow was detected on mem obj 0x%" PRIxLEAST64, HandleToUint64(mem_ranges[i].memory));
                    }
                }
                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
            }
        }
    }
    return skip;
}
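
// Shadow-copy layout assumed by the function above and the one below (a sketch):
//
//     shadow_copy:  [ pad (shadow_pad_size bytes) | user data (size bytes) | pad ]
//
// Both pads are presumably filled with NoncoherentMemoryFillValue when the memory is mapped (see
// initializeAndTrackMemory() in MapMemory() above); any modified pad byte seen at flush time
// indicates the app wrote outside its mapped range (underflow/overflow).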

static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
        if (mem_info && mem_info->shadow_copy) {
            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
                                    ? mem_info->mem_range.size
                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
            char *data = static_cast<char *>(mem_info->shadow_copy);
            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
        }
    }
}

static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
                                                  const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
        if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges[i].memory), __LINE__, VALIDATION_ERROR_0c20055e, "MEM",
                            "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
                            func_name, i, mem_ranges[i].offset, atom_size, validation_error_map[VALIDATION_ERROR_0c20055e]);
        }
        if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges[i].memory), __LINE__, VALIDATION_ERROR_0c200560, "MEM",
                            "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
                            func_name, i, mem_ranges[i].size, atom_size, validation_error_map[VALIDATION_ERROR_0c200560]);
        }
    }
    return skip;
}
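
// Example (a sketch of the limit checks above): if nonCoherentAtomSize is 0x40, a range with offset
// 0x80 and size 0x100 passes both checks, while offset 0x70 (not a multiple of 0x40) or an explicit
// size of 0x90 would each be flagged. VK_WHOLE_SIZE is exempt from the size check.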

static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                   const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    std::lock_guard<std::mutex> lock(global_lock);
    skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
    skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                       const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
        result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}

static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                        const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    std::lock_guard<std::mutex> lock(global_lock);
    skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
    return skip;
}

static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                       const VkMappedMemoryRange *mem_ranges) {
    std::lock_guard<std::mutex> lock(global_lock);
    // Update our shadow copy with modified driver data
    CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
}

VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                            const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
        result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
        if (result == VK_SUCCESS) {
            PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
        }
    }
    return result;
}

static bool PreCallValidateBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
                                           VkDeviceSize memoryOffset) {
    bool skip = false;
    if (image_state) {
        std::unique_lock<std::mutex> lock(global_lock);
        // Track objects tied to memory
        uint64_t image_handle = HandleToUint64(image);
        skip = ValidateSetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, "vkBindImageMemory()");
        if (!image_state->memory_requirements_checked) {
            // There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
            // BindImageMemory, but it's implied in that the memory being bound must conform with the VkMemoryRequirements from
            // vkGetImageMemoryRequirements()
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            image_handle, __LINE__, DRAWSTATE_INVALID_IMAGE, "DS",
                            "vkBindImageMemory(): Binding memory to image 0x%" PRIxLEAST64
                            " but vkGetImageMemoryRequirements() has not been called on that image.",
                            image_handle);
            // Make the call for them so we can verify the state
            lock.unlock();
            dev_data->dispatch_table.GetImageMemoryRequirements(dev_data->device, image, &image_state->requirements);
            lock.lock();
        }

        // Validate bound memory range information
        auto mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            skip |= ValidateInsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
                                                   image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, "vkBindImageMemory()");
            skip |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, "vkBindImageMemory()",
                                        VALIDATION_ERROR_1740082e);
        }

        // Validate memory requirements alignment
        if (SafeModulo(memoryOffset, image_state->requirements.alignment) != 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            image_handle, __LINE__, VALIDATION_ERROR_17400830, "DS",
                            "vkBindImageMemory(): memoryOffset is 0x%" PRIxLEAST64
                            " but must be an integer multiple of the "
                            "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetImageMemoryRequirements with image. %s",
                            memoryOffset, image_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_17400830]);
        }

        // Validate memory requirements size (guard against a null mem_info when the memory handle is unknown)
        if (mem_info && (image_state->requirements.size > mem_info->alloc_info.allocationSize - memoryOffset)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            image_handle, __LINE__, VALIDATION_ERROR_17400832, "DS",
                            "vkBindImageMemory(): memory size minus memoryOffset is 0x%" PRIxLEAST64
                            " but must be at least as large as "
                            "VkMemoryRequirements::size value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetImageMemoryRequirements with image. %s",
                            mem_info->alloc_info.allocationSize - memoryOffset, image_state->requirements.size,
                            validation_error_map[VALIDATION_ERROR_17400832]);
        }
    }
    return skip;
}
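
// Illustrative app-side sketch (assumed handles; ChooseMemoryType is a hypothetical helper): the
// call order the warning above nudges apps toward -- query requirements first, then allocate and
// bind with a conforming offset.
//
//     VkMemoryRequirements reqs = {};
//     vkGetImageMemoryRequirements(device, image, &reqs);
//     VkMemoryAllocateInfo alloc = {};
//     alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
//     alloc.allocationSize = reqs.size;
//     alloc.memoryTypeIndex = ChooseMemoryType(reqs.memoryTypeBits);  // hypothetical helper
//     VkDeviceMemory mem = VK_NULL_HANDLE;
//     vkAllocateMemory(device, &alloc, nullptr, &mem);
//     // memoryOffset (0 here) must be a multiple of reqs.alignment, and
//     // allocationSize - memoryOffset must be >= reqs.size.
//     vkBindImageMemory(device, image, mem, 0);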

static void PostCallRecordBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
                                          VkDeviceSize memoryOffset) {
    if (image_state) {
        std::unique_lock<std::mutex> lock(global_lock);
        // Track bound memory range information
        auto mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
                                   image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
        }

        // Track objects tied to memory
        uint64_t image_handle = HandleToUint64(image);
        SetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, "vkBindImageMemory()");

        image_state->binding.mem = mem;
        image_state->binding.offset = memoryOffset;
        image_state->binding.size = image_state->requirements.size;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    auto image_state = GetImageState(dev_data, image);
    bool skip = PreCallValidateBindImageMemory(dev_data, image, image_state, mem, memoryOffset);
    if (!skip) {
        result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
        if (result == VK_SUCCESS) {
            PostCallRecordBindImageMemory(dev_data, image, image_state, mem, memoryOffset);
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
    bool skip = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto event_state = GetEventNode(dev_data, event);
    if (event_state) {
        event_state->needsSignaled = false;
        event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
        if (event_state->write_in_use) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                            HandleToUint64(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
                            HandleToUint64(event));
        }
    }
    lock.unlock();
    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
    for (auto queue_data : dev_data->queueMap) {
        auto event_entry = queue_data.second.eventToStageMap.find(event);
        if (event_entry != queue_data.second.eventToStageMap.end()) {
            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
        }
    }
    if (!skip) result = dev_data->dispatch_table.SetEvent(device, event);
    return result;
}
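
// Example (sketch): a host-side signal that the stage-mask update above makes visible to
// device-side waits. Handles are assumed.
//
//     vkSetEvent(device, event);   // host signal; layer records VK_PIPELINE_STAGE_HOST_BIT
//     // A previously recorded vkCmdWaitEvents(cb, 1, &event,
//     //     VK_PIPELINE_STAGE_HOST_BIT /*srcStageMask*/, ... ) may proceed once submitted.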
8352
8353VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
8354                                               VkFence fence) {
8355    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
8356    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8357    bool skip = false;
8358    std::unique_lock<std::mutex> lock(global_lock);
8359    auto pFence = GetFenceNode(dev_data, fence);
8360    auto pQueue = GetQueueState(dev_data, queue);
8361
8362    // First verify that fence is not in use
8363    skip |= ValidateFenceForSubmit(dev_data, pFence);
8364
8365    if (pFence) {
8366        SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount));
8367    }
8368
8369    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
8370        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
8371        // Track objects tied to memory
8372        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
8373            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
8374                auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
8375                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
8376                                        HandleToUint64(bindInfo.pBufferBinds[j].buffer), kVulkanObjectTypeBuffer))
8377                    skip = true;
8378            }
8379        }
8380        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
8381            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
8382                auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
8383                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
8384                                        HandleToUint64(bindInfo.pImageOpaqueBinds[j].image), kVulkanObjectTypeImage))
8385                    skip = true;
8386            }
8387        }
8388        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
8389            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
8390                auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
8391                // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
                VkDeviceSize size = (VkDeviceSize)sparse_binding.extent.depth * sparse_binding.extent.height *
                                    sparse_binding.extent.width * 4;
8393                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
8394                                        HandleToUint64(bindInfo.pImageBinds[j].image), kVulkanObjectTypeImage))
8395                    skip = true;
8396            }
8397        }
8398
8399        std::vector<SEMAPHORE_WAIT> semaphore_waits;
8400        std::vector<VkSemaphore> semaphore_signals;
8401        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
8402            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
8403            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
8404            if (pSemaphore) {
8405                if (pSemaphore->signaled) {
8406                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
8407                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
8408                        pSemaphore->in_use.fetch_add(1);
8409                    }
8410                    pSemaphore->signaler.first = VK_NULL_HANDLE;
8411                    pSemaphore->signaled = false;
8412                } else {
8413                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
8414                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
8415                                    "vkQueueBindSparse: Queue 0x%p is waiting on semaphore 0x%" PRIx64
8416                                    " that has no way to be signaled.",
8417                                    queue, HandleToUint64(semaphore));
8418                }
8419            }
8420        }
8421        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
8422            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
8423            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
8424            if (pSemaphore) {
8425                if (pSemaphore->signaled) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                    "vkQueueBindSparse: Queue 0x%p is signaling semaphore 0x%" PRIx64
                                    ", but that semaphore is already signaled.",
                                    queue, HandleToUint64(semaphore));
8431                } else {
8432                    pSemaphore->signaler.first = queue;
8433                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
8434                    pSemaphore->signaled = true;
8435                    pSemaphore->in_use.fetch_add(1);
8436                    semaphore_signals.push_back(semaphore);
8437                }
8438            }
8439        }
8440
8441        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals,
8442                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
8443    }
8444
8445    if (pFence && !bindInfoCount) {
8446        // No work to do, just dropping a fence in the queue by itself.
8447        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(),
8448                                         fence);
8449    }
8450
8451    lock.unlock();
8452
8453    if (!skip) return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
8454
8455    return result;
8456}
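
// For reference, a minimal application-side use of the path validated above (illustrative
// only; the handles and the block size are assumed to come from the usual creation and
// vkGetImageMemoryRequirements() query calls):
//
//     VkSparseMemoryBind bind = {};
//     bind.resourceOffset = 0;
//     bind.size = memory_requirements.alignment;  // sparse block size for sparse resources
//     bind.memory = device_memory;
//     bind.memoryOffset = 0;
//
//     VkSparseImageOpaqueMemoryBindInfo opaque_bind = {};
//     opaque_bind.image = sparse_image;
//     opaque_bind.bindCount = 1;
//     opaque_bind.pBinds = &bind;
//
//     VkBindSparseInfo bind_info = {};
//     bind_info.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
//     bind_info.imageOpaqueBindCount = 1;
//     bind_info.pImageOpaqueBinds = &opaque_bind;
//
//     vkQueueBindSparse(sparse_queue, 1, &bind_info, fence);  // fence signals when binds complete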
8457
8458VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
8459                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
8460    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8461    VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
8462    if (result == VK_SUCCESS) {
8463        std::lock_guard<std::mutex> lock(global_lock);
8464        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
8465        sNode->signaler.first = VK_NULL_HANDLE;
8466        sNode->signaler.second = 0;
8467        sNode->signaled = false;
8468    }
8469    return result;
8470}
8471
8472VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
8473                                           const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
8474    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8475    VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
8476    if (result == VK_SUCCESS) {
8477        std::lock_guard<std::mutex> lock(global_lock);
8478        dev_data->eventMap[*pEvent].needsSignaled = false;
8479        dev_data->eventMap[*pEvent].write_in_use = 0;
8480        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
8481    }
8482    return result;
8483}
8484
8485static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
8486                                              VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
8487                                              SWAPCHAIN_NODE *old_swapchain_state) {
8488    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
8489
8490    // TODO: revisit this. some of these rules are being relaxed.
8491
8492    // All physical devices and queue families are required to be able
8493    // to present to any native window on Android; require the
8494    // application to have established support on any other platform.
8495    if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
8496        auto support_predicate = [dev_data](decltype(surface_state->gpu_queue_support)::const_reference qs) -> bool {
8497            // TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
8498            return (qs.first.gpu == dev_data->physical_device) && qs.second;
8499        };
        const auto &support = surface_state->gpu_queue_support;
8501        bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);
8502
8503        if (!is_supported) {
8504            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8505                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009ec, "DS",
8506                        "%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. "
8507                        "The vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support "
8508                        "with this surface for at least one queue family of this device. %s",
8509                        func_name, validation_error_map[VALIDATION_ERROR_146009ec]))
8510                return true;
8511        }
8512    }
8513
8514    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
8515        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8516                    HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
8517                    "%s: surface has an existing swapchain other than oldSwapchain", func_name))
8518            return true;
8519    }
8520    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
8521        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
8522                    HandleToUint64(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE, "DS",
8523                    "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
8524            return true;
8525    }
8526    auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
8527    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
8528        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
8529                    HandleToUint64(dev_data->physical_device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
8530                    "%s: surface capabilities not retrieved for this physical device", func_name))
8531            return true;
8532    } else {  // have valid capabilities
8533        auto &capabilities = physical_device_state->surfaceCapabilities;
8534        // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
8535        if (pCreateInfo->minImageCount < capabilities.minImageCount) {
8536            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8537                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009ee, "DS",
8538                        "%s called with minImageCount = %d, which is outside the bounds returned "
8539                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
8540                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
8541                        validation_error_map[VALIDATION_ERROR_146009ee]))
8542                return true;
8543        }
8544
8545        if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
8546            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8547                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f0, "DS",
8548                        "%s called with minImageCount = %d, which is outside the bounds returned "
8549                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
8550                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
8551                        validation_error_map[VALIDATION_ERROR_146009f0]))
8552                return true;
8553        }
8554
8555        // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
8556        if ((capabilities.currentExtent.width == kSurfaceSizeFromSwapchain) &&
8557            ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
8558             (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
8559             (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
8560             (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height))) {
8561            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8562                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f4, "DS",
8563                        "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
8564                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
8565                        "maxImageExtent = (%d,%d). %s",
8566                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
8567                        capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
8568                        capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height,
8569                        validation_error_map[VALIDATION_ERROR_146009f4]))
8570                return true;
8571        }
8572        // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
8573        // VkSurfaceCapabilitiesKHR::supportedTransforms.
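        // Note: (preTransform & (preTransform - 1)) is non-zero exactly when more than one bit is
        // set, so the test below rejects zero, multi-bit, and unsupported single-bit values alike.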
8574        if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
8575            !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
8576            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
8577            // it up a little at a time, and then log it:
8578            std::string errorString = "";
8579            char str[1024];
8580            // Here's the first part of the message:
8581            sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s).  Supported values are:\n", func_name,
8582                    string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
8583            errorString += str;
8584            for (int i = 0; i < 32; i++) {
8585                // Build up the rest of the message:
8586                if ((1 << i) & capabilities.supportedTransforms) {
8587                    const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i));
8588                    sprintf(str, "    %s\n", newStr);
8589                    errorString += str;
8590                }
8591            }
8592            // Log the message that we've built up:
8593            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8594                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009fe, "DS", "%s. %s", errorString.c_str(),
8595                        validation_error_map[VALIDATION_ERROR_146009fe]))
8596                return true;
8597        }
8598
8599        // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
8600        // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
8601        if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
8602            !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
8603            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
8604            // it up a little at a time, and then log it:
8605            std::string errorString = "";
8606            char str[1024];
8607            // Here's the first part of the message:
8608            sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s).  Supported values are:\n",
8609                    func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
8610            errorString += str;
8611            for (int i = 0; i < 32; i++) {
8612                // Build up the rest of the message:
8613                if ((1 << i) & capabilities.supportedCompositeAlpha) {
8614                    const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i));
8615                    sprintf(str, "    %s\n", newStr);
8616                    errorString += str;
8617                }
8618            }
8619            // Log the message that we've built up:
8620            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8621                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600a00, "DS", "%s. %s", errorString.c_str(),
8622                        validation_error_map[VALIDATION_ERROR_14600a00]))
8623                return true;
8624        }
8625        // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
8626        if ((pCreateInfo->imageArrayLayers < 1) || (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers)) {
8627            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8628                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f6, "DS",
8629                        "%s called with a non-supported imageArrayLayers (i.e. %d).  Minimum value is 1, maximum value is %d. %s",
8630                        func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers,
8631                        validation_error_map[VALIDATION_ERROR_146009f6]))
8632                return true;
8633        }
8634        // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
8635        if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
8636            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8637                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f8, "DS",
8638                        "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x).  Supported flag bits are 0x%08x. %s",
8639                        func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags,
8640                        validation_error_map[VALIDATION_ERROR_146009f8]))
8641                return true;
8642        }
8643    }
8644
8645    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
8646    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
8647        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8648                    HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
8649                    "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
8650            return true;
8651    } else {
8652        // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
8653        bool foundFormat = false;
8654        bool foundColorSpace = false;
8655        bool foundMatch = false;
8656        for (auto const &format : physical_device_state->surface_formats) {
8657            if (pCreateInfo->imageFormat == format.format) {
8658                // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
8659                foundFormat = true;
8660                if (pCreateInfo->imageColorSpace == format.colorSpace) {
8661                    foundMatch = true;
8662                    break;
8663                }
8664            } else {
8665                if (pCreateInfo->imageColorSpace == format.colorSpace) {
8666                    foundColorSpace = true;
8667                }
8668            }
8669        }
8670        if (!foundMatch) {
8671            if (!foundFormat) {
8672                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8673                            HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f2, "DS",
8674                            "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d). %s", func_name,
8675                            pCreateInfo->imageFormat, validation_error_map[VALIDATION_ERROR_146009f2]))
8676                    return true;
8677            }
8678            if (!foundColorSpace) {
8679                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8680                            HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f2, "DS",
8681                            "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d). %s", func_name,
8682                            pCreateInfo->imageColorSpace, validation_error_map[VALIDATION_ERROR_146009f2]))
8683                    return true;
8684            }
8685        }
8686    }
8687
8688    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
8689    if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
8690        // FIFO is required to always be supported
8691        if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
8692            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8693                        HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
8694                        "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
8695                return true;
8696        }
8697    } else {
8698        // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
8699        bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
8700                                    pCreateInfo->presentMode) != physical_device_state->present_modes.end();
8701        if (!foundMatch) {
8702            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8703                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600a02, "DS",
8704                        "%s called with a non-supported presentMode (i.e. %s). %s", func_name,
8705                        string_VkPresentModeKHR(pCreateInfo->presentMode), validation_error_map[VALIDATION_ERROR_14600a02]))
8706                return true;
8707        }
8708    }
8709    // Validate state for shared presentable case
8710    if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
8711        VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
8712        if (!dev_data->extensions.vk_khr_shared_presentable_image) {
8713            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8714                        HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_EXTENSION_NOT_ENABLED, "DS",
8715                        "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
8716                        "been enabled.",
8717                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
8718                return true;
8719        } else if (pCreateInfo->minImageCount != 1) {
8720            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600ace, "DS",
8722                        "%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
8723                        "must be 1. %s",
8724                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount,
8725                        validation_error_map[VALIDATION_ERROR_14600ace]))
8726                return true;
8727        }
8728    }
8729
8730    return false;
8731}
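
// For reference, the query sequence the checks above expect the application to have performed
// before vkCreateSwapchainKHR() (illustrative only; error handling omitted):
//
//     VkBool32 supported = VK_FALSE;
//     vkGetPhysicalDeviceSurfaceSupportKHR(gpu, queue_family_index, surface, &supported);
//
//     VkSurfaceCapabilitiesKHR caps;
//     vkGetPhysicalDeviceSurfaceCapabilitiesKHR(gpu, surface, &caps);
//
//     uint32_t count = 0;
//     vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &count, nullptr);
//     std::vector<VkSurfaceFormatKHR> formats(count);
//     vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &count, formats.data());
//
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &count, nullptr);
//     std::vector<VkPresentModeKHR> modes(count);
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &count, modes.data());
//
// Only then should minImageCount, imageExtent, preTransform, compositeAlpha, imageFormat,
// imageColorSpace, and presentMode be chosen from the returned values.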
8732
8733static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
8734                                             VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
8735                                             SWAPCHAIN_NODE *old_swapchain_state) {
8736    if (VK_SUCCESS == result) {
8737        std::lock_guard<std::mutex> lock(global_lock);
8738        auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
8739        if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
8740            VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
8741            swapchain_state->shared_presentable = true;
8742        }
8743        surface_state->swapchain = swapchain_state.get();
8744        dev_data->swapchainMap[*pSwapchain] = std::move(swapchain_state);
8745    } else {
8746        surface_state->swapchain = nullptr;
8747    }
8748    // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
8749    if (old_swapchain_state) {
8750        old_swapchain_state->replaced = true;
8751    }
8752    surface_state->old_swapchain = old_swapchain_state;
8753    return;
8754}
8755
8756VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
8757                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
8758    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8759    auto surface_state = GetSurfaceState(dev_data->instance_data, pCreateInfo->surface);
8760    auto old_swapchain_state = GetSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
8761
8762    if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapChainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
8763        return VK_ERROR_VALIDATION_FAILED_EXT;
8764    }
8765
8766    VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
8767
8768    PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
8769
8770    return result;
8771}
8772
8773VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
8774    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8775    bool skip = false;
8776
8777    std::unique_lock<std::mutex> lock(global_lock);
8778    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
8779    if (swapchain_data) {
8780        if (swapchain_data->images.size() > 0) {
8781            for (auto swapchain_image : swapchain_data->images) {
8782                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
8783                if (image_sub != dev_data->imageSubresourceMap.end()) {
8784                    for (auto imgsubpair : image_sub->second) {
8785                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
8786                        if (image_item != dev_data->imageLayoutMap.end()) {
8787                            dev_data->imageLayoutMap.erase(image_item);
8788                        }
8789                    }
8790                    dev_data->imageSubresourceMap.erase(image_sub);
8791                }
                skip |= ClearMemoryObjectBindings(dev_data, HandleToUint64(swapchain_image), kVulkanObjectTypeSwapchainKHR);
8793                dev_data->imageMap.erase(swapchain_image);
8794            }
8795        }
8796
8797        auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
8798        if (surface_state) {
8799            if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
8800            if (surface_state->old_swapchain == swapchain_data) surface_state->old_swapchain = nullptr;
8801        }
8802
8803        dev_data->swapchainMap.erase(swapchain);
8804    }
8805    lock.unlock();
8806    if (!skip) dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
8807}
8808
8809static bool PreCallValidateGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
8810                                                 uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
8811    bool skip = false;
8812    if (swapchain_state && pSwapchainImages) {
8813        std::lock_guard<std::mutex> lock(global_lock);
8814        // Compare the preliminary value of *pSwapchainImageCount with the value this time:
8815        if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), __LINE__, SWAPCHAIN_PRIOR_COUNT, "DS",
                            "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages; but no prior call with NULL "
                            "pSwapchainImages has been made to query pSwapchainImageCount.");
        } else if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), __LINE__, SWAPCHAIN_INVALID_COUNT, "DS",
                            "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages, and with "
                            "*pSwapchainImageCount set to a value (%d) that is greater than the value (%d) that was returned "
                            "when pSwapchainImages was NULL.",
                            *pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
8827        }
8828    }
8829    return skip;
8830}
8831
8832static void PostCallRecordGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
8833                                                uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
8834    std::lock_guard<std::mutex> lock(global_lock);
8835
8836    if (*pSwapchainImageCount > swapchain_state->images.size()) swapchain_state->images.resize(*pSwapchainImageCount);
8837
8838    if (pSwapchainImages) {
8839        if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_DETAILS) {
8840            swapchain_state->vkGetSwapchainImagesKHRState = QUERY_DETAILS;
8841        }
8842        for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
8843            if (swapchain_state->images[i] != VK_NULL_HANDLE) continue;  // Already retrieved this.
8844
8845            IMAGE_LAYOUT_NODE image_layout_node;
8846            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
8847            image_layout_node.format = swapchain_state->createInfo.imageFormat;
8848            // Add imageMap entries for each swapchain image
8849            VkImageCreateInfo image_ci = {};
8850            image_ci.flags = 0;
8851            image_ci.imageType = VK_IMAGE_TYPE_2D;
8852            image_ci.format = swapchain_state->createInfo.imageFormat;
8853            image_ci.extent.width = swapchain_state->createInfo.imageExtent.width;
8854            image_ci.extent.height = swapchain_state->createInfo.imageExtent.height;
8855            image_ci.extent.depth = 1;
8856            image_ci.mipLevels = 1;
8857            image_ci.arrayLayers = swapchain_state->createInfo.imageArrayLayers;
8858            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
8859            image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
8860            image_ci.usage = swapchain_state->createInfo.imageUsage;
8861            image_ci.sharingMode = swapchain_state->createInfo.imageSharingMode;
8862            device_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
8863            auto &image_state = device_data->imageMap[pSwapchainImages[i]];
8864            image_state->valid = false;
8865            image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
8866            swapchain_state->images[i] = pSwapchainImages[i];
8867            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
8868            device_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
8869            device_data->imageLayoutMap[subpair] = image_layout_node;
8870        }
8871    }
8872
8873    if (*pSwapchainImageCount) {
8874        if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_COUNT) {
8875            swapchain_state->vkGetSwapchainImagesKHRState = QUERY_COUNT;
8876        }
8877        swapchain_state->get_swapchain_image_count = *pSwapchainImageCount;
8878    }
8879}
8880
8881VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
8882                                                     VkImage *pSwapchainImages) {
8883    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8884    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8885
8886    auto swapchain_state = GetSwapchainNode(device_data, swapchain);
8887    bool skip = PreCallValidateGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
8888
8889    if (!skip) {
8890        result = device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
8891    }
8892
    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
8894        PostCallRecordGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
8895    }
8896    return result;
8897}
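
// The usual two-call idiom the validation above assumes (illustrative only):
//
//     uint32_t image_count = 0;
//     vkGetSwapchainImagesKHR(device, swapchain, &image_count, nullptr);        // query count
//     std::vector<VkImage> images(image_count);
//     vkGetSwapchainImagesKHR(device, swapchain, &image_count, images.data());  // fetch handles
//
// Passing a count larger than the one previously returned is flagged as SWAPCHAIN_INVALID_COUNT.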
8898
8899VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
8900    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
8901    bool skip = false;
8902
8903    std::lock_guard<std::mutex> lock(global_lock);
8904    auto queue_state = GetQueueState(dev_data, queue);
8905
8906    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
8907        auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
8908        if (pSemaphore && !pSemaphore->signaled) {
8909            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
8910                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
8911                            "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
8912                            HandleToUint64(pPresentInfo->pWaitSemaphores[i]));
8913        }
8914    }
8915
8916    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
8917        auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
8918        if (swapchain_data) {
8919            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
8920                skip |=
8921                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
8922                            HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
8923                            "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
8924                            pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
8925            } else {
8926                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
                auto image_state = GetImageState(dev_data, image);

                // image_state is null if the swapchain's images were never retrieved via vkGetSwapchainImagesKHR()
                if (image_state) {
                    if (image_state->shared_presentable) {
                        image_state->layout_locked = true;
                    }

                    skip |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");

                    if (!image_state->acquired) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED, "DS",
                            "vkQueuePresentKHR: Swapchain image index %u has not been acquired.", pPresentInfo->pImageIndices[i]);
                    }
                }
8941
8942                vector<VkImageLayout> layouts;
8943                if (FindLayouts(dev_data, image, layouts)) {
8944                    for (auto layout : layouts) {
8945                        if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) &&
8946                            (!dev_data->extensions.vk_khr_shared_presentable_image ||
8947                             (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
8948                            skip |=
8949                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
8950                                        HandleToUint64(queue), __LINE__, VALIDATION_ERROR_11200a20, "DS",
8951                                        "Images passed to present must be in layout "
8952                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s. %s",
8953                                        string_VkImageLayout(layout), validation_error_map[VALIDATION_ERROR_11200a20]);
8954                        }
8955                    }
8956                }
8957            }
8958
8959            // All physical devices and queue families are required to be able
8960            // to present to any native window on Android; require the
8961            // application to have established support on any other platform.
8962            if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
8963                auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
8964                auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});
8965
8966                if (support_it == surface_state->gpu_queue_support.end()) {
8967                    skip |=
8968                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
8969                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE, "DS",
8970                                "vkQueuePresentKHR: Presenting image without calling "
8971                                "vkGetPhysicalDeviceSurfaceSupportKHR");
8972                } else if (!support_it->second) {
8973                    skip |=
8974                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
8975                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, VALIDATION_ERROR_31800a18, "DS",
8976                                "vkQueuePresentKHR: Presenting image on queue that cannot "
8977                                "present to this surface. %s",
8978                                validation_error_map[VALIDATION_ERROR_31800a18]);
8979                }
8980            }
8981        }
8982    }
8983    if (pPresentInfo && pPresentInfo->pNext) {
8984        // Verify ext struct
8985        struct std_header {
8986            VkStructureType sType;
8987            const void *pNext;
8988        };
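        // Every extension struct begins with sType/pNext, so this local header type lets the
        // loop below walk an arbitrary pNext chain and switch on sType without knowing each
        // extension struct in advance.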
8989        std_header *pnext = (std_header *)pPresentInfo->pNext;
8990        while (pnext) {
8991            if (VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR == pnext->sType) {
8992                VkPresentRegionsKHR *present_regions = (VkPresentRegionsKHR *)pnext;
8993                for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
8994                    auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
8995                    assert(swapchain_data);
8996                    VkPresentRegionKHR region = present_regions->pRegions[i];
8997                    for (uint32_t j = 0; j < region.rectangleCount; ++j) {
8998                        VkRectLayerKHR rect = region.pRectangles[j];
8999                        // TODO: Need to update these errors to their unique error ids when available
9000                        if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
9001                            skip |= log_msg(
9002                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9003                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
9004                                "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext "
9005                                "chain, pRegion[%i].pRectangles[%i], the sum of offset.x "
9006                                "(%i) and extent.width (%i) is greater than the "
9007                                "corresponding swapchain's imageExtent.width (%i).",
9008                                i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
9009                        }
9010                        if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
9011                            skip |= log_msg(
9012                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9013                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
9014                                "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext "
9015                                "chain, pRegion[%i].pRectangles[%i], the sum of offset.y "
9016                                "(%i) and extent.height (%i) is greater than the "
9017                                "corresponding swapchain's imageExtent.height (%i).",
9018                                i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
9019                        }
                        if (rect.layer >= swapchain_data->createInfo.imageArrayLayers) {
                            skip |= log_msg(
                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
                                "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the "
                                "layer (%i) must be less than the corresponding swapchain's imageArrayLayers (%i).",
                                i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
                        }
9027                        }
9028                    }
9029                }
9030            } else if (VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE == pnext->sType) {
9031                VkPresentTimesInfoGOOGLE *present_times_info = (VkPresentTimesInfoGOOGLE *)pnext;
9032                if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
9033                    skip |=
9034                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                HandleToUint64(pPresentInfo->pSwapchains[0]), __LINE__, VALIDATION_ERROR_118009be, "DS",
9038                                "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but "
9039                                "pPresentInfo->swapchainCount is %i. For VkPresentTimesInfoGOOGLE down pNext "
9040                                "chain of VkPresentInfoKHR, VkPresentTimesInfoGOOGLE.swapchainCount "
9041                                "must equal VkPresentInfoKHR.swapchainCount.",
9042                                present_times_info->swapchainCount, pPresentInfo->swapchainCount);
9043                }
9044            }
9045            pnext = (std_header *)pnext->pNext;
9046        }
9047    }
9048
9049    if (skip) {
9050        return VK_ERROR_VALIDATION_FAILED_EXT;
9051    }
9052
9053    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
9054
9055    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
9056        // Semaphore waits occur before error generation, if the call reached
9057        // the ICD. (Confirm?)
9058        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
9059            auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
9060            if (pSemaphore) {
9061                pSemaphore->signaler.first = VK_NULL_HANDLE;
9062                pSemaphore->signaled = false;
9063            }
9064        }
9065
9066        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
9067            // Note: this is imperfect, in that we can get confused about what
9068            // did or didn't succeed-- but if the app does that, it's confused
9069            // itself just as much.
9070            auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
9071
9072            if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue;  // this present didn't actually happen.
9073
            // Mark the image as having been released to the WSI
            auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
            if (swapchain_data && pPresentInfo->pImageIndices[i] < swapchain_data->images.size()) {
                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
                auto image_state = GetImageState(dev_data, image);
                if (image_state) image_state->acquired = false;
            }
9079        }
9080
9081        // Note: even though presentation is directed to a queue, there is no
9082        // direct ordering between QP and subsequent work, so QP (and its
9083        // semaphore waits) /never/ participate in any completion proof.
9084    }
9085
9086    return result;
9087}
9088
9089static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
9090                                                     const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
9091                                                     std::vector<SURFACE_STATE *> &surface_state,
9092                                                     std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
9093    if (pCreateInfos) {
9094        std::lock_guard<std::mutex> lock(global_lock);
9095        for (uint32_t i = 0; i < swapchainCount; i++) {
9096            surface_state.push_back(GetSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
9097            old_swapchain_state.push_back(GetSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
9098            std::stringstream func_name;
9099            func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]";
9100            if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i],
9101                                                  old_swapchain_state[i])) {
9102                return true;
9103            }
9104        }
9105    }
9106    return false;
9107}
9108
9109static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
9110                                                    const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
9111                                                    std::vector<SURFACE_STATE *> &surface_state,
9112                                                    std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
9113    if (VK_SUCCESS == result) {
9114        for (uint32_t i = 0; i < swapchainCount; i++) {
9115            auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
9116            if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfos[i].presentMode ||
9117                VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfos[i].presentMode) {
9118                swapchain_state->shared_presentable = true;
9119            }
9120            surface_state[i]->swapchain = swapchain_state.get();
9121            dev_data->swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
9122        }
9123    } else {
9124        for (uint32_t i = 0; i < swapchainCount; i++) {
9125            surface_state[i]->swapchain = nullptr;
9126        }
9127    }
9128    // Spec requires that even if CreateSharedSwapchainKHR fails, oldSwapchain behaves as replaced.
9129    for (uint32_t i = 0; i < swapchainCount; i++) {
9130        if (old_swapchain_state[i]) {
9131            old_swapchain_state[i]->replaced = true;
9132        }
9133        surface_state[i]->old_swapchain = old_swapchain_state[i];
9134    }
9135    return;
9136}
9137
9138VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
9139                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
9140                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
9141    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9142    std::vector<SURFACE_STATE *> surface_state;
9143    std::vector<SWAPCHAIN_NODE *> old_swapchain_state;
9144
9145    if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
9146                                                 old_swapchain_state)) {
9147        return VK_ERROR_VALIDATION_FAILED_EXT;
9148    }
9149
9150    VkResult result =
9151        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
9152
9153    PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
9154                                            old_swapchain_state);
9155
9156    return result;
9157}
9158
9159VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
9160                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
9161    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9162    bool skip = false;
9163
9164    std::unique_lock<std::mutex> lock(global_lock);
9165
9166    if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
9167        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9168                        HandleToUint64(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
9169                        "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
9170                        "to determine the completion of this operation.");
9171    }
9172
9173    auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
9174    if (pSemaphore && pSemaphore->signaled) {
9175        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9176                        HandleToUint64(semaphore), __LINE__, VALIDATION_ERROR_16400a0c, "DS",
9177                        "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state. %s",
9178                        validation_error_map[VALIDATION_ERROR_16400a0c]);
9179    }
9180
9181    auto pFence = GetFenceNode(dev_data, fence);
9182    if (pFence) {
9183        skip |= ValidateFenceForSubmit(dev_data, pFence);
9184    }
9185
9186    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
9187
    if (swapchain_data && swapchain_data->replaced) {
9189        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9190                        HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_REPLACED, "DS",
9191                        "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still "
9192                        "present any images it has acquired, but cannot acquire any more.");
9193    }
9194
9195    auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
    if (swapchain_data && physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
9197        uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
9198                                                 [=](VkImage image) { return GetImageState(dev_data, image)->acquired; });
9199        if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES, "DS",
                        "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (%" PRIu64 ")",
                        acquired_images);
9205        }
9206    }
9207
    if (swapchain_data && swapchain_data->images.empty()) {
9209        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9210                        HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGES_NOT_FOUND, "DS",
9211                        "vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
9212                        "vkGetSwapchainImagesKHR after swapchain creation.");
9213    }
9214
9215    lock.unlock();
9216
9217    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
9218
9219    VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
9220
9221    lock.lock();
9222    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
9223        if (pFence) {
9224            pFence->state = FENCE_INFLIGHT;
9225            pFence->signaler.first = VK_NULL_HANDLE;  // ANI isn't on a queue, so this can't participate in a completion proof.
9226        }
9227
9228        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
9229        if (pSemaphore) {
9230            pSemaphore->signaled = true;
9231            pSemaphore->signaler.first = VK_NULL_HANDLE;
9232        }
9233
        // Mark the image as acquired. Guard against a swapchain whose images were never
        // retrieved via vkGetSwapchainImagesKHR(), in which case there is no state to update.
        if (swapchain_data && *pImageIndex < swapchain_data->images.size()) {
            auto image = swapchain_data->images[*pImageIndex];
            auto image_state = GetImageState(dev_data, image);
            if (image_state) {
                image_state->acquired = true;
                image_state->shared_presentable = swapchain_data->shared_presentable;
            }
        }
9239    }
9240    lock.unlock();
9241
9242    return result;
9243}
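
// A minimal frame loop showing the synchronization the acquire/present checks above validate
// (illustrative only; command-buffer recording and error handling omitted, and the semaphores
// are assumed to have been created with vkCreateSemaphore):
//
//     uint32_t image_index = 0;
//     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, acquire_semaphore, VK_NULL_HANDLE,
//                           &image_index);  // signals acquire_semaphore
//
//     VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
//     VkSubmitInfo submit = {};
//     submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
//     submit.waitSemaphoreCount = 1;
//     submit.pWaitSemaphores = &acquire_semaphore;   // consume the acquire signal
//     submit.pWaitDstStageMask = &wait_stage;
//     submit.commandBufferCount = 1;
//     submit.pCommandBuffers = &cmd;
//     submit.signalSemaphoreCount = 1;
//     submit.pSignalSemaphores = &render_semaphore;
//     vkQueueSubmit(queue, 1, &submit, VK_NULL_HANDLE);
//
//     VkPresentInfoKHR present = {};
//     present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
//     present.waitSemaphoreCount = 1;
//     present.pWaitSemaphores = &render_semaphore;   // present waits on rendering
//     present.swapchainCount = 1;
//     present.pSwapchains = &swapchain;
//     present.pImageIndices = &image_index;
//     vkQueuePresentKHR(queue, &present);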
9244
9245VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
9246                                                        VkPhysicalDevice *pPhysicalDevices) {
9247    bool skip = false;
9248    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9249    assert(instance_data);
9250
9251    // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
9252    if (NULL == pPhysicalDevices) {
9253        instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
9254    } else {
9255        if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
9256            // Flag warning here. You can call this without having queried the count, but it may not be
9257            // robust on platforms with multiple physical devices.
9258            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
9259                            0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
9260                            "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
9261                            "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
9262        }  // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
9263        else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
9264            // Having actual count match count from app is not a requirement, so this can be a warning
9265            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9266                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
9267                            "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
9268                            "supported by this instance is %u.",
9269                            *pPhysicalDeviceCount, instance_data->physical_devices_count);
9270        }
9271        instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
9272    }
9273    if (skip) {
9274        return VK_ERROR_VALIDATION_FAILED_EXT;
9275    }
9276    VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
9277    if (NULL == pPhysicalDevices) {
9278        instance_data->physical_devices_count = *pPhysicalDeviceCount;
9279    } else if (result == VK_SUCCESS) {  // Save physical devices
9280        for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
9281            auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
9282            phys_device_state.phys_device = pPhysicalDevices[i];
9283            // Init actual features for each physical device
9284            instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
9285        }
9286    }
9287    return result;
9288}
9289
9290// Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
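// It warns when the details call arrives while the state is still UNCALLED (count never queried), and when
// the caller-supplied count differs from the largest count previously returned for this physical device.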
9291static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
9292                                                                 PHYSICAL_DEVICE_STATE *pd_state,
9293                                                                 uint32_t requested_queue_family_property_count, bool qfp_null,
9294                                                                 const char *caller_name) {
9295    bool skip = false;
9296    if (!qfp_null) {
9297        // Verify that for each physical device, this command is called first with NULL pQueueFamilyProperties in order to get count
9298        if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
9299            skip |= log_msg(
9300                instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
9301                HandleToUint64(pd_state->phys_device), __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
9302                "%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended "
9303                "to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.",
9304                caller_name, caller_name);
9305            // Then verify that pCount that is passed in on second call matches what was returned
9306        } else if (pd_state->queue_family_count != requested_queue_family_property_count) {
9307            skip |= log_msg(
9308                instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
9309                HandleToUint64(pd_state->phys_device), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
9310                "%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32
9311                ", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32
9312                ". It is recommended to instead receive all the properties by calling %s with pQueueFamilyPropertyCount that was "
9313                "previously obtained by calling %s with NULL pQueueFamilyProperties.",
9314                caller_name, requested_queue_family_property_count, pd_state->queue_family_count, caller_name, caller_name);
9315        }
9316        pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
9317    }
9318
9319    return skip;
9320}
9321
9322static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
9323                                                                  PHYSICAL_DEVICE_STATE *pd_state,
9324                                                                  uint32_t *pQueueFamilyPropertyCount,
9325                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
9326    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
9327                                                                (nullptr == pQueueFamilyProperties),
9328                                                                "vkGetPhysicalDeviceQueueFamilyProperties()");
9329}
9330
9331static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_layer_data *instance_data,
9332                                                                      PHYSICAL_DEVICE_STATE *pd_state,
9333                                                                      uint32_t *pQueueFamilyPropertyCount,
9334                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
9335    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
9336                                                                (nullptr == pQueueFamilyProperties),
9337                                                                "vkGetPhysicalDeviceQueueFamilyProperties2KHR()");
9338}
9339
9340// Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
9341static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
9342                                                                    VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
9343    if (!pQueueFamilyProperties) {
9344        if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
9345            pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
9346        pd_state->queue_family_count = count;
9347    } else {  // Save queue family properties
9348        pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
9349        pd_state->queue_family_count = std::max(pd_state->queue_family_count, count);
9350
9351        pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
9352        for (uint32_t i = 0; i < count; ++i) {
9353            pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
9354        }
9355    }
9356}
9357
9358static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
9359                                                                 VkQueueFamilyProperties *pQueueFamilyProperties) {
9360    VkQueueFamilyProperties2KHR *pqfp = nullptr;
9361    std::vector<VkQueueFamilyProperties2KHR> qfp;
9362    qfp.resize(count);
9363    if (pQueueFamilyProperties) {
9364        for (uint32_t i = 0; i < count; ++i) {
9365            qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
9366            qfp[i].pNext = nullptr;
9367            qfp[i].queueFamilyProperties = pQueueFamilyProperties[i];
9368        }
9369        pqfp = qfp.data();
9370    }
9371    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pqfp);
9372}
9373
9374static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
9375                                                                     VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
9376    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pQueueFamilyProperties);
9377}
9378
9379VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
9380                                                                  uint32_t *pQueueFamilyPropertyCount,
9381                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
9382    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9383    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9384    assert(physical_device_state);
9385    std::unique_lock<std::mutex> lock(global_lock);
9386
9387    bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state,
9388                                                                      pQueueFamilyPropertyCount, pQueueFamilyProperties);
9389
9390    lock.unlock();
9391
9392    if (skip) return;
9393
9394    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
9395                                                                         pQueueFamilyProperties);
9396
9397    lock.lock();
9398    PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties);
9399}
9400
9401VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
9402                                                                      uint32_t *pQueueFamilyPropertyCount,
9403                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
9404    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9405    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9406    assert(physical_device_state);
9407    std::unique_lock<std::mutex> lock(global_lock);
9408
9409    bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_data, physical_device_state,
9410                                                                          pQueueFamilyPropertyCount, pQueueFamilyProperties);
9411
9412    lock.unlock();
9413
9414    if (skip) return;
9415
9416    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
9417                                                                             pQueueFamilyProperties);
9418
9419    lock.lock();
9420    PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(physical_device_state, *pQueueFamilyPropertyCount,
9421                                                             pQueueFamilyProperties);
9422}
9423
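// Generic helper shared by the vkCreate*SurfaceKHR entry points below: FPtr is a pointer-to-member of
// VkLayerInstanceDispatchTable, so each platform wrapper just forwards its create call down the chain and,
// on success, a SURFACE_STATE entry is registered for the new VkSurfaceKHR.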
9424template <typename TCreateInfo, typename FPtr>
9425static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
9426                              VkSurfaceKHR *pSurface, FPtr fptr) {
9427    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9428
9429    // Call down the call chain:
9430    VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
9431
9432    if (result == VK_SUCCESS) {
9433        std::unique_lock<std::mutex> lock(global_lock);
9434        instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
9435        lock.unlock();
9436    }
9437
9438    return result;
9439}
9440
9441VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
9442    bool skip = false;
9443    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9444    std::unique_lock<std::mutex> lock(global_lock);
9445    auto surface_state = GetSurfaceState(instance_data, surface);
9446
9447    if ((surface_state) && (surface_state->swapchain)) {
9448        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
                        HandleToUint64(instance), __LINE__, VALIDATION_ERROR_26c009e4, "DS",
                        "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed. %s",
                        validation_error_map[VALIDATION_ERROR_26c009e4]);
9452    }
9453    instance_data->surface_map.erase(surface);
9454    lock.unlock();
9455    if (!skip) {
9456        instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
9457    }
9458}
9459
9460VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
9461                                                            const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9462    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
9463}
9464
9465#ifdef VK_USE_PLATFORM_ANDROID_KHR
9466VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
9467                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9468    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
9469}
9470#endif  // VK_USE_PLATFORM_ANDROID_KHR
9471
9472#ifdef VK_USE_PLATFORM_MIR_KHR
9473VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
9474                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9475    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
9476}
9477
9478VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceMirPresentationSupportKHR(VkPhysicalDevice physicalDevice,
9479                                                                          uint32_t queueFamilyIndex, MirConnection *connection) {
9480    bool skip = false;
9481    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9482
9483    std::unique_lock<std::mutex> lock(global_lock);
9484    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9485
9486    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2d2009e2,
9487                                              "vkGetPhysicalDeviceMirPresentationSupportKHR", "queueFamilyIndex");
9488
9489    lock.unlock();
9490
9491    if (skip) return VK_FALSE;
9492
9493    // Call down the call chain:
9494    VkBool32 result =
9495        instance_data->dispatch_table.GetPhysicalDeviceMirPresentationSupportKHR(physicalDevice, queueFamilyIndex, connection);
9496
9497    return result;
9498}
9499#endif  // VK_USE_PLATFORM_MIR_KHR
9500
9501#ifdef VK_USE_PLATFORM_WAYLAND_KHR
9502VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
9503                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9504    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
9505}
9506
9507VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
9508                                                                              uint32_t queueFamilyIndex,
9509                                                                              struct wl_display *display) {
9510    bool skip = false;
9511    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9512
9513    std::unique_lock<std::mutex> lock(global_lock);
9514    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9515
9516    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f000a34,
9517                                              "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
9518
9519    lock.unlock();
9520
9521    if (skip) return VK_FALSE;
9522
9523    // Call down the call chain:
9524    VkBool32 result =
9525        instance_data->dispatch_table.GetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex, display);
9526
9527    return result;
9528}
9529#endif  // VK_USE_PLATFORM_WAYLAND_KHR
9530
9531#ifdef VK_USE_PLATFORM_WIN32_KHR
9532VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
9533                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9534    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
9535}
9536
9537VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
9538                                                                            uint32_t queueFamilyIndex) {
9539    bool skip = false;
9540    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9541
9542    std::unique_lock<std::mutex> lock(global_lock);
9543    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9544
9545    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f200a3a,
9546                                              "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
9547
9548    lock.unlock();
9549
9550    if (skip) return VK_FALSE;
9551
9552    // Call down the call chain:
9553    VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex);
9554
9555    return result;
9556}
9557#endif  // VK_USE_PLATFORM_WIN32_KHR
9558
9559#ifdef VK_USE_PLATFORM_XCB_KHR
9560VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
9561                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9562    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
9563}
9564
9565VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
9566                                                                          uint32_t queueFamilyIndex, xcb_connection_t *connection,
9567                                                                          xcb_visualid_t visual_id) {
9568    bool skip = false;
9569    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9570
9571    std::unique_lock<std::mutex> lock(global_lock);
9572    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9573
9574    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f400a40,
9575                                              "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
9576
9577    lock.unlock();
9578
9579    if (skip) return VK_FALSE;
9580
9581    // Call down the call chain:
9582    VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex,
9583                                                                                               connection, visual_id);
9584
9585    return result;
9586}
9587#endif  // VK_USE_PLATFORM_XCB_KHR
9588
9589#ifdef VK_USE_PLATFORM_XLIB_KHR
9590VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
9591                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9592    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
9593}
9594
9595VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
9596                                                                           uint32_t queueFamilyIndex, Display *dpy,
9597                                                                           VisualID visualID) {
9598    bool skip = false;
9599    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9600
9601    std::unique_lock<std::mutex> lock(global_lock);
9602    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9603
9604    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f600a46,
9605                                              "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
9606
9607    lock.unlock();
9608
9609    if (skip) return VK_FALSE;
9610
9611    // Call down the call chain:
9612    VkBool32 result =
9613        instance_data->dispatch_table.GetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex, dpy, visualID);
9614
9615    return result;
9616}
9617#endif  // VK_USE_PLATFORM_XLIB_KHR
9618
9619VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
9620                                                                       VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
9621    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9622
9623    std::unique_lock<std::mutex> lock(global_lock);
9624    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9625    lock.unlock();
9626
9627    auto result =
9628        instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
9629
9630    if (result == VK_SUCCESS) {
9631        physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
9632        physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
9633    }
9634
9635    return result;
9636}
9637
9638static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instance_layer_data *instanceData,
9639                                                                   VkPhysicalDevice physicalDevice,
9640                                                                   VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
9641    std::unique_lock<std::mutex> lock(global_lock);
9642    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
9643    physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
9644    physicalDeviceState->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities;
9645}
9646
9647VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
9648                                                                        const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
9649                                                                        VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
9650    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9651
9652    auto result =
9653        instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, pSurfaceInfo, pSurfaceCapabilities);
9654
9655    if (result == VK_SUCCESS) {
9656        PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instanceData, physicalDevice, pSurfaceCapabilities);
9657    }
9658
9659    return result;
9660}
9661
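// VkSurfaceCapabilities2EXT is not layout-compatible with VkSurfaceCapabilitiesKHR (it carries sType/pNext
// and supportedSurfaceCounters), so the cached KHR-style capabilities are filled in field by field.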
9662static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instance_layer_data *instanceData,
9663                                                                   VkPhysicalDevice physicalDevice,
9664                                                                   VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
9665    std::unique_lock<std::mutex> lock(global_lock);
9666    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
9667    physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
9668    physicalDeviceState->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount;
9669    physicalDeviceState->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount;
9670    physicalDeviceState->surfaceCapabilities.currentExtent = pSurfaceCapabilities->currentExtent;
9671    physicalDeviceState->surfaceCapabilities.minImageExtent = pSurfaceCapabilities->minImageExtent;
9672    physicalDeviceState->surfaceCapabilities.maxImageExtent = pSurfaceCapabilities->maxImageExtent;
9673    physicalDeviceState->surfaceCapabilities.maxImageArrayLayers = pSurfaceCapabilities->maxImageArrayLayers;
9674    physicalDeviceState->surfaceCapabilities.supportedTransforms = pSurfaceCapabilities->supportedTransforms;
9675    physicalDeviceState->surfaceCapabilities.currentTransform = pSurfaceCapabilities->currentTransform;
9676    physicalDeviceState->surfaceCapabilities.supportedCompositeAlpha = pSurfaceCapabilities->supportedCompositeAlpha;
9677    physicalDeviceState->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags;
9678}
9679
9680VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
9681                                                                        VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
9682    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9683
9684    auto result =
9685        instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities);
9686
9687    if (result == VK_SUCCESS) {
9688        PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instanceData, physicalDevice, pSurfaceCapabilities);
9689    }
9690
9691    return result;
9692}
9693
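// On success the (physical device, queue family) -> presentation-support answer is cached in SURFACE_STATE's
// gpu_queue_support map, which later swapchain-creation validation consults.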
9694VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
9695                                                                  VkSurfaceKHR surface, VkBool32 *pSupported) {
9696    bool skip = false;
9697    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9698
9699    std::unique_lock<std::mutex> lock(global_lock);
9700    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9701    auto surface_state = GetSurfaceState(instance_data, surface);
9702
9703    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2ee009ea,
9704                                              "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
9705
9706    lock.unlock();
9707
9708    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
9709
9710    auto result =
9711        instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
9712
9713    if (result == VK_SUCCESS) {
9714        surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE);
9715    }
9716
9717    return result;
9718}
9719
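// The per-physical-device call state used below advances UNCALLED -> QUERY_COUNT (a count-only query was
// seen) -> QUERY_DETAILS (a details query was seen); the warnings key off that progression.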
9720VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
9721                                                                       uint32_t *pPresentModeCount,
9722                                                                       VkPresentModeKHR *pPresentModes) {
9723    bool skip = false;
9724    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9725    std::unique_lock<std::mutex> lock(global_lock);
9726    // TODO: this isn't quite right. available modes may differ by surface AND physical device.
9727    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9728    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
9729
9730    if (pPresentModes) {
9731        // Compare the preliminary value of *pPresentModeCount with the value this time:
9732        auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
9733        switch (call_state) {
9734            case UNCALLED:
9735                skip |= log_msg(
9736                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
9737                    HandleToUint64(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                    "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModes, but no prior call has "
                    "been made with NULL pPresentModes to query pPresentModeCount.");
9740                break;
9741            default:
9742                // both query count and query details
9743                if (*pPresentModeCount != prev_mode_count) {
9744                    skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9745                                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), __LINE__,
9746                                    DEVLIMITS_COUNT_MISMATCH, "DL",
9747                                    "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that "
                                    "differs from the value (%u) that was returned when pPresentModes was NULL.",
9750                                    *pPresentModeCount, prev_mode_count);
9751                }
9752                break;
9753        }
9754    }
9755    lock.unlock();
9756
9757    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
9758
9759    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount,
9760                                                                                        pPresentModes);
9761
9762    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
9763        lock.lock();
9764
9765        if (*pPresentModeCount) {
9766            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
9767            if (*pPresentModeCount > physical_device_state->present_modes.size())
9768                physical_device_state->present_modes.resize(*pPresentModeCount);
9769        }
9770        if (pPresentModes) {
9771            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
9772            for (uint32_t i = 0; i < *pPresentModeCount; i++) {
9773                physical_device_state->present_modes[i] = pPresentModes[i];
9774            }
9775        }
9776    }
9777
9778    return result;
9779}
9780
9781VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
9782                                                                  uint32_t *pSurfaceFormatCount,
9783                                                                  VkSurfaceFormatKHR *pSurfaceFormats) {
9784    bool skip = false;
9785    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9786    std::unique_lock<std::mutex> lock(global_lock);
9787    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9788    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
9789
9790    if (pSurfaceFormats) {
9791        auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();
9792
9793        switch (call_state) {
9794            case UNCALLED:
                // No preliminary value of *pSurfaceFormatCount has been recorded, which likely means the
                // application did not previously call this function with a NULL value of pSurfaceFormats:
9798                skip |= log_msg(
9799                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
9800                    HandleToUint64(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                    "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats, but no prior call has been "
                    "made with NULL pSurfaceFormats to query pSurfaceFormatCount.");
9803                break;
9804            default:
9805                if (prev_format_count != *pSurfaceFormatCount) {
9806                    skip |= log_msg(
9807                        instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9808                        VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), __LINE__,
9809                        DEVLIMITS_COUNT_MISMATCH, "DL",
                        "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats and a *pSurfaceFormatCount "
                        "value (%u) that differs from the value (%u) that was returned when pSurfaceFormats was NULL.",
9814                        *pSurfaceFormatCount, prev_format_count);
9815                }
9816                break;
9817        }
9818    }
9819    lock.unlock();
9820
9821    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
9822
9823    // Call down the call chain:
9824    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
9825                                                                                   pSurfaceFormats);
9826
9827    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
9828        lock.lock();
9829
9830        if (*pSurfaceFormatCount) {
9831            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
9832            if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
9833                physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
9834        }
9835        if (pSurfaceFormats) {
9836            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
9837            for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
9838                physical_device_state->surface_formats[i] = pSurfaceFormats[i];
9839            }
9840        }
9841    }
9842    return result;
9843}
9844
9845static void PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instance_layer_data *instanceData, VkPhysicalDevice physicalDevice,
9846                                                              uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats) {
9847    std::unique_lock<std::mutex> lock(global_lock);
9848    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
9849    if (*pSurfaceFormatCount) {
9850        if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) {
9851            physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT;
9852        }
9853        if (*pSurfaceFormatCount > physicalDeviceState->surface_formats.size())
9854            physicalDeviceState->surface_formats.resize(*pSurfaceFormatCount);
9855    }
9856    if (pSurfaceFormats) {
9857        if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) {
9858            physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS;
9859        }
9860        for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
9861            physicalDeviceState->surface_formats[i] = pSurfaceFormats[i].surfaceFormat;
9862        }
9863    }
9864}
9865
9866VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
9867                                                                   const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
9868                                                                   uint32_t *pSurfaceFormatCount,
9869                                                                   VkSurfaceFormat2KHR *pSurfaceFormats) {
9870    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9871    auto result = instanceData->dispatch_table.GetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, pSurfaceInfo,
9872                                                                                   pSurfaceFormatCount, pSurfaceFormats);
9873    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
9874        PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instanceData, physicalDevice, pSurfaceFormatCount, pSurfaceFormats);
9875    }
9876    return result;
9877}
9878
9879VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
9880                                                            const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
9881                                                            const VkAllocationCallbacks *pAllocator,
9882                                                            VkDebugReportCallbackEXT *pMsgCallback) {
9883    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9884    VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
9885    if (VK_SUCCESS == res) {
9886        std::lock_guard<std::mutex> lock(global_lock);
9887        res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
9888    }
9889    return res;
9890}
9891
9892VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
9893                                                         const VkAllocationCallbacks *pAllocator) {
9894    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9895    instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
9896    std::lock_guard<std::mutex> lock(global_lock);
9897    layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
9898}
9899
9900VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
9901                                                 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
9902                                                 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
9903    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9904    instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
9905}
9906
9907VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
9908    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
9909}
9910
9911VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
9912                                                              VkLayerProperties *pProperties) {
9913    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
9914}
9915
9916VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
9917                                                                    VkExtensionProperties *pProperties) {
9918    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
9919        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
9920
9921    return VK_ERROR_LAYER_NOT_PRESENT;
9922}
9923
9924VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
9925                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
9926    if (pLayerName && !strcmp(pLayerName, global_layer.layerName)) return util_GetExtensionProperties(0, NULL, pCount, pProperties);
9927
9928    assert(physicalDevice);
9929
9930    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9931    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
9932}
9933
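// Mirrors the count/details call-sequence tracking done for vkEnumeratePhysicalDevices above, but for
// physical device groups; every device in every returned group gets a PHYSICAL_DEVICE_STATE entry with its
// features cached.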
9934VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroupsKHX(
9935    VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHX *pPhysicalDeviceGroupProperties) {
9936    bool skip = false;
9937    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9938
9939    if (instance_data) {
9940        // For this instance, flag when EnumeratePhysicalDeviceGroupsKHX goes to QUERY_COUNT and then QUERY_DETAILS.
9941        if (NULL == pPhysicalDeviceGroupProperties) {
9942            instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_COUNT;
9943        } else {
9944            if (UNCALLED == instance_data->vkEnumeratePhysicalDeviceGroupsState) {
9945                // Flag warning here. You can call this without having queried the count, but it may not be
9946                // robust on platforms with multiple physical devices.
9947                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9948                                VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
9949                                "Call sequence has vkEnumeratePhysicalDeviceGroupsKHX() w/ non-NULL "
9950                                "pPhysicalDeviceGroupProperties. You should first "
9951                                "call vkEnumeratePhysicalDeviceGroupsKHX() w/ NULL pPhysicalDeviceGroupProperties to query "
9952                                "pPhysicalDeviceGroupCount.");
9953            } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
9954            else if (instance_data->physical_device_groups_count != *pPhysicalDeviceGroupCount) {
9955                // Having actual count match count from app is not a requirement, so this can be a warning
9956                skip |=
9957                    log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9958                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
9959                            "Call to vkEnumeratePhysicalDeviceGroupsKHX() w/ pPhysicalDeviceGroupCount value %u, but actual count "
9960                            "supported by this instance is %u.",
9961                            *pPhysicalDeviceGroupCount, instance_data->physical_device_groups_count);
9962            }
9963            instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_DETAILS;
9964        }
9965        if (skip) {
9966            return VK_ERROR_VALIDATION_FAILED_EXT;
9967        }
9968        VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroupsKHX(instance, pPhysicalDeviceGroupCount,
9969            pPhysicalDeviceGroupProperties);
9970        if (NULL == pPhysicalDeviceGroupProperties) {
9971            instance_data->physical_device_groups_count = *pPhysicalDeviceGroupCount;
9972        } else if (result == VK_SUCCESS) { // Save physical devices
9973            for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) {
9974                for (uint32_t j = 0; j < pPhysicalDeviceGroupProperties[i].physicalDeviceCount; j++) {
9975                    VkPhysicalDevice cur_phys_dev = pPhysicalDeviceGroupProperties[i].physicalDevices[j];
9976                    auto &phys_device_state = instance_data->physical_device_map[cur_phys_dev];
9977                    phys_device_state.phys_device = cur_phys_dev;
9978                    // Init actual features for each physical device
9979                    instance_data->dispatch_table.GetPhysicalDeviceFeatures(cur_phys_dev, &phys_device_state.features);
9980                }
9981            }
9982        }
9983        return result;
9984    } else {
        // instance_data is null in this branch, so there is no report_data to log through; write to the
        // console instead of dereferencing the null pointer.
        LOGCONSOLE("Invalid instance (0x%" PRIxLEAST64 ") passed into vkEnumeratePhysicalDeviceGroupsKHX().",
                   HandleToUint64(instance));
9989    }
9990    return VK_ERROR_VALIDATION_FAILED_EXT;
9991}
9992
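// The template create info is shadowed (deep-copied into a safe_* struct) because a later
// vkUpdateDescriptorSetWithTemplateKHR only provides a raw pData blob, and decoding that blob requires the
// entry layout that was recorded at template-creation time.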
9993VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
9994                                                                 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
9995                                                                 const VkAllocationCallbacks *pAllocator,
9996                                                                 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
9997    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9998    VkResult result =
9999        dev_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
10000    if (VK_SUCCESS == result) {
10001        std::lock_guard<std::mutex> lock(global_lock);
10002        // Shadow template createInfo for later updates
10003        safe_VkDescriptorUpdateTemplateCreateInfoKHR *local_create_info =
10004            new safe_VkDescriptorUpdateTemplateCreateInfoKHR(pCreateInfo);
10005        std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
10006        dev_data->desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state);
10007    }
10008    return result;
10009}
10010
10011VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
10012                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
10013                                                              const VkAllocationCallbacks *pAllocator) {
10014    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10015    std::unique_lock<std::mutex> lock(global_lock);
10016    dev_data->desc_template_map.erase(descriptorUpdateTemplate);
10017    lock.unlock();
10018    dev_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
10019}
10020
10021// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSetsWithTemplate()
10022static void PostCallRecordUpdateDescriptorSetWithTemplateKHR(layer_data *device_data, VkDescriptorSet descriptorSet,
10023                                                             VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
10024                                                             const void *pData) {
10025    auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
10026    if (template_map_entry == device_data->desc_template_map.end()) {
        assert(0);
        return;  // the assert compiles away in release builds; bail out rather than dereference end() below
10028    }
10029
10030    cvdescriptorset::PerformUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_map_entry->second, pData);
10031}
10032
10033VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
10034                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
10035                                                              const void *pData) {
10036    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10037    device_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData);
10038
10039    PostCallRecordUpdateDescriptorSetWithTemplateKHR(device_data, descriptorSet, descriptorUpdateTemplate, pData);
10040}
10041
10042VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
10043                                                               VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
10044                                                               VkPipelineLayout layout, uint32_t set, const void *pData) {
10045    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
10046    dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData);
10047}
10048
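// NOTE: this recording (and the plane-index validation further below) currently piggybacks on
// vkGetPhysicalDeviceSurfaceFormatsKHRState rather than a dedicated display-plane query state; a separate
// tracking enum would avoid conflating the two queries.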
10049static void PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_layer_data *instanceData,
10050                                                                     VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
10051                                                                     VkDisplayPlanePropertiesKHR *pProperties) {
10052    std::unique_lock<std::mutex> lock(global_lock);
10053    auto physical_device_state = GetPhysicalDeviceState(instanceData, physicalDevice);
10054
10055    if (*pPropertyCount) {
10056        if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) {
10057            physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT;
10058        }
10059        physical_device_state->display_plane_property_count = *pPropertyCount;
10060    }
10061    if (pProperties) {
10062        if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) {
10063            physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS;
10064        }
10065    }
10066}
10067
10068VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
10069                                                                          VkDisplayPlanePropertiesKHR *pProperties) {
10070    VkResult result = VK_SUCCESS;
10071    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10072
10073    result = instance_data->dispatch_table.GetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties);
10074
10075    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
10076        PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_data, physicalDevice, pPropertyCount, pProperties);
10077    }
10078
10079    return result;
10080}
10081
10082static bool ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_layer_data *instance_data,
10083                                                                    VkPhysicalDevice physicalDevice, uint32_t planeIndex,
10084                                                                    const char *api_name) {
10085    bool skip = false;
10086    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
10087    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState == UNCALLED) {
10088        skip |= log_msg(
10089            instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
10090            HandleToUint64(physicalDevice), __LINE__, SWAPCHAIN_GET_SUPPORTED_DISPLAYS_WITHOUT_QUERY, "DL",
            "Potential problem: %s() called without first querying vkGetPhysicalDeviceDisplayPlanePropertiesKHR.", api_name);
10092    } else {
10093        if (planeIndex >= physical_device_state->display_plane_property_count) {
10094            skip |= log_msg(
10095                instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
10096                HandleToUint64(physicalDevice), __LINE__, VALIDATION_ERROR_29c009c2, "DL",
                "%s(): planeIndex (%u) must be less than the plane count (%u) returned by "
                "vkGetPhysicalDeviceDisplayPlanePropertiesKHR. Do you have the plane index hardcoded? %s",
                api_name, planeIndex, physical_device_state->display_plane_property_count,
                validation_error_map[VALIDATION_ERROR_29c009c2]);
10100        }
10101    }
10102    return skip;
10103}

static bool PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
                                                               uint32_t planeIndex) {
    bool skip = false;
    std::lock_guard<std::mutex> lock(global_lock);
    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
                                                                    "vkGetDisplayPlaneSupportedDisplaysKHR");
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                   uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    bool skip = PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_data, physicalDevice, planeIndex);
    if (!skip) {
        result =
            instance_data->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
    }
    return result;
}

static bool PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
                                                          uint32_t planeIndex) {
    bool skip = false;
    std::lock_guard<std::mutex> lock(global_lock);
    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
                                                                    "vkGetDisplayPlaneCapabilitiesKHR");
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
                                                              uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    bool skip = PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_data, physicalDevice, planeIndex);

    if (!skip) {
        result = instance_data->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
    }

    return result;
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName);
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName);

// Map of all APIs to be intercepted by this layer
static const std::unordered_map<std::string, void*> name_to_funcptr_map = {
    {"vkGetInstanceProcAddr", (void*)GetInstanceProcAddr},
    {"vk_layerGetPhysicalDeviceProcAddr", (void*)GetPhysicalDeviceProcAddr},
    {"vkGetDeviceProcAddr", (void*)GetDeviceProcAddr},
    {"vkCreateInstance", (void*)CreateInstance},
    {"vkCreateDevice", (void*)CreateDevice},
    {"vkEnumeratePhysicalDevices", (void*)EnumeratePhysicalDevices},
    {"vkGetPhysicalDeviceQueueFamilyProperties", (void*)GetPhysicalDeviceQueueFamilyProperties},
    {"vkDestroyInstance", (void*)DestroyInstance},
    {"vkEnumerateInstanceLayerProperties", (void*)EnumerateInstanceLayerProperties},
    {"vkEnumerateDeviceLayerProperties", (void*)EnumerateDeviceLayerProperties},
    {"vkEnumerateInstanceExtensionProperties", (void*)EnumerateInstanceExtensionProperties},
    {"vkEnumerateDeviceExtensionProperties", (void*)EnumerateDeviceExtensionProperties},
    {"vkCreateDescriptorUpdateTemplateKHR", (void*)CreateDescriptorUpdateTemplateKHR},
    {"vkDestroyDescriptorUpdateTemplateKHR", (void*)DestroyDescriptorUpdateTemplateKHR},
    {"vkUpdateDescriptorSetWithTemplateKHR", (void*)UpdateDescriptorSetWithTemplateKHR},
    {"vkCmdPushDescriptorSetWithTemplateKHR", (void*)CmdPushDescriptorSetWithTemplateKHR},
    {"vkCreateSwapchainKHR", (void*)CreateSwapchainKHR},
    {"vkDestroySwapchainKHR", (void*)DestroySwapchainKHR},
    {"vkGetSwapchainImagesKHR", (void*)GetSwapchainImagesKHR},
    {"vkAcquireNextImageKHR", (void*)AcquireNextImageKHR},
    {"vkQueuePresentKHR", (void*)QueuePresentKHR},
    {"vkQueueSubmit", (void*)QueueSubmit},
    {"vkWaitForFences", (void*)WaitForFences},
    {"vkGetFenceStatus", (void*)GetFenceStatus},
    {"vkQueueWaitIdle", (void*)QueueWaitIdle},
    {"vkDeviceWaitIdle", (void*)DeviceWaitIdle},
    {"vkGetDeviceQueue", (void*)GetDeviceQueue},
    {"vkDestroyDevice", (void*)DestroyDevice},
    {"vkDestroyFence", (void*)DestroyFence},
    {"vkResetFences", (void*)ResetFences},
    {"vkDestroySemaphore", (void*)DestroySemaphore},
    {"vkDestroyEvent", (void*)DestroyEvent},
    {"vkDestroyQueryPool", (void*)DestroyQueryPool},
    {"vkDestroyBuffer", (void*)DestroyBuffer},
    {"vkDestroyBufferView", (void*)DestroyBufferView},
    {"vkDestroyImage", (void*)DestroyImage},
    {"vkDestroyImageView", (void*)DestroyImageView},
    {"vkDestroyShaderModule", (void*)DestroyShaderModule},
    {"vkDestroyPipeline", (void*)DestroyPipeline},
    {"vkDestroyPipelineLayout", (void*)DestroyPipelineLayout},
    {"vkDestroySampler", (void*)DestroySampler},
    {"vkDestroyDescriptorSetLayout", (void*)DestroyDescriptorSetLayout},
    {"vkDestroyDescriptorPool", (void*)DestroyDescriptorPool},
    {"vkDestroyFramebuffer", (void*)DestroyFramebuffer},
    {"vkDestroyRenderPass", (void*)DestroyRenderPass},
    {"vkCreateBuffer", (void*)CreateBuffer},
    {"vkCreateBufferView", (void*)CreateBufferView},
    {"vkCreateImage", (void*)CreateImage},
    {"vkCreateImageView", (void*)CreateImageView},
    {"vkCreateFence", (void*)CreateFence},
    {"vkCreatePipelineCache", (void*)CreatePipelineCache},
    {"vkDestroyPipelineCache", (void*)DestroyPipelineCache},
    {"vkGetPipelineCacheData", (void*)GetPipelineCacheData},
    {"vkMergePipelineCaches", (void*)MergePipelineCaches},
    {"vkCreateGraphicsPipelines", (void*)CreateGraphicsPipelines},
    {"vkCreateComputePipelines", (void*)CreateComputePipelines},
    {"vkCreateSampler", (void*)CreateSampler},
    {"vkCreateDescriptorSetLayout", (void*)CreateDescriptorSetLayout},
    {"vkCreatePipelineLayout", (void*)CreatePipelineLayout},
    {"vkCreateDescriptorPool", (void*)CreateDescriptorPool},
    {"vkResetDescriptorPool", (void*)ResetDescriptorPool},
    {"vkAllocateDescriptorSets", (void*)AllocateDescriptorSets},
    {"vkFreeDescriptorSets", (void*)FreeDescriptorSets},
    {"vkUpdateDescriptorSets", (void*)UpdateDescriptorSets},
    {"vkCreateCommandPool", (void*)CreateCommandPool},
    {"vkDestroyCommandPool", (void*)DestroyCommandPool},
    {"vkResetCommandPool", (void*)ResetCommandPool},
    {"vkCreateQueryPool", (void*)CreateQueryPool},
    {"vkAllocateCommandBuffers", (void*)AllocateCommandBuffers},
    {"vkFreeCommandBuffers", (void*)FreeCommandBuffers},
    {"vkBeginCommandBuffer", (void*)BeginCommandBuffer},
    {"vkEndCommandBuffer", (void*)EndCommandBuffer},
    {"vkResetCommandBuffer", (void*)ResetCommandBuffer},
    {"vkCmdBindPipeline", (void*)CmdBindPipeline},
    {"vkCmdSetViewport", (void*)CmdSetViewport},
    {"vkCmdSetScissor", (void*)CmdSetScissor},
    {"vkCmdSetLineWidth", (void*)CmdSetLineWidth},
    {"vkCmdSetDepthBias", (void*)CmdSetDepthBias},
    {"vkCmdSetBlendConstants", (void*)CmdSetBlendConstants},
    {"vkCmdSetDepthBounds", (void*)CmdSetDepthBounds},
    {"vkCmdSetStencilCompareMask", (void*)CmdSetStencilCompareMask},
    {"vkCmdSetStencilWriteMask", (void*)CmdSetStencilWriteMask},
    {"vkCmdSetStencilReference", (void*)CmdSetStencilReference},
    {"vkCmdBindDescriptorSets", (void*)CmdBindDescriptorSets},
    {"vkCmdBindVertexBuffers", (void*)CmdBindVertexBuffers},
    {"vkCmdBindIndexBuffer", (void*)CmdBindIndexBuffer},
    {"vkCmdDraw", (void*)CmdDraw},
    {"vkCmdDrawIndexed", (void*)CmdDrawIndexed},
    {"vkCmdDrawIndirect", (void*)CmdDrawIndirect},
    {"vkCmdDrawIndexedIndirect", (void*)CmdDrawIndexedIndirect},
    {"vkCmdDispatch", (void*)CmdDispatch},
    {"vkCmdDispatchIndirect", (void*)CmdDispatchIndirect},
    {"vkCmdCopyBuffer", (void*)CmdCopyBuffer},
    {"vkCmdCopyImage", (void*)CmdCopyImage},
    {"vkCmdBlitImage", (void*)CmdBlitImage},
    {"vkCmdCopyBufferToImage", (void*)CmdCopyBufferToImage},
    {"vkCmdCopyImageToBuffer", (void*)CmdCopyImageToBuffer},
    {"vkCmdUpdateBuffer", (void*)CmdUpdateBuffer},
    {"vkCmdFillBuffer", (void*)CmdFillBuffer},
    {"vkCmdClearColorImage", (void*)CmdClearColorImage},
    {"vkCmdClearDepthStencilImage", (void*)CmdClearDepthStencilImage},
    {"vkCmdClearAttachments", (void*)CmdClearAttachments},
    {"vkCmdResolveImage", (void*)CmdResolveImage},
    {"vkGetImageSubresourceLayout", (void*)GetImageSubresourceLayout},
    {"vkCmdSetEvent", (void*)CmdSetEvent},
    {"vkCmdResetEvent", (void*)CmdResetEvent},
    {"vkCmdWaitEvents", (void*)CmdWaitEvents},
    {"vkCmdPipelineBarrier", (void*)CmdPipelineBarrier},
    {"vkCmdBeginQuery", (void*)CmdBeginQuery},
    {"vkCmdEndQuery", (void*)CmdEndQuery},
    {"vkCmdResetQueryPool", (void*)CmdResetQueryPool},
    {"vkCmdCopyQueryPoolResults", (void*)CmdCopyQueryPoolResults},
    {"vkCmdPushConstants", (void*)CmdPushConstants},
    {"vkCmdWriteTimestamp", (void*)CmdWriteTimestamp},
    {"vkCreateFramebuffer", (void*)CreateFramebuffer},
    {"vkCreateShaderModule", (void*)CreateShaderModule},
    {"vkCreateRenderPass", (void*)CreateRenderPass},
    {"vkCmdBeginRenderPass", (void*)CmdBeginRenderPass},
    {"vkCmdNextSubpass", (void*)CmdNextSubpass},
    {"vkCmdEndRenderPass", (void*)CmdEndRenderPass},
    {"vkCmdExecuteCommands", (void*)CmdExecuteCommands},
    {"vkSetEvent", (void*)SetEvent},
    {"vkMapMemory", (void*)MapMemory},
    {"vkUnmapMemory", (void*)UnmapMemory},
    {"vkFlushMappedMemoryRanges", (void*)FlushMappedMemoryRanges},
    {"vkInvalidateMappedMemoryRanges", (void*)InvalidateMappedMemoryRanges},
    {"vkAllocateMemory", (void*)AllocateMemory},
    {"vkFreeMemory", (void*)FreeMemory},
    {"vkBindBufferMemory", (void*)BindBufferMemory},
    {"vkGetBufferMemoryRequirements", (void*)GetBufferMemoryRequirements},
    {"vkGetImageMemoryRequirements", (void*)GetImageMemoryRequirements},
    {"vkGetQueryPoolResults", (void*)GetQueryPoolResults},
    {"vkBindImageMemory", (void*)BindImageMemory},
    {"vkQueueBindSparse", (void*)QueueBindSparse},
    {"vkCreateSemaphore", (void*)CreateSemaphore},
    {"vkCreateEvent", (void*)CreateEvent},
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    {"vkCreateAndroidSurfaceKHR", (void*)CreateAndroidSurfaceKHR},
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
    {"vkCreateMirSurfaceKHR", (void*)CreateMirSurfaceKHR},
    {"vkGetPhysicalDeviceMirPresentationSupportKHR", (void*)GetPhysicalDeviceMirPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
    {"vkCreateWaylandSurfaceKHR", (void*)CreateWaylandSurfaceKHR},
    {"vkGetPhysicalDeviceWaylandPresentationSupportKHR", (void*)GetPhysicalDeviceWaylandPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
    {"vkCreateWin32SurfaceKHR", (void*)CreateWin32SurfaceKHR},
    {"vkGetPhysicalDeviceWin32PresentationSupportKHR", (void*)GetPhysicalDeviceWin32PresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
    {"vkCreateXcbSurfaceKHR", (void*)CreateXcbSurfaceKHR},
    {"vkGetPhysicalDeviceXcbPresentationSupportKHR", (void*)GetPhysicalDeviceXcbPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
    {"vkCreateXlibSurfaceKHR", (void*)CreateXlibSurfaceKHR},
    {"vkGetPhysicalDeviceXlibPresentationSupportKHR", (void*)GetPhysicalDeviceXlibPresentationSupportKHR},
#endif
    {"vkCreateDisplayPlaneSurfaceKHR", (void*)CreateDisplayPlaneSurfaceKHR},
    {"vkDestroySurfaceKHR", (void*)DestroySurfaceKHR},
    {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", (void*)GetPhysicalDeviceSurfaceCapabilitiesKHR},
    {"vkGetPhysicalDeviceSurfaceCapabilities2KHR", (void*)GetPhysicalDeviceSurfaceCapabilities2KHR},
    {"vkGetPhysicalDeviceSurfaceCapabilities2EXT", (void*)GetPhysicalDeviceSurfaceCapabilities2EXT},
    {"vkGetPhysicalDeviceSurfaceSupportKHR", (void*)GetPhysicalDeviceSurfaceSupportKHR},
    {"vkGetPhysicalDeviceSurfacePresentModesKHR", (void*)GetPhysicalDeviceSurfacePresentModesKHR},
    {"vkGetPhysicalDeviceSurfaceFormatsKHR", (void*)GetPhysicalDeviceSurfaceFormatsKHR},
    {"vkGetPhysicalDeviceSurfaceFormats2KHR", (void*)GetPhysicalDeviceSurfaceFormats2KHR},
    {"vkGetPhysicalDeviceQueueFamilyProperties2KHR", (void*)GetPhysicalDeviceQueueFamilyProperties2KHR},
    {"vkEnumeratePhysicalDeviceGroupsKHX", (void*)EnumeratePhysicalDeviceGroupsKHX},
    {"vkCreateDebugReportCallbackEXT", (void*)CreateDebugReportCallbackEXT},
    {"vkDestroyDebugReportCallbackEXT", (void*)DestroyDebugReportCallbackEXT},
    {"vkDebugReportMessageEXT", (void*)DebugReportMessageEXT},
    {"vkGetPhysicalDeviceDisplayPlanePropertiesKHR", (void*)GetPhysicalDeviceDisplayPlanePropertiesKHR},
    {"vkGetDisplayPlaneSupportedDisplaysKHR", (void*)GetDisplayPlaneSupportedDisplaysKHR},
    {"vkGetDisplayPlaneCapabilitiesKHR", (void*)GetDisplayPlaneCapabilitiesKHR},
};

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
    assert(device);
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    // Is this API to be intercepted by this layer?
    const auto item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

    auto &table = device_data->dispatch_table;
    if (!table.GetDeviceProcAddr) return nullptr;
    return table.GetDeviceProcAddr(device, funcName);
}
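
// Illustrative sketch (editor's addition, excluded from the build): what the lookup above means
// from the application side. Names present in name_to_funcptr_map resolve to this layer's hooks;
// anything else is forwarded down the dispatch chain and may legitimately come back null. The
// function name is hypothetical; a valid VkDevice created with this layer enabled is assumed.
#if 0
static void ExampleProcAddrResolution(VkDevice device) {
    // "vkCmdDraw" is in name_to_funcptr_map, so this returns the layer's CmdDraw hook.
    auto draw = reinterpret_cast<PFN_vkCmdDraw>(vkGetDeviceProcAddr(device, "vkCmdDraw"));
    // An unrecognized name falls through to table.GetDeviceProcAddr and may be null.
    PFN_vkVoidFunction unknown = vkGetDeviceProcAddr(device, "vkNotARealEntryPoint");
    (void)draw;
    (void)unknown;
}
#endif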

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    instance_layer_data *instance_data;
    // Is this API to be intercepted by this layer?
    const auto item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

    instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    auto &table = instance_data->dispatch_table;
    if (!table.GetInstanceProcAddr) return nullptr;
    return table.GetInstanceProcAddr(instance, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
    assert(instance);
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);

    auto &table = instance_data->dispatch_table;
    if (!table.GetPhysicalDeviceProcAddr) return nullptr;
    return table.GetPhysicalDeviceProcAddr(instance, funcName);
}

}  // namespace core_validation

// loader-layer interface v0: just wrappers, since there is only one layer

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                                      VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
                                                                                  VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                                                VkLayerProperties *pProperties) {
    // The loader invokes this export with VK_NULL_HANDLE, which the layer command handles internally.
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // The loader invokes this export with VK_NULL_HANDLE, which the layer command handles internally.
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
                                                                                           const char *funcName) {
    return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
    assert(pVersionStruct != NULL);
    assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);

    // Fill in the function pointers if the loader's interface version is new enough for the structure to contain them.
    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
        pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
        pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
        pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
    }

    if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        core_validation::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
    } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
    }

    return VK_SUCCESS;
}

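// Illustrative sketch (editor's addition, excluded from the build): how a loader might drive the
// negotiation above. The loader advertises its interface version; the layer lowers the value if it
// only supports an older interface, and the caller must honor whatever comes back. For versions of
// 2 or newer the entry points are taken from the structure instead of symbol exports. The function
// name is hypothetical.
#if 0
static void ExampleLoaderNegotiation() {
    VkNegotiateLayerInterface negotiation = {};
    negotiation.sType = LAYER_NEGOTIATE_INTERFACE_STRUCT;
    negotiation.loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
    if (vkNegotiateLoaderLayerInterfaceVersion(&negotiation) == VK_SUCCESS &&
        negotiation.loaderLayerInterfaceVersion >= 2) {
        PFN_vkGetInstanceProcAddr gipa = negotiation.pfnGetInstanceProcAddr;
        (void)gipa;  // Use gipa to bootstrap the rest of the dispatch chain.
    }
}
#endif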