core_validation.cpp revision c1c43cc01e333f3762c65ce4ad926c33235583d2
/* Copyright (c) 2015-2017 The Khronos Group Inc.
 * Copyright (c) 2015-2017 Valve Corporation
 * Copyright (c) 2015-2017 LunarG, Inc.
 * Copyright (C) 2015-2017 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: Dustin Graves <dustin@lunarg.com>
 * Author: Jeremy Hayes <jeremy@lunarg.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Karl Schultz <karl@lunarg.com>
 * Author: Mark Young <marky@lunarg.com>
 * Author: Mike Schuchardt <mikes@lunarg.com>
 * Author: Mike Weiblen <mikew@lunarg.com>
 * Author: Tony Barbour <tony@LunarG.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <inttypes.h>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "core_validation.h"
#include "buffer_validation.h"
#include "shader_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)      \
    do {                     \
        printf(__VA_ARGS__); \
        printf("\n");        \
    } while (0)
#endif

// TODO: remove on NDK update (r15 will probably have proper STL impl)
#ifdef __ANDROID__
namespace std {

template <typename T>
std::string to_string(T var) {
    std::ostringstream ss;
    ss << var;
    return ss.str();
}
}
#endif

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

using mutex_t = std::mutex;
using lock_guard_t = std::lock_guard<mutex_t>;
using unique_lock_t = std::unique_lock<mutex_t>;

namespace core_validation {

using std::unordered_map;
using std::unordered_set;
using std::unique_ptr;
using std::vector;
using std::string;
using std::stringstream;
using std::max;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);

// A special value of (0xFFFFFFFF, 0xFFFFFFFF) indicates that the surface size will be determined
// by the extent of a swapchain targeting the surface.
static const uint32_t kSurfaceSizeFromSwapchain = 0xFFFFFFFFu;

struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
    uint32_t physical_devices_count = 0;
    CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
    uint32_t physical_device_groups_count = 0;
    CHECK_DISABLED disabled = {};

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;

    InstanceExtensions extensions;
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;

    DeviceExtensions extensions = {};
    unordered_set<VkQueue> queues;  // All queues under given device
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_STATE> queueMap;
    unordered_map<VkEvent, EVENT_STATE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_STATE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    unordered_map<VkDescriptorUpdateTemplateKHR, unique_ptr<TEMPLATE_STATE>> desc_template_map;
    unordered_map<VkSwapchainKHR, std::unique_ptr<SWAPCHAIN_NODE>> swapchainMap;

    VkDevice device = VK_NULL_HANDLE;
    VkPhysicalDevice physical_device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
    VkPhysicalDeviceProperties phys_dev_props = {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;

static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

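// Warn (via console, since no debug callback exists yet) if VK_LAYER_GOOGLE_unique_objects is activated before this layer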
template <class TCreateInfo>
void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
        }
    }
}

// TODO : This can be much smarter, using separate locks for separate global data
static mutex_t global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *GetImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_STATE *GetSamplerState(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
IMAGE_STATE *GetImageState(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer state ptr for specified buffer or else NULL
BUFFER_STATE *GetBufferState(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->swapchainMap.find(swapchain);
    if (swp_it == dev_data->swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return buffer view state ptr for specified bufferView or else NULL
BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
    auto bv_it = dev_data->bufferViewMap.find(buffer_view);
    if (bv_it == dev_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

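// Return fence node ptr for specified fence or else NULL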
FENCE_NODE *GetFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

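// Return event node ptr for specified event or else NULL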
EVENT_STATE *GetEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

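// Return query pool node ptr for specified query pool or else NULL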
QUERY_POOL_NODE *GetQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

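// Return queue state ptr for specified queue or else NULL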
QUEUE_STATE *GetQueueState(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

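// Return semaphore node ptr for specified semaphore or else NULL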
SEMAPHORE_NODE *GetSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

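// Return command pool node ptr for specified command pool or else NULL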
COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

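// Return physical device state ptr for specified physical device or else NULL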
PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

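// Return surface state ptr for specified surface or else NULL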
SURFACE_STATE *GetSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    auto it = instance_data->surface_map.find(surface);
    if (it == instance_data->surface_map.end()) {
        return nullptr;
    }
    return &it->second;
}

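// Return ptr to the set of extensions enabled on the given device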
DeviceExtensions const *GetEnabledExtensions(layer_data const *dev_data) {
    return &dev_data->extensions;
}

// Return ptr to memory binding for given handle of specified type
static BINDABLE *GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
    switch (type) {
        case kVulkanObjectTypeImage:
            return GetImageState(dev_data, VkImage(handle));
        case kVulkanObjectTypeBuffer:
            return GetBufferState(dev_data, VkBuffer(handle));
        default:
            break;
    }
    return nullptr;
}
// prototype
GLOBAL_CB_NODE *GetCBNode(layer_data const *, const VkCommandBuffer);

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *dev_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    dev_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle, VulkanObjectType type,
                                  const char *functionName) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using it.",
                           functionName, HandleToUint64(mem), object_string[type], bound_object_handle);
        }
    }
    return false;
}
// For given image_state
//  If mem is special swapchain key, then verify that image_state valid member is true
//  Else verify that the image's bound memory range is valid
bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_state->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           HandleToUint64(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using it.",
                           functionName, HandleToUint64(image_state->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), kVulkanObjectTypeImage,
                                     functionName);
    }
    return false;
}
// For given buffer_state, verify that the range it's bound to is valid
bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer,
                                 functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_state to valid param value
//  Else set the image's bound memory range to valid param value
void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_state->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
    SetMemoryValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), valid);
}

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    sampler_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(sampler_state->sampler), kVulkanObjectTypeSampler});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        for (auto mem_binding : image_state->GetBoundMemory()) {
            DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
            if (pMemInfo) {
                pMemInfo->cb_bindings.insert(cb_node);
                // Now update CBInfo's Mem reference list
                cb_node->memObjs.insert(mem_binding);
            }
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({HandleToUint64(image_state->image), kVulkanObjectTypeImage});
        image_state->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(view_state->image_view), kVulkanObjectTypeImageView});
    auto image_state = GetImageState(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_state) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
    // First update CB binding in MemObj mini CB list
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
        if (pMemInfo) {
            pMemInfo->cb_bindings.insert(cb_node);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(mem_binding);
        }
    }
    // Now update cb binding for buffer
    cb_node->object_bindings.insert({HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer});
    buffer_state->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(view_state->buffer_view), kVulkanObjectTypeBufferView});
    auto buffer_state = GetBufferState(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
    }
}

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (cb_node) {
        if (cb_node->memObjs.size() > 0) {
            for (auto mem : cb_node->memObjs) {
                DEVICE_MEM_INFO *pInfo = GetMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->cb_bindings.erase(cb_node);
                }
            }
            cb_node->memObjs.clear();
        }
    }
}

// Clear a single object binding from given memory object
static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
    if (mem_info) {
        mem_info->obj_bindings.erase({handle, type});
    }
    return false;
}

// ClearMemoryObjectBindings clears the binding of objects to memory
//  For the given object it pulls the memory bindings and makes sure that the bindings
//  no longer refer to the object being cleared. This occurs when objects are destroyed.
bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
    bool skip = false;
    BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
    if (mem_binding) {
        if (!mem_binding->sparse) {
            skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
        } else {  // Sparse, clear all bindings
            for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
                skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
            }
        }
    }
    return skip;
}

// For given mem object, verify that it is not null or UNBOUND; if it is, report an error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
                                                      " used with no memory bound. Memory should be bound by calling "
                                                      "vkBind%sMemory(). %s",
                         api_name, type_name, handle, type_name, validation_error_map[error_code]);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
                                                      " used with no memory bound and previously bound memory was freed. "
                                                      "Memory must not be freed prior to this operation. %s",
                         api_name, type_name, handle, validation_error_map[error_code]);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
                                  UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), api_name, "Image",
                                          error_code);
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
                                   UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), api_name,
                                          "Buffer", error_code);
    }
    return result;
}

// SetMemBinding is used to establish an immutable, non-sparse binding between a single image/buffer object and a memory object.
// Corresponding valid usage checks are in ValidateSetMemBinding().
static void SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type, const char *apiName) {
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // For image objects, make sure default memory state is correctly set
            // TODO : What's the best/correct way to handle this?
            if (kVulkanObjectTypeImage == type) {
                auto const image_state = GetImageState(dev_data, VkImage(handle));
                if (image_state) {
                    VkImageCreateInfo ici = image_state->createInfo;
                    if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                        // TODO::  More memory state transition stuff.
                    }
                }
            }
            mem_binding->binding.mem = mem;
        }
    }
}

// Valid usage checks for a call to SetMemBinding().
// For the NULL mem case, output a warning
// Make sure the given object is in the global object map
//  If a previous binding existed, output a validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
static bool ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
                                  const char *apiName) {
    bool skip = false;
    // It's an error to bind an object to NULL memory
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        if (mem_binding->sparse) {
            UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_1740082a;
            const char *handle_type = "IMAGE";
            if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
                error_code = VALIDATION_ERROR_1700080c;
                handle_type = "BUFFER";
            } else {
                assert(strcmp(apiName, "vkBindImageMemory()") == 0);
            }
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem), __LINE__, error_code, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which was created with sparse memory flags (VK_%s_CREATE_SPARSE_*_BIT). %s",
                            apiName, HandleToUint64(mem), handle, handle_type, validation_error_map[error_code]);
        }
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = GetMemObjInfo(dev_data, mem_binding->binding.mem);
            if (prev_binding) {
                UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_17400828;
                if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
                    error_code = VALIDATION_ERROR_1700080a;
                } else {
                    assert(strcmp(apiName, "vkBindImageMemory()") == 0);
                }
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(mem), __LINE__, error_code, "MEM",
                                "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                ") which has already been bound to mem object 0x%" PRIxLEAST64 ". %s",
                                apiName, HandleToUint64(mem), handle, HandleToUint64(prev_binding->mem),
                                validation_error_map[error_code]);
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
                                "Vulkan so this attempt to bind to new memory is not allowed.",
                                apiName, HandleToUint64(mem), handle);
            }
        }
    }
    return skip;
}

// For the NULL mem case, clear any previous binding. Otherwise:
//  Make sure given object is in its object map
//  If a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return skip value (currently always false)
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
    bool skip = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        assert(mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, binding.mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            mem_binding->sparse_bindings.insert(binding);
        }
    }
    return skip;
}

// Check object status for selected flag state
static bool validate_status(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
                            const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    if (!(pNode->status & status_mask)) {
        char const *const message = validation_error_map[msg_code];
        return log_msg(dev_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(pNode->commandBuffer), __LINE__, msg_code, "DS", "command buffer object 0x%p: %s. %s.",
                       pNode->commandBuffer, fail_msg, message);
    }
    return false;
}

// Retrieve pipeline node ptr for given pipeline object
static PIPELINE_STATE *getPipelineState(layer_data const *dev_data, VkPipeline pipeline) {
    auto it = dev_data->pipelineMap.find(pipeline);
    if (it == dev_data->pipelineMap.end()) {
        return nullptr;
    }
    return it->second;
}

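// Return render pass state ptr for specified renderPass or else NULL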
RENDER_PASS_STATE *GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass) {
    auto it = dev_data->renderPassMap.find(renderpass);
    if (it == dev_data->renderPassMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

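// Return framebuffer state ptr for specified framebuffer or else NULL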
FRAMEBUFFER_STATE *GetFramebufferState(const layer_data *dev_data, VkFramebuffer framebuffer) {
    auto it = dev_data->frameBufferMap.find(framebuffer);
    if (it == dev_data->frameBufferMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

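// Return shared ptr to the descriptor set layout state for specified layout or else NULL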
std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(layer_data const *dev_data,
                                                                                         VkDescriptorSetLayout dsLayout) {
    auto it = dev_data->descriptorSetLayoutMap.find(dsLayout);
    if (it == dev_data->descriptorSetLayoutMap.end()) {
        return nullptr;
    }
    return it->second;
}

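// Return pipeline layout node ptr for specified pipelineLayout or else NULL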
static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
    auto it = dev_data->pipelineLayoutMap.find(pipeLayout);
    if (it == dev_data->pipelineLayoutMap.end()) {
        return nullptr;
    }
    return &it->second;
}

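// Return shader module state ptr for specified shader module or else NULL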
shader_module const *GetShaderModuleState(layer_data const *dev_data, VkShaderModule module) {
    auto it = dev_data->shaderModuleMap.find(module);
    if (it == dev_data->shaderModuleMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

// Return true if for a given PSO, the given state enum is dynamic, else return false
static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
        }
    }
    return false;
}

// Validate state stored as flags at time of draw call
static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
                                      UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    bool result = false;
    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic line width state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pRasterizationState &&
        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic depth bias state not set for this command buffer", msg_code);
    }
    if (pPipe->blendConstantsEnabled) {
        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic blend constants state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic depth bounds state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil read mask state not set for this command buffer", msg_code);
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil write mask state not set for this command buffer", msg_code);
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil reference state not set for this command buffer", msg_code);
    }
    if (indexed) {
        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
    }

    return result;
}

// Verify attachment reference compatibility according to spec
//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED and the other array must match this
//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
//   to make sure that format and sample counts match.
//  If not, they are not compatible.
static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
                                             const VkAttachmentDescription *pSecondaryAttachments) {
    // Check potential NULL cases first to avoid nullptr issues later
    if (pPrimary == nullptr) {
        if (pSecondary == nullptr) {
            return true;
        }
        return false;
    } else if (pSecondary == nullptr) {
        return false;
    }
    if (index >= primaryCount) {  // Check secondary as if primary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment) return true;
    } else if (index >= secondaryCount) {  // Check primary as if secondary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment) return true;
    } else {  // Format and sample count must match
        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return true;
        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return false;
        }
        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
             pSecondaryAttachments[pSecondary[index].attachment].format) &&
            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
             pSecondaryAttachments[pSecondary[index].attachment].samples))
            return true;
    }
    // Format and sample counts didn't match
    return false;
}
// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
// For given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
static bool verify_renderpass_compatibility(const layer_data *dev_data, const VkRenderPassCreateInfo *primaryRPCI,
                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
        stringstream errorStr;
        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
        errorMsg = errorStr.str();
        return false;
    }
    for (uint32_t spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         primaryColorCount, primaryRPCI->pAttachments,
                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }

        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, 1,
                                              primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, secondaryRPCI->pAttachments)) {
            stringstream errorStr;
            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
            errorMsg = errorStr.str();
            return false;
        }

        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
        for (uint32_t i = 0; i < inputMax; ++i) {
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
    }
    return true;
}

// Return Set node ptr for specified set or else NULL
cvdescriptorset::DescriptorSet *GetSetNode(const layer_data *dev_data, VkDescriptorSet set) {
    auto set_it = dev_data->setMap.find(set);
    if (set_it == dev_data->setMap.end()) {
        return NULL;
    }
    return set_it->second;
}

// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}

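// Stream a comma-separated list of the indices of the set bits in "bits" to output stream s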
static void list_bits(std::ostream &s, uint32_t bits) {
    for (int i = 0; i < 32 && bits; i++) {
        if (bits & (1 << i)) {
            s << i;
            bits &= ~(1 << i);
            if (bits) {
                s << ",";
            }
        }
    }
}

// Validate draw-time state related to the PSO
static bool ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
                                          PIPELINE_STATE const *pPipeline) {
    bool skip = false;

    // Verify vertex binding
    if (pPipeline->vertexBindingDescriptions.size() > 0) {
        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                            "The Pipeline State Object (0x%" PRIxLEAST64
                            ") expects that this Command Buffer's vertex binding Index %u "
                            "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
                            "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
                            HandleToUint64(state.pipeline_state->pipeline), vertex_binding, i, vertex_binding);
            }
        }
    } else {
        if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), __LINE__,
                            DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                            "Vertex buffers are bound to command buffer (0x%p"
                            ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
                            pCB->commandBuffer, HandleToUint64(state.pipeline_state->pipeline));
        }
    }
    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
    // Skip check if rasterization is disabled or there is no viewport.
    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
        pPipeline->graphicsPipelineCI.pViewportState) {
        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);

        if (dynViewport) {
            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
            if (missingViewportMask) {
                std::stringstream ss;
                ss << "Dynamic viewport(s) ";
                list_bits(ss, missingViewportMask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
            }
        }

        if (dynScissor) {
            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
            if (missingScissorMask) {
                std::stringstream ss;
                ss << "Dynamic scissor(s) ";
                list_bits(ss, missingScissorMask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
            }
        }
    }

    // Verify that any MSAA request in PSO matches sample# in bound FB
    // Skip the check if rasterization is disabled.
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
        if (pCB->activeRenderPass) {
            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
            unsigned subpass_num_samples = 0;

            for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; i++) {
                auto attachment = subpass_desc->pColorAttachments[i].attachment;
                if (attachment != VK_ATTACHMENT_UNUSED)
                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_desc->pDepthStencilAttachment &&
                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                                "Num samples mismatch! At draw-time in Pipeline (0x%" PRIxLEAST64
                                ") with %u samples, the current RenderPass (0x%" PRIxLEAST64 ") uses %u samples!",
                                HandleToUint64(pPipeline->pipeline), pso_num_samples,
                                HandleToUint64(pCB->activeRenderPass->renderPass), subpass_num_samples);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                            "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
                            HandleToUint64(pPipeline->pipeline));
        }
    }
    // Verify that PSO creation renderPass is compatible with active renderPass
    if (pCB->activeRenderPass) {
        std::string err_string;
        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
            !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
                                             err_string)) {
            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                            "At Draw time the active render pass (0x%" PRIxLEAST64
                            ") is incompatible w/ gfx pipeline "
                            "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                            HandleToUint64(pCB->activeRenderPass->renderPass), HandleToUint64(pPipeline->pipeline),
                            HandleToUint64(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
        }

        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                            "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
                            pCB->activeSubpass);
        }
    }
    // TODO : Add more checks here

    return skip;
}

// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool verify_set_layout_compatibility(const cvdescriptorset::DescriptorSet *descriptor_set,
                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
                                            string &errorMsg) {
    auto num_sets = pipeline_layout->set_layouts.size();
    if (layoutIndex >= num_sets) {
        stringstream errorStr;
        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
                 << layoutIndex;
        errorMsg = errorStr.str();
        return false;
    }
    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
    return descriptor_set->IsCompatible(layout_node.get(), &errorMsg);
}
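
// Illustrative sketch (not part of the layer): the application-side binding that this
// compatibility check covers. Handle names here are hypothetical.
//
//     VkDescriptorSet set;  // allocated from a layout compatible with slot 0 of pipeline_layout
//     vkCmdBindDescriptorSets(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
//                             0 /*firstSet*/, 1, &set, 0 /*dynamicOffsetCount*/, nullptr);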

// Validate overall state at the time of a draw call
static bool ValidateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const bool indexed,
                              const VkPipelineBindPoint bind_point, const char *function,
                              UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    bool result = false;
    auto const &state = cb_node->lastBound[bind_point];
    PIPELINE_STATE *pPipe = state.pipeline_state;
    if (nullptr == pPipe) {
        result |= log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
            HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
        // Return early regardless of the callback's verdict -- every check below dereferences pPipe
        return result;
    }
    // First check flag states
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
        result = validate_draw_state_flags(dev_data, cb_node, pPipe, indexed, msg_code);

    // Now complete other state checks
    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
        string errorString;
        auto pipeline_layout = pPipe->pipeline_layout;

        for (const auto &set_binding_pair : pPipe->active_slots) {
            uint32_t setIndex = set_binding_pair.first;
            // If valid set is not bound throw an error
            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
                result |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
                            "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.",
                            HandleToUint64(pPipe->pipeline), setIndex);
            } else if (!verify_set_layout_compatibility(state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
                                                        errorString)) {
                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
                result |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(setHandle), __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                            "VkDescriptorSet (0x%" PRIxLEAST64
                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
                            HandleToUint64(setHandle), setIndex, HandleToUint64(pipeline_layout.layout), errorString.c_str());
            } else {  // Valid set is bound and layout compatible, validate that it's updated
                // Pull the set node
                cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
                // Validate the draw-time state for this descriptor set
                std::string err_str;
                if (!descriptor_set->ValidateDrawState(set_binding_pair.second, state.dynamicOffsets[setIndex], cb_node, function,
                                                       &err_str)) {
                    auto set = descriptor_set->GetSet();
                    result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                      VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(set), __LINE__,
                                      DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                      "Descriptor set 0x%" PRIxLEAST64 " encountered the following validation error at %s time: %s",
                                      HandleToUint64(set), function, err_str.c_str());
                }
            }
        }
    }

    // Check general pipeline state that needs to be validated at drawtime
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) result |= ValidatePipelineDrawtimeState(dev_data, state, cb_node, pPipe);

    return result;
}
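
// Illustrative sketch (not part of the layer): the minimal record-time ordering that this
// function assumes. Handle names are hypothetical.
//
//     vkCmdBindPipeline(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, graphics_pipeline);
//     vkCmdBindDescriptorSets(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
//                             0, 1, &set0, 0, nullptr);
//     vkCmdDraw(cmd_buffer, vertex_count, 1, 0, 0);  // validated by ValidateDrawState()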

static void UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
    auto const &state = cb_state->lastBound[bind_point];
    PIPELINE_STATE *pPipe = state.pipeline_state;
    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
        for (const auto &set_binding_pair : pPipe->active_slots) {
            uint32_t setIndex = set_binding_pair.first;
            // Pull the set node
            cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
            // Bind this set and its active descriptor resources to the command buffer
            descriptor_set->BindCommandBuffer(cb_state, set_binding_pair.second);
            // For given active slots record updated images & buffers
            descriptor_set->GetStorageUpdates(set_binding_pair.second, &cb_state->updateBuffers, &cb_state->updateImages);
        }
    }
    if (!pPipe->vertexBindingDescriptions.empty()) {
        cb_state->vertex_buffer_used = true;
    }
}

// Validate HW line width capabilities prior to setting requested line width.
static bool verifyLineWidth(layer_data *dev_data, DRAW_STATE_ERROR dsError, VulkanObjectType object_type, const uint64_t &target,
                            float lineWidth) {
    bool skip = false;

    // First check to see if the physical device supports wide lines.
    if ((VK_FALSE == dev_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object_type], target, __LINE__,
                        dsError, "DS",
                        "Attempt to set lineWidth to %f but physical device wideLines feature "
                        "not supported/enabled so lineWidth must be 1.0f!",
                        lineWidth);
    } else {
        // Otherwise, make sure the width falls in the valid range.
        if ((dev_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
            (dev_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object_type], target,
                            __LINE__, dsError, "DS",
                            "Attempt to set lineWidth to %f but physical device limits line width "
                            "to between [%f, %f]!",
                            lineWidth, dev_data->phys_dev_properties.properties.limits.lineWidthRange[0],
                            dev_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
        }
    }

    return skip;
}
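
// Illustrative sketch (not part of the layer): a dynamic line-width update that this
// check gates. Without the wideLines feature enabled, only 1.0f passes.
//
//     if (enabled_features.wideLines) {
//         vkCmdSetLineWidth(cmd_buffer, 2.5f);  // must also fall within limits.lineWidthRange
//     } else {
//         vkCmdSetLineWidth(cmd_buffer, 1.0f);
//     }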

static bool ValidatePipelineLocked(layer_data *dev_data, std::vector<PIPELINE_STATE *> const &pPipelines, int pipelineIndex) {
    bool skip = false;

    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];

    // If create derivative bit is set, check that we've specified a base
    // pipeline correctly, and that the base pipeline was created to allow
    // derivatives.
    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
        PIPELINE_STATE *pBasePipeline = nullptr;
        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
            // This check is a superset of VALIDATION_ERROR_096005a8 and VALIDATION_ERROR_096005aa
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_208005a0, "DS",
                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline. %s",
                            validation_error_map[VALIDATION_ERROR_208005a0]);
            } else {
                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
            }
        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
            pBasePipeline = getPipelineState(dev_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
        }

        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
        }
    }

    return skip;
}
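
// Illustrative sketch (not part of the layer): a create-info pair that satisfies the
// derivative-pipeline rules checked above. Variable names are hypothetical.
//
//     VkGraphicsPipelineCreateInfo infos[2] = {base_ci, derived_ci};
//     infos[0].flags = VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT;
//     infos[1].flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT;
//     infos[1].basePipelineIndex = 0;                // base must precede the derivative
//     infos[1].basePipelineHandle = VK_NULL_HANDLE;  // exactly one of index/handle is used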

// UNLOCKED pipeline validation. DO NOT lookup objects in the layer_data->* maps in this function.
static bool ValidatePipelineUnlocked(layer_data *dev_data, std::vector<PIPELINE_STATE *> const &pPipelines, int pipelineIndex) {
    bool skip = false;

    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];

    // Ensure the subpass index is valid before using it to index pSubpasses. If it's out of range,
    // validate_and_capture_pipeline_shader_state produces nonsense errors that confuse users. Other
    // layers should already emit errors for the renderpass being invalid.
    auto subpass_desc = (pPipeline->graphicsPipelineCI.subpass < pPipeline->render_pass_ci.subpassCount)
                            ? &pPipeline->render_pass_ci.pSubpasses[pPipeline->graphicsPipelineCI.subpass]
                            : nullptr;
    if (!subpass_desc) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005ee, "DS",
                        "Invalid Pipeline CreateInfo State: Subpass index %u "
                        "is out of range for this renderpass (0..%u). %s",
                        pPipeline->graphicsPipelineCI.subpass, pPipeline->render_pass_ci.subpassCount - 1,
                        validation_error_map[VALIDATION_ERROR_096005ee]);
    }

    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
        const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
        // Guard with subpass_desc: it is null when the subpass index was flagged as out of range above
        if (subpass_desc && (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
            skip |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005d4, "DS",
                "vkCreateGraphicsPipelines(): Render pass (0x%" PRIxLEAST64
                ") subpass %u has colorAttachmentCount of %u which doesn't match the pColorBlendState->attachmentCount of %u. %s",
                HandleToUint64(pPipeline->graphicsPipelineCI.renderPass), pPipeline->graphicsPipelineCI.subpass,
                subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount,
                validation_error_map[VALIDATION_ERROR_096005d4]);
        }
        if (!dev_data->enabled_features.independentBlend) {
            if (pPipeline->attachments.size() > 1) {
                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
                    // only attachment state, so memcmp is best suited for the comparison
                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
                               sizeof(pAttachments[0]))) {
                        skip |=
                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                    HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_0f4004ba, "DS",
                                    "Invalid Pipeline CreateInfo: If independent blend feature not "
                                    "enabled, all elements of pAttachments must be identical. %s",
                                    validation_error_map[VALIDATION_ERROR_0f4004ba]);
                        break;
                    }
                }
            }
        }
        if (!dev_data->enabled_features.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_0f4004bc, "DS",
                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE. %s",
                        validation_error_map[VALIDATION_ERROR_0f4004bc]);
        }
    }

    if (validate_and_capture_pipeline_shader_state(dev_data, pPipeline)) {
        skip = true;
    }
    // Each shader's stage must be unique
    if (pPipeline->duplicate_shaders) {
        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
            if (pPipeline->duplicate_shaders & stage) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
                                string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
            }
        }
    }
    // VS is required
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005ae, "DS",
                        "Invalid Pipeline CreateInfo State: Vertex Shader required. %s",
                        validation_error_map[VALIDATION_ERROR_096005ae]);
    }
    // Either both or neither TC/TE shaders should be defined
    bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
    bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
    if (has_control && !has_eval) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b2, "DS",
                        "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
                        validation_error_map[VALIDATION_ERROR_096005b2]);
    }
    if (!has_control && has_eval) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b4, "DS",
                        "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
                        validation_error_map[VALIDATION_ERROR_096005b4]);
    }
    // Compute shaders should be specified independent of Gfx shaders
    if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b0, "DS",
                        "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline. %s",
                        validation_error_map[VALIDATION_ERROR_096005b0]);
    }
    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
    if (has_control && has_eval &&
        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005c0, "DS",
                        "Invalid Pipeline CreateInfo State: "
                        "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
                        "topology for tessellation pipelines. %s",
                        validation_error_map[VALIDATION_ERROR_096005c0]);
    }
    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (!has_control || !has_eval) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005c2, "DS",
                            "Invalid Pipeline CreateInfo State: "
                            "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                            "topology is only valid for tessellation pipelines. %s",
                            validation_error_map[VALIDATION_ERROR_096005c2]);
        }
    }

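    // Illustrative sketch (not part of the layer): input assembly state that satisfies
    // the two tessellation checks above, assuming both TC and TE stages are present.
    //
    //     VkPipelineInputAssemblyStateCreateInfo ia = {};
    //     ia.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
    //     ia.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;  // required with tessellation stages
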
    // If a rasterization state is provided...
    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
        // Make sure that the line width conforms to the HW.
        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip |=
                verifyLineWidth(dev_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, kVulkanObjectTypePipeline,
                                HandleToUint64(pPipeline->pipeline), pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
        }

        if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
            (!dev_data->enabled_features.depthClamp)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_1020061c, "DS",
                            "vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable "
                            "member of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE. %s",
                            validation_error_map[VALIDATION_ERROR_1020061c]);
        }

        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
            (pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) &&
            (!dev_data->enabled_features.depthBiasClamp)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
                            "vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp "
                            "member of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
                            "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled");
        }

        // If rasterization is enabled...
        if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
            if ((pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) &&
                (!dev_data->enabled_features.alphaToOne)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_10000622, "DS",
                                "vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable "
                                "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE. %s",
                                validation_error_map[VALIDATION_ERROR_10000622]);
            }

            // If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
            if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                    HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005e0, "DS",
                                    "Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is "
                                    "enabled and subpass uses a depth/stencil attachment. %s",
                                    validation_error_map[VALIDATION_ERROR_096005e0]);

                } else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
                           (!dev_data->enabled_features.depthBounds)) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
                        "vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the depthBoundsTestEnable "
                        "member of the VkPipelineDepthStencilStateCreateInfo structure must be set to VK_FALSE.");
                }
            }

            // If subpass uses color attachments, pColorBlendState must be valid pointer
            if (subpass_desc) {
                uint32_t color_attachment_count = 0;
                for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
                    if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
                        ++color_attachment_count;
                    }
                }
                if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                    HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005e2, "DS",
                                    "Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is "
                                    "enabled and subpass uses color attachments. %s",
                                    validation_error_map[VALIDATION_ERROR_096005e2]);
                }
            }
        }
    }

    auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
    if (vi != NULL) {
        for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
            VkFormat format = vi->pVertexAttributeDescriptions[j].format;
            // Internal call to get format info.  Still goes through layers, could potentially go directly to ICD.
            VkFormatProperties properties;
            dev_data->instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(dev_data->physical_device, format,
                                                                                      &properties);
            if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                    VALIDATION_ERROR_14a004de, "IMAGE",
                    "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
                    "(%s) is not a supported vertex buffer format. %s",
                    pipelineIndex, j, string_VkFormat(format), validation_error_map[VALIDATION_ERROR_14a004de]);
            }
        }
    }

    return skip;
}
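
// Illustrative sketch (not part of the layer): how an application can pre-check the
// vertex-buffer capability that the loop above validates. Names are hypothetical.
//
//     VkFormatProperties props;
//     vkGetPhysicalDeviceFormatProperties(physical_device, VK_FORMAT_R32G32B32_SFLOAT, &props);
//     bool usable = (props.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) != 0;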

// Free the Pipeline nodes
static void deletePipelines(layer_data *dev_data) {
    if (dev_data->pipelineMap.empty()) return;
    for (auto &pipe_map_pair : dev_data->pipelineMap) {
        delete pipe_map_pair.second;
    }
    dev_data->pipelineMap.clear();
}

// Block of code at start here specifically for managing/tracking DSs

// Return Pool node ptr for specified pool or else NULL
DESCRIPTOR_POOL_STATE *GetDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
    auto pool_it = dev_data->descriptorPoolMap.find(pool);
    if (pool_it == dev_data->descriptorPoolMap.end()) {
        return NULL;
    }
    return pool_it->second;
}

// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
    if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
    bool skip = false;
    auto set_node = dev_data->setMap.find(set);
    if (set_node == dev_data->setMap.end()) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        HandleToUint64(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                        "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
                        HandleToUint64(set));
    } else {
        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
        if (set_node->second->in_use.load()) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(set), __LINE__, VALIDATION_ERROR_2860026a, "DS",
                            "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
                            func_str.c_str(), HandleToUint64(set), validation_error_map[VALIDATION_ERROR_2860026a]);
        }
    }
    return skip;
}
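
// Illustrative sketch (not part of the layer): making a set idle before freeing it. The
// fence is assumed to have been passed to the vkQueueSubmit() that used the set.
//
//     vkWaitForFences(device, 1, &submit_fence, VK_TRUE, UINT64_MAX);
//     vkFreeDescriptorSets(device, pool, 1, &set);  // now safe; the set is no longer in use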

// Remove set from setMap and delete the set
static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
    dev_data->setMap.erase(descriptor_set->GetSet());
    delete descriptor_set;
}
// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
static void deletePools(layer_data *dev_data) {
    for (auto ii = dev_data->descriptorPoolMap.begin(); ii != dev_data->descriptorPoolMap.end();) {
        // Remove this pool's sets from setMap and delete them
        for (auto ds : ii->second->sets) {
            freeDescriptorSet(dev_data, ds);
        }
        ii->second->sets.clear();
        delete ii->second;
        ii = dev_data->descriptorPoolMap.erase(ii);
    }
}

static void clearDescriptorPool(layer_data *dev_data, const VkDevice device, const VkDescriptorPool pool,
                                VkDescriptorPoolResetFlags flags) {
    DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, pool);
    // Guard against an unknown pool handle rather than dereferencing NULL
    if (!pPool) return;
    // TODO: validate flags
    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
    for (auto ds : pPool->sets) {
        freeDescriptorSet(dev_data, ds);
    }
    pPool->sets.clear();
    // Reset available count for each type and available sets for this pool
    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
    }
    pPool->availableSets = pPool->maxSets;
}

// For given CB object, fetch associated CB Node from map
GLOBAL_CB_NODE *GetCBNode(layer_data const *dev_data, const VkCommandBuffer cb) {
    auto it = dev_data->commandBufferMap.find(cb);
    if (it == dev_data->commandBufferMap.end()) {
        return NULL;
    }
    return it->second;
}

// If a renderpass is active, verify that the given command type is appropriate for current subpass state
bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass) return false;
    bool skip = false;
    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                        "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip;
}
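
// Illustrative sketch (not part of the layer): the contents/command pairing this check
// enforces. Handle names are hypothetical.
//
//     vkCmdBeginRenderPass(primary_cb, &rp_begin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
//     vkCmdExecuteCommands(primary_cb, 1, &secondary_cb);  // only vkCmdExecuteCommands() is legal here
//     vkCmdEndRenderPass(primary_cb);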

bool ValidateCmdQueueFlags(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *caller_name,
                           VkQueueFlags required_flags, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    auto pool = GetCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
    if (pool) {
        VkQueueFlags queue_flags = dev_data->phys_dev_properties.queue_family_properties[pool->queueFamilyIndex].queueFlags;
        if (!(required_flags & queue_flags)) {
            string required_flags_string;
            for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
                if (flag & required_flags) {
                    if (required_flags_string.size()) {
                        required_flags_string += " or ";
                    }
                    required_flags_string += string_VkQueueFlagBits(flag);
                }
            }
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                           HandleToUint64(cb_node->commandBuffer), __LINE__, error_code, "DS",
                           "Cannot call %s on a command buffer allocated from a pool without %s capabilities. %s.", caller_name,
                           required_flags_string.c_str(), validation_error_map[error_code]);
        }
    }
    return false;
}
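
// Illustrative sketch (not part of the layer): command buffers inherit capabilities from
// their pool's queue family, which is fixed at pool creation. Names are hypothetical.
//
//     VkCommandPoolCreateInfo pool_ci = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO};
//     pool_ci.queueFamilyIndex = graphics_family;  // family advertising VK_QUEUE_GRAPHICS_BIT
//     vkCreateCommandPool(device, &pool_ci, nullptr, &pool);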

static char const *GetCauseStr(VK_OBJECT obj) {
    if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated";
    if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded";
    return "destroyed";
}

static bool ReportInvalidCommandBuffer(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const char *call_source) {
    bool skip = false;
    for (auto obj : cb_state->broken_bindings) {
        const char *type_str = object_string[obj.type];
        const char *cause_str = GetCauseStr(obj);
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                        "You are adding %s to command buffer 0x%p that is invalid because bound %s 0x%" PRIxLEAST64 " was %s.",
                        call_source, cb_state->commandBuffer, type_str, obj.handle, cause_str);
    }
    return skip;
}

// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
// there's an issue with the Cmd ordering
bool ValidateCmd(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
    switch (cb_state->state) {
        case CB_RECORDING:
            return ValidateCmdSubpassState(dev_data, cb_state, cmd);

        case CB_INVALID_COMPLETE:
        case CB_INVALID_INCOMPLETE:
            return ReportInvalidCommandBuffer(dev_data, cb_state, caller_name);

        default:
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                           HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
                           "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
    }
}

// For given object struct return a ptr of BASE_NODE type for its wrapping struct
BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
    BASE_NODE *base_ptr = nullptr;
    switch (object_struct.type) {
        case kVulkanObjectTypeDescriptorSet: {
            base_ptr = GetSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeSampler: {
            base_ptr = GetSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeQueryPool: {
            base_ptr = GetQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypePipeline: {
            base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeBuffer: {
            base_ptr = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeBufferView: {
            base_ptr = GetBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeImage: {
            base_ptr = GetImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeImageView: {
            base_ptr = GetImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeEvent: {
            base_ptr = GetEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeDescriptorPool: {
            base_ptr = GetDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeCommandPool: {
            base_ptr = GetCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeFramebuffer: {
            base_ptr = GetFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeRenderPass: {
            base_ptr = GetRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeDeviceMemory: {
            base_ptr = GetMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
            break;
        }
        default:
            // TODO : Any other objects to be handled here?
            assert(0);
            break;
    }
    return base_ptr;
}

// Tie the VK_OBJECT to the cmd buffer which includes:
//  Add object_binding to cmd buffer
//  Add cb_binding to object
static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
    cb_bindings->insert(cb_node);
    cb_node->object_bindings.insert(obj);
}
// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
    if (base_obj) base_obj->cb_bindings.erase(cb_node);
}
// Reset the command buffer state
//  Maintain the createInfo and set state to CB_NEW, but clear all other state
static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
    if (pCB) {
        pCB->in_use.store(0);
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->hasDrawCmd = false;
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->status = 0;
        pCB->viewportMask = 0;
        pCB->scissorMask = 0;

        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
            pCB->lastBound[i].reset();
        }

        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->broken_bindings.clear();
        pCB->waitedEvents.clear();
        pCB->events.clear();
        pCB->writeEventsBeforeWait.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->drawData.clear();
        pCB->currentDrawData.buffers.clear();
        pCB->vertex_buffer_used = false;
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        // If secondary, invalidate any primary command buffer that may call us.
        if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
            invalidateCommandBuffers(dev_data, pCB->linkedCommandBuffers, {HandleToUint64(cb), kVulkanObjectTypeCommandBuffer});
        }

        // Remove reverse command buffer links.
        for (auto pSubCB : pCB->linkedCommandBuffers) {
            pSubCB->linkedCommandBuffers.erase(pCB);
        }
        pCB->linkedCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        clear_cmd_buf_and_mem_references(dev_data, pCB);
        pCB->validate_functions.clear();
        pCB->eventUpdates.clear();
        pCB->queryUpdates.clear();

        // Remove object bindings
        for (auto obj : pCB->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, pCB);
        }
        pCB->object_bindings.clear();
        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
        for (auto framebuffer : pCB->framebuffers) {
            auto fb_state = GetFramebufferState(dev_data, framebuffer);
            if (fb_state) fb_state->cb_bindings.erase(pCB);
        }
        pCB->framebuffers.clear();
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}

// Set PSO-related status bits for CB, including dynamic state set via PSO
static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe) {
    // Account for any dynamic state not set via this PSO
    if (!pPipe->graphicsPipelineCI.pDynamicState ||
        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) {  // All state is static
        pCB->status |= CBSTATUS_ALL_STATE_SET;
    } else {
        // First consider all state on
        // Then unset any state that's noted as dynamic in PSO
        // Finally OR that into CB statemask
        CBStatusFlags psoDynStateMask = CBSTATUS_ALL_STATE_SET;
        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
                case VK_DYNAMIC_STATE_LINE_WIDTH:
                    psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
                    break;
                case VK_DYNAMIC_STATE_DEPTH_BIAS:
                    psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
                    break;
                case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                    psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
                    break;
                case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                    psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
                    break;
                case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                    psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
                    break;
                case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                    psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
                    break;
                case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                    psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
                    break;
                default:
                    // TODO : Flag error here
                    break;
            }
        }
        pCB->status |= psoDynStateMask;
    }
}
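
// Illustrative sketch (not part of the layer): declaring a state as dynamic in the PSO
// clears its status bit above, so the app must set it on the command buffer instead.
//
//     VkDynamicState dyn[] = {VK_DYNAMIC_STATE_LINE_WIDTH};
//     VkPipelineDynamicStateCreateInfo dyn_ci = {VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO};
//     dyn_ci.dynamicStateCount = 1;
//     dyn_ci.pDynamicStates = dyn;
//     // ... later, at record time:
//     vkCmdSetLineWidth(cmd_buffer, 1.0f);  // satisfies CBSTATUS_LINE_WIDTH_SET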

// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
// render pass.
bool insideRenderPass(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const char *apiName,
                      UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool inside = false;
    if (pCB->activeRenderPass) {
        inside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                         HandleToUint64(pCB->commandBuffer), __LINE__, msgCode, "DS",
                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 "). %s", apiName,
                         HandleToUint64(pCB->activeRenderPass->renderPass), validation_error_map[msgCode]);
    }
    return inside;
}

// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
bool outsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool outside = false;
    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
        outside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                          HandleToUint64(pCB->commandBuffer), __LINE__, msgCode, "DS",
                          "%s: This call must be issued inside an active render pass. %s", apiName, validation_error_map[msgCode]);
    }
    return outside;
}

static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
}

// For the given ValidationCheck enum, set all relevant instance disabled flags to true
void SetDisabledFlags(instance_layer_data *instance_data, VkValidationFlagsEXT *val_flags_struct) {
    for (uint32_t i = 0; i < val_flags_struct->disabledValidationCheckCount; ++i) {
        switch (val_flags_struct->pDisabledValidationChecks[i]) {
            case VK_VALIDATION_CHECK_SHADERS_EXT:
                instance_data->disabled.shader_validation = true;
                break;
            case VK_VALIDATION_CHECK_ALL_EXT:
                // Set all disabled flags to true
                instance_data->disabled.SetAll(true);
                break;
            default:
                break;
        }
    }
}
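
// Illustrative sketch (not part of the layer): how an application opts out of checks by
// chaining VkValidationFlagsEXT into instance creation (VK_EXT_validation_flags).
//
//     VkValidationCheckEXT disabled[] = {VK_VALIDATION_CHECK_SHADERS_EXT};
//     VkValidationFlagsEXT val_flags = {VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT};
//     val_flags.disabledValidationCheckCount = 1;
//     val_flags.pDisabledValidationChecks = disabled;
//     instance_ci.pNext = &val_flags;  // picked up by the pNext walk in CreateInstance() below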

VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                              VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) return result;

    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
    instance_data->instance = *pInstance;
    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
    instance_data->report_data = debug_report_create_instance(
        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
    instance_data->extensions.InitFromInstanceCreateInfo(pCreateInfo);
    init_core_validation(instance_data, pAllocator);

    ValidateLayerOrdering(*pCreateInfo);
    // Parse any pNext chains
    if (pCreateInfo->pNext) {
        GENERIC_HEADER *struct_header = (GENERIC_HEADER *)pCreateInfo->pNext;
        while (struct_header) {
            // Check for VkValidationFlagsEXT
            if (VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT == struct_header->sType) {
                SetDisabledFlags(instance_data, (VkValidationFlagsEXT *)struct_header);
            }
            struct_header = (GENERIC_HEADER *)struct_header->pNext;
        }
    }

    return result;
}

// Hook DestroyInstance to remove tableInstanceMap entry
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(instance);
    // TBD: Need any locking this early, in case this function is called at the
    // same time by more than one thread?
    instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);

    lock_guard_t lock(global_lock);
    // Clean up logging callback, if any
    while (instance_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
        instance_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(instance_data->report_data);
    FreeLayerDataPtr(key, instance_layer_data_map);
}

static bool ValidatePhysicalDeviceQueueFamily(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
                                              uint32_t requested_queue_family, int32_t err_code, const char *cmd_name,
                                              const char *queue_family_var_name, const char *vu_note = nullptr) {
    bool skip = false;

    if (!vu_note) vu_note = validation_error_map[err_code];

    const char *conditional_ext_cmd =
        instance_data->extensions.vk_khr_get_physical_device_properties_2 ? "or vkGetPhysicalDeviceQueueFamilyProperties2KHR" : "";

    std::string count_note = (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
                                 ? "the pQueueFamilyPropertyCount was never obtained"
                                 : "i.e. is not less than " + std::to_string(pd_state->queue_family_count);

    if (requested_queue_family >= pd_state->queue_family_count) {
        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                        HandleToUint64(pd_state->phys_device), __LINE__, err_code, "DL",
                        "%s: %s (= %" PRIu32
                        ") is not less than any previously obtained pQueueFamilyPropertyCount from "
                        "vkGetPhysicalDeviceQueueFamilyProperties%s (%s). %s",
                        cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, count_note.c_str(), vu_note);
    }
    return skip;
}
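
// Illustrative sketch (not part of the layer): the query sequence that makes a queue
// family index "known to be valid" for the check above. Names are hypothetical.
//
//     uint32_t count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
//     std::vector<VkQueueFamilyProperties> props(count);
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, props.data());
//     // any queueFamilyIndex used later must be < count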

// Verify VkDeviceQueueCreateInfos
static bool ValidateDeviceQueueCreateInfos(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
                                           uint32_t info_count, const VkDeviceQueueCreateInfo *infos) {
    bool skip = false;

    for (uint32_t i = 0; i < info_count; ++i) {
        const auto requested_queue_family = infos[i].queueFamilyIndex;

        // Verify that requested queue family is known to be valid at this point in time
        std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
        skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, requested_queue_family, VALIDATION_ERROR_06c002fa,
                                                  "vkCreateDevice", queue_family_var_name.c_str());

        // Verify that requested queue count of queue family is known to be valid at this point in time
        if (requested_queue_family < pd_state->queue_family_count) {
            const auto requested_queue_count = infos[i].queueCount;
            const auto queue_family_props_count = pd_state->queue_family_properties.size();
            const bool queue_family_has_props = requested_queue_family < queue_family_props_count;
            const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
                                                  ? "or vkGetPhysicalDeviceQueueFamilyProperties2KHR"
                                                  : "";
            std::string count_note =
                !queue_family_has_props
                    ? "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained"
                    : "i.e. is not less than or equal to " +
                          std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount);

            if (!queue_family_has_props ||
                requested_queue_count > pd_state->queue_family_properties[requested_queue_family].queueCount) {
                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(pd_state->phys_device), __LINE__,
                                VALIDATION_ERROR_06c002fc, "DL",
                                "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
                                ") is not "
                                "less than or equal to available queue count for this "
                                "pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueFamilyIndex (=%" PRIu32
                                ") obtained previously "
                                "from vkGetPhysicalDeviceQueueFamilyProperties%s (%s). %s",
                                i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str(),
                                validation_error_map[VALIDATION_ERROR_06c002fc]);
            }
        }
    }

    return skip;
}
1997
1998// Verify that features have been queried and that they are available
1999static bool ValidateRequestedFeatures(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
2000                                      const VkPhysicalDeviceFeatures *requested_features) {
2001    bool skip = false;
2002
2003    const VkBool32 *actual = reinterpret_cast<const VkBool32 *>(&pd_state->features);
2004    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
2005    // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
2006    //  Need to provide the struct member name with the issue. To do that seems like we'll
2007    //  have to loop through each struct member which should be done w/ codegen to keep in synch.
2008    uint32_t errors = 0;
2009    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
2010    for (uint32_t i = 0; i < total_bools; i++) {
2011        if (requested[i] > actual[i]) {
2012            // TODO: Add index to struct member name helper to be able to include a feature name
2013            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2014                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
2015                            "While calling vkCreateDevice(), feature #%u in the VkPhysicalDeviceFeatures struct was requested "
2016                            "but is not available on this device.",
2017                            i);
2018            errors++;
2019        }
2020    }
2021    if (errors && (UNCALLED == pd_state->vkGetPhysicalDeviceFeaturesState)) {
2022        // If user didn't request features, notify them that they should
2023        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
2024        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2025                        0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
2026                        "You requested features that are unavailable on this device. You should first query feature "
2027                        "availability by calling vkGetPhysicalDeviceFeatures().");
2028    }
2029    return skip;
2030}
2031
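// Validate device-creation parameters against previously queried physical-device state, then call
// down the layer chain to create the device and initialize per-device layer state: the dispatch
// table, enabled extensions and features, and cached physical-device properties and limits.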
2032VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
2033                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
2034    bool skip = false;
2035    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
2036
2037    unique_lock_t lock(global_lock);
2038    auto pd_state = GetPhysicalDeviceState(instance_data, gpu);
2039
2040    // TODO: object_tracker should perhaps do this instead
2041    //       and it does not seem to currently work anyway -- the loader just crashes before this point
2042    if (!GetPhysicalDeviceState(instance_data, gpu)) {
2043        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2044                        0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
2045                        "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
2046    }
2047
2048    // Check that any requested features are available
2049    if (pCreateInfo->pEnabledFeatures) {
2050        skip |= ValidateRequestedFeatures(instance_data, pd_state, pCreateInfo->pEnabledFeatures);
2051    }
2052    skip |=
2053        ValidateDeviceQueueCreateInfos(instance_data, pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);
2054
2055    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2056
2057    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
2058
2059    assert(chain_info->u.pLayerInfo);
2060    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
2061    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
2062    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_data->instance, "vkCreateDevice");
2063    if (fpCreateDevice == NULL) {
2064        return VK_ERROR_INITIALIZATION_FAILED;
2065    }
2066
2067    // Advance the link info for the next element on the chain
2068    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
2069
2070    lock.unlock();
2071
2072    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
2073    if (result != VK_SUCCESS) {
2074        return result;
2075    }
2076
2077    lock.lock();
2078    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
2079
2080    device_data->instance_data = instance_data;
2081    // Setup device dispatch table
2082    layer_init_device_dispatch_table(*pDevice, &device_data->dispatch_table, fpGetDeviceProcAddr);
2083    device_data->device = *pDevice;
2084    // Save PhysicalDevice handle
2085    device_data->physical_device = gpu;
2086
2087    device_data->report_data = layer_debug_report_create_device(instance_data->report_data, *pDevice);
2088    device_data->extensions.InitFromDeviceCreateInfo(&instance_data->extensions, pCreateInfo);
2089
2090    // Get physical device limits for this device
2091    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(device_data->phys_dev_properties.properties));
2092    uint32_t count;
2093    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
2094    device_data->phys_dev_properties.queue_family_properties.resize(count);
2095    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
2096        gpu, &count, &device_data->phys_dev_properties.queue_family_properties[0]);
2097    // TODO: device limits should make sure these are compatible
2098    if (pCreateInfo->pEnabledFeatures) {
2099        device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
2100    } else {
2101        memset(&device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
2102    }
2103    // Store physical device properties and physical device mem limits into device layer_data structs
2104    instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &device_data->phys_dev_mem_props);
2105    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &device_data->phys_dev_props);
2106    lock.unlock();
2107
2108    ValidateLayerOrdering(*pCreateInfo);
2109
2110    return result;
2111}
2112
2113// Tear down all layer-tracked state for the device, then destroy it via the next layer in the chain
2114VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
2115    // TODOSC : Shouldn't need any customization here
2116    dispatch_key key = get_dispatch_key(device);
2117    layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);
2118    // Free all the memory
2119    unique_lock_t lock(global_lock);
2120    deletePipelines(dev_data);
2121    dev_data->renderPassMap.clear();
2122    for (auto ii = dev_data->commandBufferMap.begin(); ii != dev_data->commandBufferMap.end(); ++ii) {
2123        delete (*ii).second;
2124    }
2125    dev_data->commandBufferMap.clear();
2126    // This will also delete all sets in the pool & remove them from setMap
2127    deletePools(dev_data);
2128    // All sets should be removed
2129    assert(dev_data->setMap.empty());
2130    dev_data->descriptorSetLayoutMap.clear();
2131    dev_data->imageViewMap.clear();
2132    dev_data->imageMap.clear();
2133    dev_data->imageSubresourceMap.clear();
2134    dev_data->imageLayoutMap.clear();
2135    dev_data->bufferViewMap.clear();
2136    dev_data->bufferMap.clear();
2137    // Queues persist until device is destroyed
2138    dev_data->queueMap.clear();
2139    // Report any memory leaks
2140    layer_debug_report_destroy_device(device);
2141    lock.unlock();
2142
2143#if DISPATCH_MAP_DEBUG
2144    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
2145#endif
2146
2147    dev_data->dispatch_table.DestroyDevice(device, pAllocator);
2148    FreeLayerDataPtr(key, layer_data_map);
2149}
2150
2151static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
2152
2153// For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id
2154//   and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id
2155static bool ValidateStageMaskGsTsEnables(layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
2156                                         UNIQUE_VALIDATION_ERROR_CODE geo_error_id, UNIQUE_VALIDATION_ERROR_CODE tess_error_id) {
2157    bool skip = false;
2158    if (!dev_data->enabled_features.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
2159        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
2160                        geo_error_id, "DL",
2161                        "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when "
2162                        "device does not have geometryShader feature enabled. %s",
2163                        caller, validation_error_map[geo_error_id]);
2164    }
2165    if (!dev_data->enabled_features.tessellationShader &&
2166        (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
2167        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
2168                        tess_error_id, "DL",
2169                        "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT "
2170                        "and/or VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device "
2171                        "does not have tessellationShader feature enabled. %s",
2172                        caller, validation_error_map[tess_error_id]);
2173    }
2174    return skip;
2175}
2176
2177// Loop through bound objects and increment their in_use counts.
2178static void IncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2179    for (auto obj : cb_node->object_bindings) {
2180        auto base_obj = GetStateStructPtrFromObject(dev_data, obj);
2181        if (base_obj) {
2182            base_obj->in_use.fetch_add(1);
2183        }
2184    }
2185}
2186// Track which resources are in-flight by atomically incrementing their "in_use" count
2187static void incrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2188    cb_node->submitCount++;
2189    cb_node->in_use.fetch_add(1);
2190
2191    // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
2192    IncrementBoundObjects(dev_data, cb_node);
2193    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2194    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2195    //  should then be flagged prior to calling this function
2196    for (auto drawDataElement : cb_node->drawData) {
2197        for (auto buffer : drawDataElement.buffers) {
2198            auto buffer_state = GetBufferState(dev_data, buffer);
2199            if (buffer_state) {
2200                buffer_state->in_use.fetch_add(1);
2201            }
2202        }
2203    }
2204    for (auto event : cb_node->writeEventsBeforeWait) {
2205        auto event_state = GetEventNode(dev_data, event);
2206        if (event_state) event_state->write_in_use++;
2207    }
2208}
2209
2210// Note: This function assumes that the global lock is held by the calling thread.
2211// For the given queue, verify the queue state up to the given seq number.
2212// Currently the only check is to make sure that if there are events to be waited on prior to
2213//  a QueryReset, make sure that all such events have been signaled.
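// The check walks a worklist of queues: starting from (initial_queue, initial_seq), each cross-queue
// semaphore wait that covers not-yet-validated submissions adds that queue (with a new target seq)
// back onto the worklist, so everything this queue transitively waits on is also verified.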
2214static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *initial_queue, uint64_t initial_seq) {
2215    bool skip = false;
2216
2217    // sequence number we want to validate up to, per queue
2218    std::unordered_map<QUEUE_STATE *, uint64_t> target_seqs { { initial_queue, initial_seq } };
2219    // sequence number we've completed validation for, per queue
2220    std::unordered_map<QUEUE_STATE *, uint64_t> done_seqs;
2221    std::vector<QUEUE_STATE *> worklist { initial_queue };
2222
2223    while (worklist.size()) {
2224        auto queue = worklist.back();
2225        worklist.pop_back();
2226
2227        auto target_seq = target_seqs[queue];
2228        auto seq = std::max(done_seqs[queue], queue->seq);
2229        auto sub_it = queue->submissions.begin() + int(seq - queue->seq);  // seq >= queue->seq
2230
2231        for (; seq < target_seq; ++sub_it, ++seq) {
2232            for (auto &wait : sub_it->waitSemaphores) {
2233                auto other_queue = GetQueueState(dev_data, wait.queue);
2234
2235                if (other_queue == queue)
2236                    continue;   // semaphores /always/ point backwards, so no point here.
2237
2238                auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
2239                auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);
2240
2241                // if this wait is for another queue, and covers new sequence
2242                // numbers beyond what we've already validated, mark the new
2243                // target seq and (possibly-re)add the queue to the worklist.
2244                if (other_done_seq < other_target_seq) {
2245                    target_seqs[other_queue] = other_target_seq;
2246                    worklist.push_back(other_queue);
2247                }
2248            }
2249
2250            for (auto cb : sub_it->cbs) {
2251                auto cb_node = GetCBNode(dev_data, cb);
2252                if (cb_node) {
2253                    for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
2254                        for (auto event : queryEventsPair.second) {
2255                            if (dev_data->eventMap[event].needsSignaled) {
2256                                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2257                                                VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__,
2258                                                DRAWSTATE_INVALID_QUERY, "DS",
2258                                                "Cannot get query results on queryPool 0x%" PRIx64
2259                                                " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
2260                                                HandleToUint64(queryEventsPair.first.pool), queryEventsPair.first.index,
2261                                                HandleToUint64(event));
2262                            }
2263                        }
2264                    }
2265                }
2266            }
2267        }
2268
2269        // finally mark the point we've now validated this queue to.
2270        done_seqs[queue] = seq;
2271    }
2272
2273    return skip;
2274}
2275
2276// When the given fence is retired, verify outstanding queue operations through the point of the fence
2277static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
2278    auto fence_state = GetFenceNode(dev_data, fence);
2279    if (VK_NULL_HANDLE != fence_state->signaler.first) {
2280        return VerifyQueueStateToSeq(dev_data, GetQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
2281    }
2282    return false;
2283}
2284
2285// Decrement in-use count for objects bound to command buffer
2286static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2287    BASE_NODE *base_obj = nullptr;
2288    for (auto obj : cb_node->object_bindings) {
2289        base_obj = GetStateStructPtrFromObject(dev_data, obj);
2290        if (base_obj) {
2291            base_obj->in_use.fetch_sub(1);
2292        }
2293    }
2294}
2295
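// Retire all work on the given queue up to and including the given sequence number: release the
// in_use counts taken at submit time on semaphores, command buffers, and their bound resources,
// commit each command buffer's query/event state into the device-level maps, mark the submission's
// fence retired, then recursively roll forward any other queues whose semaphores this work waited on.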
2296static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
2297    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
2298
2299    // Roll this queue forward, one submission at a time.
2300    while (pQueue->seq < seq) {
2301        auto &submission = pQueue->submissions.front();
2302
2303        for (auto &wait : submission.waitSemaphores) {
2304            auto pSemaphore = GetSemaphoreNode(dev_data, wait.semaphore);
2305            if (pSemaphore) {
2306                pSemaphore->in_use.fetch_sub(1);
2307            }
2308            auto &lastSeq = otherQueueSeqs[wait.queue];
2309            lastSeq = std::max(lastSeq, wait.seq);
2310        }
2311
2312        for (auto &semaphore : submission.signalSemaphores) {
2313            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2314            if (pSemaphore) {
2315                pSemaphore->in_use.fetch_sub(1);
2316            }
2317        }
2318
2319        for (auto cb : submission.cbs) {
2320            auto cb_node = GetCBNode(dev_data, cb);
2321            if (!cb_node) {
2322                continue;
2323            }
2324            // First perform decrement on general case bound objects
2325            DecrementBoundResources(dev_data, cb_node);
2326            for (auto drawDataElement : cb_node->drawData) {
2327                for (auto buffer : drawDataElement.buffers) {
2328                    auto buffer_state = GetBufferState(dev_data, buffer);
2329                    if (buffer_state) {
2330                        buffer_state->in_use.fetch_sub(1);
2331                    }
2332                }
2333            }
2334            for (auto event : cb_node->writeEventsBeforeWait) {
2335                auto eventNode = dev_data->eventMap.find(event);
2336                if (eventNode != dev_data->eventMap.end()) {
2337                    eventNode->second.write_in_use--;
2338                }
2339            }
2340            for (auto queryStatePair : cb_node->queryToStateMap) {
2341                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
2342            }
2343            for (auto eventStagePair : cb_node->eventToStageMap) {
2344                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
2345            }
2346
2347            cb_node->in_use.fetch_sub(1);
2348        }
2349
2350        auto pFence = GetFenceNode(dev_data, submission.fence);
2351        if (pFence) {
2352            pFence->state = FENCE_RETIRED;
2353        }
2354
2355        pQueue->submissions.pop_front();
2356        pQueue->seq++;
2357    }
2358
2359    // Roll other queues forward to the highest seq we saw a wait for
2360    for (auto qs : otherQueueSeqs) {
2361        RetireWorkOnQueue(dev_data, GetQueueState(dev_data, qs.first), qs.second);
2362    }
2363}
2364
2365// Submit a fence to a queue, delimiting previous fences and previous untracked
2366// work by it.
2367static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
2368    pFence->state = FENCE_INFLIGHT;
2369    pFence->signaler.first = pQueue->queue;
2370    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
2371}
2372
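// A command buffer that is still in flight, or that appears more than once in the current
// submission, must have been begun with VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT.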
2373static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2374    bool skip = false;
2375    if ((pCB->in_use.load() || current_submit_count > 1) &&
2376        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2377        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2378                        __LINE__, VALIDATION_ERROR_31a0008e, "DS",
2379                        "Command Buffer 0x%p is already in use and is not marked for simultaneous use. %s", pCB->commandBuffer,
2380                        validation_error_map[VALIDATION_ERROR_31a0008e]);
2381    }
2382    return skip;
2383}
2384
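// Validate that a command buffer is in a submittable state: a ONE_TIME_SUBMIT buffer may only be
// submitted once, and the buffer must be fully recorded -- not invalid, unrecorded, or still recording.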
2385static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source,
2386                                       int current_submit_count, UNIQUE_VALIDATION_ERROR_CODE vu_id) {
2387    bool skip = false;
2388    if (dev_data->instance_data->disabled.command_buffer_state) return skip;
2389    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
2390    if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
2391        (cb_state->submitCount + current_submit_count > 1)) {
2392        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2393                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
2394                        "Command buffer 0x%p was begun with VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
2395                        "set, but has been submitted 0x%" PRIxLEAST64 " times.",
2396                        cb_state->commandBuffer, cb_state->submitCount + current_submit_count);
2397    }
2398
2399    // Validate that cmd buffers have been updated
2400    switch (cb_state->state) {
2401        case CB_INVALID_INCOMPLETE:
2402        case CB_INVALID_COMPLETE:
2403            skip |= ReportInvalidCommandBuffer(dev_data, cb_state, call_source);
2404            break;
2405
2406        case CB_NEW:
2407            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2408                            HandleToUint64(cb_state->commandBuffer), __LINE__, vu_id, "DS",
2409                            "Command buffer 0x%p used in the call to %s is unrecorded and contains no commands. %s",
2410                            cb_state->commandBuffer, call_source, validation_error_map[vu_id]);
2411            break;
2412
2413        case CB_RECORDING:
2414            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2415                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
2416                            "You must call vkEndCommandBuffer() on command buffer 0x%p before this call to %s!",
2417                            cb_state->commandBuffer, call_source);
2418            break;
2419
2420        default: /* recorded */
2421            break;
2422    }
2423    return skip;
2424}
2425
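// Verify that every buffer referenced by the command buffer's recorded draw data still exists.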
2426static bool validateResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2427    bool skip = false;
2428
2429    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2430    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2431    //  should then be flagged prior to calling this function
2432    for (auto drawDataElement : cb_node->drawData) {
2433        for (auto buffer : drawDataElement.buffers) {
2434            auto buffer_state = GetBufferState(dev_data, buffer);
2435            if (!buffer_state) {
2436                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
2437                                HandleToUint64(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
2438                                "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", HandleToUint64(buffer));
2439            }
2440        }
2441    }
2442    return skip;
2443}
2444
2445// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
2446bool ValidImageBufferQueue(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue, uint32_t count,
2447                           const uint32_t *indices) {
2448    bool found = false;
2449    bool skip = false;
2450    auto queue_state = GetQueueState(dev_data, queue);
2451    if (queue_state) {
2452        for (uint32_t i = 0; i < count; i++) {
2453            if (indices[i] == queue_state->queueFamilyIndex) {
2454                found = true;
2455                break;
2456            }
2457        }
2458
2459        if (!found) {
2460            skip = log_msg(
2461                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object->type], object->handle, __LINE__,
2462                DRAWSTATE_INVALID_QUEUE_FAMILY, "DS", "vkQueueSubmit: Command buffer 0x%" PRIxLEAST64 " contains %s 0x%" PRIxLEAST64
2463                                                      " which was not created allowing concurrent access to this queue family %d.",
2464                HandleToUint64(cb_node->commandBuffer), object_string[object->type], object->handle, queue_state->queueFamilyIndex);
2465        }
2466    }
2467    return skip;
2468}
2469
2470// Validate that queueFamilyIndices of primary command buffers match this queue
2471// Secondary command buffers were previously validated in vkCmdExecuteCommands().
2472static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
2473    bool skip = false;
2474    auto pPool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
2475    auto queue_state = GetQueueState(dev_data, queue);
2476
2477    if (pPool && queue_state) {
2478        if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
2479            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2480                            HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_31a00094, "DS",
2481                            "vkQueueSubmit: Primary command buffer 0x%p created in queue family %d is being submitted on queue "
2482                            "0x%p from queue family %d. %s",
2483                            pCB->commandBuffer, pPool->queueFamilyIndex, queue, queue_state->queueFamilyIndex,
2484                            validation_error_map[VALIDATION_ERROR_31a00094]);
2485        }
2486
2487        // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
2488        for (auto object : pCB->object_bindings) {
2489            if (object.type == kVulkanObjectTypeImage) {
2490                auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(object.handle));
2491                if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2492                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, image_state->createInfo.queueFamilyIndexCount,
2493                                                  image_state->createInfo.pQueueFamilyIndices);
2494                }
2495            } else if (object.type == kVulkanObjectTypeBuffer) {
2496                auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object.handle));
2497                if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2498                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, buffer_state->createInfo.queueFamilyIndexCount,
2499                                                  buffer_state->createInfo.pQueueFamilyIndices);
2500                }
2501            }
2502        }
2503    }
2504
2505    return skip;
2506}
2507
2508static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2509    // Validate state of this primary command buffer and of any secondary command buffers recorded into it
2510    bool skip = false;
2511
2512    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
2513    // on device
2514    skip |= validateCommandBufferSimultaneousUse(dev_data, pCB, current_submit_count);
2515
2516    skip |= validateResources(dev_data, pCB);
2517
2518    for (auto pSubCB : pCB->linkedCommandBuffers) {
2519        skip |= validateResources(dev_data, pSubCB);
2520        // TODO: replace with invalidateCommandBuffers() at recording.
2521        if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
2522            !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2523            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2524                            0, __LINE__, VALIDATION_ERROR_31a00092, "DS",
2525                            "Command buffer 0x%p was submitted with secondary buffer 0x%p but that buffer has subsequently been "
2526                            "bound to primary cmd buffer 0x%p and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set. %s",
2527                            pCB->commandBuffer, pSubCB->commandBuffer, pSubCB->primaryCommandBuffer,
2528                            validation_error_map[VALIDATION_ERROR_31a00092]);
2529        }
2530    }
2531
2532    skip |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()", current_submit_count, VALIDATION_ERROR_31a00090);
2533
2534    return skip;
2535}
2536
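// A fence submitted with work must be unsignaled and must not already be in flight on another submission.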
2537static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
2538    bool skip = false;
2539
2540    if (pFence) {
2541        if (pFence->state == FENCE_INFLIGHT) {
2542            // TODO: opportunities for VALIDATION_ERROR_31a00080, VALIDATION_ERROR_316008b4, VALIDATION_ERROR_16400a0e
2543            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2544                            HandleToUint64(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
2545                            "Fence 0x%" PRIx64 " is already in use by another submission.", HandleToUint64(pFence->fence));
2546        }
2547
2548        else if (pFence->state == FENCE_RETIRED) {
2549            // TODO: opportunities for VALIDATION_ERROR_31a0007e, VALIDATION_ERROR_316008b2, VALIDATION_ERROR_16400a0e
2550            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2551                            HandleToUint64(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
2552                            "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted.",
2553                            HandleToUint64(pFence->fence));
2554        }
2555    }
2556
2557    return skip;
2558}
2559
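// Record bookkeeping for a submission that has been passed down the chain: mark the fence in flight,
// update semaphore signal/wait tracking (taking in_use references), apply each command buffer's
// tracked image-layout transitions, bump in_use counts on the command buffers and their resources,
// and append the work to the queue's submission list so it can be retired later.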
2560static void PostCallRecordQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2561                                      VkFence fence) {
2562    auto pQueue = GetQueueState(dev_data, queue);
2563    auto pFence = GetFenceNode(dev_data, fence);
2564
2565    // Mark the fence in-use.
2566    if (pFence) {
2567        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
2568    }
2569
2570    // Now process each individual submit
2571    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2572        std::vector<VkCommandBuffer> cbs;
2573        const VkSubmitInfo *submit = &pSubmits[submit_idx];
2574        vector<SEMAPHORE_WAIT> semaphore_waits;
2575        vector<VkSemaphore> semaphore_signals;
2576        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2577            VkSemaphore semaphore = submit->pWaitSemaphores[i];
2578            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2579            if (pSemaphore) {
2580                if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
2581                    semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
2582                    pSemaphore->in_use.fetch_add(1);
2583                }
2584                pSemaphore->signaler.first = VK_NULL_HANDLE;
2585                pSemaphore->signaled = false;
2586            }
2587        }
2588        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2589            VkSemaphore semaphore = submit->pSignalSemaphores[i];
2590            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2591            if (pSemaphore) {
2592                pSemaphore->signaler.first = queue;
2593                pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
2594                pSemaphore->signaled = true;
2595                pSemaphore->in_use.fetch_add(1);
2596                semaphore_signals.push_back(semaphore);
2597            }
2598        }
2599        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2600            auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2601            if (cb_node) {
2602                cbs.push_back(submit->pCommandBuffers[i]);
2603                for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
2604                    cbs.push_back(secondaryCmdBuffer->commandBuffer);
2605                }
2606                UpdateCmdBufImageLayouts(dev_data, cb_node);
2607                incrementResources(dev_data, cb_node);
2608                for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
2609                    incrementResources(dev_data, secondaryCmdBuffer);
2610                }
2611            }
2612        }
2613        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
2614                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
2615    }
2616
2617    if (pFence && !submitCount) {
2618        // If no submissions, but just dropping a fence on the end of the queue,
2619        // record an empty submission with just the fence, so we can determine
2620        // its completion.
2621        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(),
2622                                         fence);
2623    }
2624}
2625
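// Validate a submission before it is passed down the chain: fence state, semaphore signal/wait
// ordering (forward progress), image layouts, primary command-buffer state, queue-family
// compatibility, and any submit-time validation functions recorded in the command buffers.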
2626static bool PreCallValidateQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2627                                       VkFence fence) {
2628    auto pFence = GetFenceNode(dev_data, fence);
2629    bool skip = ValidateFenceForSubmit(dev_data, pFence);
2630    if (skip) {
2631        return true;
2632    }
2633
2634    unordered_set<VkSemaphore> signaled_semaphores;
2635    unordered_set<VkSemaphore> unsignaled_semaphores;
2636    vector<VkCommandBuffer> current_cmds;
2637    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> localImageLayoutMap;
2638    // Now verify each individual submit
2639    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2640        const VkSubmitInfo *submit = &pSubmits[submit_idx];
2641        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2642            skip |= ValidateStageMaskGsTsEnables(dev_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()",
2643                                                 VALIDATION_ERROR_13c00098, VALIDATION_ERROR_13c0009a);
2644            VkSemaphore semaphore = submit->pWaitSemaphores[i];
2645            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2646            if (pSemaphore) {
2647                if (unsignaled_semaphores.count(semaphore) ||
2648                    (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
2649                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2650                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
2651                                    "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
2652                                    HandleToUint64(semaphore));
2653                } else {
2654                    signaled_semaphores.erase(semaphore);
2655                    unsignaled_semaphores.insert(semaphore);
2656                }
2657            }
2658        }
2659        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2660            VkSemaphore semaphore = submit->pSignalSemaphores[i];
2661            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2662            if (pSemaphore) {
2663                if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
2664                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2665                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
2666                                    "Queue 0x%p is signaling semaphore 0x%" PRIx64
2667                                    " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
2668                                    queue, HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
2669                } else {
2670                    unsignaled_semaphores.erase(semaphore);
2671                    signaled_semaphores.insert(semaphore);
2672                }
2673            }
2674        }
2675        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2676            auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2677            if (cb_node) {
2678                skip |= ValidateCmdBufImageLayouts(dev_data, cb_node, dev_data->imageLayoutMap, localImageLayoutMap);
2679                current_cmds.push_back(submit->pCommandBuffers[i]);
2680                skip |= validatePrimaryCommandBufferState(
2681                    dev_data, cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]));
2682                skip |= validateQueueFamilyIndices(dev_data, cb_node, queue);
2683
2684                // Potential early exit here as bad object state may crash in delayed function calls
2685                if (skip) {
2686                    return true;
2687                }
2688
2689                // Call submit-time functions to validate/update state
2690                for (auto &function : cb_node->validate_functions) {
2691                    skip |= function();
2692                }
2693                for (auto &function : cb_node->eventUpdates) {
2694                    skip |= function(queue);
2695                }
2696                for (auto &function : cb_node->queryUpdates) {
2697                    skip |= function(queue);
2698                }
2699            }
2700        }
2701    }
2702    return skip;
2703}
2704
2705VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
2706    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
2707    unique_lock_t lock(global_lock);
2708
2709    bool skip = PreCallValidateQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
2710    lock.unlock();
2711
2712    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2713
2714    VkResult result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
2715
2716    lock.lock();
2717    PostCallRecordQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
2718    lock.unlock();
2719    return result;
2720}
2721
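// Enforce the maxMemoryAllocationCount device limit on the number of simultaneously live allocations.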
2722static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
2723    bool skip = false;
2724    if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
2725        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2726                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_16c004f8, "MEM",
2727                        "Number of currently valid memory objects is not less than the maximum allowed (%u). %s",
2728                        dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount,
2729                        validation_error_map[VALIDATION_ERROR_16c004f8]);
2730    }
2731    return skip;
2732}
2733
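// Record the new allocation in the layer's memory-object map.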
2734static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
2735    add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
2737}
2738
2739VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
2740                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
2741    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
2742    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2743    unique_lock_t lock(global_lock);
2744    bool skip = PreCallValidateAllocateMemory(dev_data);
2745    if (!skip) {
2746        lock.unlock();
2747        result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
2748        lock.lock();
2749        if (VK_SUCCESS == result) {
2750            PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
2751        }
2752    }
2753    return result;
2754}
2755
2756// For the given object node, if it is in use, flag a validation error and return the callback result, else return false
2757bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
2758                            UNIQUE_VALIDATION_ERROR_CODE error_code) {
2759    if (dev_data->instance_data->disabled.object_in_use) return false;
2760    bool skip = false;
2761    if (obj_node->in_use.load()) {
2762        skip |=
2763            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle,
2764                    __LINE__, error_code, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer. %s",
2765                    object_string[obj_struct.type], obj_struct.handle, validation_error_map[error_code]);
2766    }
2767    return skip;
2768}
2769
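// Look up layer state for the memory object being freed and verify it is not still in use by a command buffer.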
2770static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
2771    *mem_info = GetMemObjInfo(dev_data, mem);
2772    *obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory};
2773    if (dev_data->instance_data->disabled.free_memory) return false;
2774    bool skip = false;
2775    if (*mem_info) {
2776        skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, VALIDATION_ERROR_2880054a);
2777    }
2778    return skip;
2779}
2780
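// On free, unbind the memory from any images or buffers still bound to it, invalidate command
// buffers that reference it, and erase it from the memory-object map.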
2781static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
2782    // Clear mem binding for any bound objects
2783    for (auto obj : mem_info->obj_bindings) {
2784        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle, __LINE__,
2785                MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
2786                obj.handle, HandleToUint64(mem_info->mem));
2787        switch (obj.type) {
2788            case kVulkanObjectTypeImage: {
2789                auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
2790                assert(image_state);  // Any destroyed images should already be removed from bindings
2791                image_state->binding.mem = MEMORY_UNBOUND;
2792                break;
2793            }
2794            case kVulkanObjectTypeBuffer: {
2795                auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
2796                assert(buffer_state);  // Any destroyed buffers should already be removed from bindings
2797                buffer_state->binding.mem = MEMORY_UNBOUND;
2798                break;
2799            }
2800            default:
2801                // Should only have buffer or image objects bound to memory
2802                assert(0);
2803        }
2804    }
2805    // Any bound cmd buffers are now invalid
2806    invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
2807    dev_data->memObjMap.erase(mem);
2808}
2809
2810VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
2811    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2812    DEVICE_MEM_INFO *mem_info = nullptr;
2813    VK_OBJECT obj_struct;
2814    unique_lock_t lock(global_lock);
2815    bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
2816    if (!skip) {
2817        lock.unlock();
2818        dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
2819        lock.lock();
2820        if (mem != VK_NULL_HANDLE) {
2821            PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
2822        }
2823    }
2824}
2825
2826// Validate that given Map memory range is valid. This means that the memory should not already be mapped,
2827//  and that the size of the map range should be:
2828//  1. Not zero
2829//  2. Within the size of the memory allocation
2830static bool ValidateMapMemRange(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
2831    bool skip = false;
2832
2833    if (size == 0) {
2834        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2835                       HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
2836                       "VkMapMemory: Attempting to map memory range of size zero");
2837    }
2838
2839    auto mem_element = dev_data->memObjMap.find(mem);
2840    if (mem_element != dev_data->memObjMap.end()) {
2841        auto mem_info = mem_element->second.get();
2842        // It is an application error to call VkMapMemory on an object that is already mapped
2843        if (mem_info->mem_range.size != 0) {
2844            skip =
2845                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2846                        HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
2847                        "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, HandleToUint64(mem));
2848        }
2849
2850        // Validate that offset + size is within object's allocationSize
2851        if (size == VK_WHOLE_SIZE) {
2852            if (offset >= mem_info->alloc_info.allocationSize) {
2853                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2854                               HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
2855                               "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
2856                               " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
2857                               offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
2858            }
2859        } else {
2860            if ((offset + size) > mem_info->alloc_info.allocationSize) {
2861                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2862                               HandleToUint64(mem), __LINE__, VALIDATION_ERROR_31200552, "MEM",
2863                               "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ". %s",
2864                               offset, size + offset, mem_info->alloc_info.allocationSize,
2865                               validation_error_map[VALIDATION_ERROR_31200552]);
2866            }
2867        }
2868    }
2869    return skip;
2870}
2871
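// Record the currently mapped range on the memory object's layer state.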
2872static void storeMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
2873    auto mem_info = GetMemObjInfo(dev_data, mem);
2874    if (mem_info) {
2875        mem_info->mem_range.offset = offset;
2876        mem_info->mem_range.size = size;
2877    }
2878}
2879
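// Clear the tracked mapped range on unmap, flagging an error if the memory was not actually mapped,
// and free any shadow copy that was created for a non-coherent mapping.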
2880static bool deleteMemRanges(layer_data *dev_data, VkDeviceMemory mem) {
2881    bool skip = false;
2882    auto mem_info = GetMemObjInfo(dev_data, mem);
2883    if (mem_info) {
2884        if (!mem_info->mem_range.size) {
2885            // Valid Usage: memory must currently be mapped
2886            skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2887                           HandleToUint64(mem), __LINE__, VALIDATION_ERROR_33600562, "MEM",
2888                           "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64 ". %s", HandleToUint64(mem),
2889                           validation_error_map[VALIDATION_ERROR_33600562]);
2890        }
2891        mem_info->mem_range.size = 0;
2892        if (mem_info->shadow_copy) {
2893            free(mem_info->shadow_copy_base);
2894            mem_info->shadow_copy_base = 0;
2895            mem_info->shadow_copy = 0;
2896        }
2897    }
2898    return skip;
2899}
2900
2901// Fill value written into the guard bands around non-coherent memory mappings so that out-of-bounds writes can be detected
2902static char NoncoherentMemoryFillValue = 0xb;
2903
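// For non-coherent memory, the layer returns a shadow copy to the application instead of the driver
// pointer, wrapped in guard bands so stray writes can be detected. A sketch of the layout:
//
//   shadow_copy_base .. (alignment slack) .. shadow_copy
//   shadow_copy: | front pad (shadow_pad_size) | app data (size) | rear pad (shadow_pad_size) |
//                                              ^-- *ppData handed back to the application
//
// The whole region is filled with NoncoherentMemoryFillValue at map time; pad bytes that no longer
// match that value indicate an application write outside the mapped range.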
2904static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
2905                                     void **ppData) {
2906    auto mem_info = GetMemObjInfo(dev_data, mem);
2907    if (mem_info) {
2908        mem_info->p_driver_data = *ppData;
2909        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
2910        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
2911            mem_info->shadow_copy = 0;
2912        } else {
2913            if (size == VK_WHOLE_SIZE) {
2914                size = mem_info->alloc_info.allocationSize - offset;
2915            }
2916            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
2917            assert(SafeModulo(mem_info->shadow_pad_size,
2918                              dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
2919            // Ensure start of mapped region reflects hardware alignment constraints
2920            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
2921
2922            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
2923            uint64_t start_offset = offset % map_alignment;
2924            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
2925            mem_info->shadow_copy_base =
2926                malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
2927
2928            mem_info->shadow_copy =
2929                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
2930                                         ~(map_alignment - 1)) +
2931                start_offset;
2932            assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
2933                              map_alignment) == 0);
2934
2935            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
2936            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
2937        }
2938    }
2939}
2940
2941// Verify that the state of a fence being waited on is appropriate. That is,
2942//  the fence should have been submitted on a queue or during acquire next image;
2943//  waiting on a fence that was never submitted cannot complete and is flagged as a warning
2944static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
2945    bool skip = false;
2946
2947    auto pFence = GetFenceNode(dev_data, fence);
2948    if (pFence) {
2949        if (pFence->state == FENCE_UNSIGNALED) {
2950            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2951                            HandleToUint64(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
2952                            "%s called for fence 0x%" PRIxLEAST64
2953                            " which has not been submitted on a Queue or during "
2954                            "acquire next image.",
2955                            apiCall, HandleToUint64(fence));
2956        }
2957    }
2958    return skip;
2959}
2960
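// Retire a fence that is known to be signaled: if a queue signaled it, retire that queue's work up
// to the signaling point; otherwise it was signaled by the WSI and is simply marked retired.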
2961static void RetireFence(layer_data *dev_data, VkFence fence) {
2962    auto pFence = GetFenceNode(dev_data, fence);
2963    if (pFence->signaler.first != VK_NULL_HANDLE) {
2964        // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
2965        RetireWorkOnQueue(dev_data, GetQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
2966    } else {
2967        // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
2968        // the fence as retired.
2969        pFence->state = FENCE_RETIRED;
2970    }
2971}
2972
2973static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
2974    if (dev_data->instance_data->disabled.wait_for_fences) return false;
2975    bool skip = false;
2976    for (uint32_t i = 0; i < fence_count; i++) {
2977        skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
2978        skip |= VerifyQueueStateToFence(dev_data, fences[i]);
2979    }
2980    return skip;
2981}
2982
2983static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
2984    // When we know that all fences are complete we can clean/remove their CBs
2985    if ((VK_TRUE == wait_all) || (1 == fence_count)) {
2986        for (uint32_t i = 0; i < fence_count; i++) {
2987            RetireFence(dev_data, fences[i]);
2988        }
2989    }
2990    // NOTE : Alternate case not handled here is when some fences have completed. In
2991    //  this case for app to guarantee which fences completed it will have to call
2992    //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
2993}
2994
2995VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
2996                                             uint64_t timeout) {
2997    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2998    // Verify fence status of submitted fences
2999    unique_lock_t lock(global_lock);
3000    bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
3001    lock.unlock();
3002    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3003
3004    VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
3005
3006    if (result == VK_SUCCESS) {
3007        lock.lock();
3008        PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
3009        lock.unlock();
3010    }
3011    return result;
3012}
3013
3014static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
3015    if (dev_data->instance_data->disabled.get_fence_state) return false;
3016    return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
3017}
3018
3019static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }
3020
3021VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
3022    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3023    unique_lock_t lock(global_lock);
3024    bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
3025    lock.unlock();
3026    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3027
3028    VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
3029    if (result == VK_SUCCESS) {
3030        lock.lock();
3031        PostCallRecordGetFenceStatus(dev_data, fence);
3032        lock.unlock();
3033    }
3034    return result;
3035}
3036
3037static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
3038    // Add queue to tracking set only if it is new
3039    auto result = dev_data->queues.emplace(queue);
    if (result.second) {
3041        QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
3042        queue_state->queue = queue;
3043        queue_state->queueFamilyIndex = q_family_index;
3044        queue_state->seq = 0;
3045    }
3046}
3047
3048VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
3049    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3050    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
3051    lock_guard_t lock(global_lock);
3052
3053    PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
3054}
3055
3056static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
3057    *queue_state = GetQueueState(dev_data, queue);
3058    if (dev_data->instance_data->disabled.queue_wait_idle) return false;
3059    return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
3060}
3061
3062static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
3063    RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
3064}
3065
3066VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
3067    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
3068    QUEUE_STATE *queue_state = nullptr;
3069    unique_lock_t lock(global_lock);
3070    bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
3071    lock.unlock();
3072    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3073    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
3074    if (VK_SUCCESS == result) {
3075        lock.lock();
3076        PostCallRecordQueueWaitIdle(dev_data, queue_state);
3077        lock.unlock();
3078    }
3079    return result;
3080}
3081
3082static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
3083    if (dev_data->instance_data->disabled.device_wait_idle) return false;
3084    bool skip = false;
3085    for (auto &queue : dev_data->queueMap) {
3086        skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
3087    }
3088    return skip;
3089}
3090
3091static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
3092    for (auto &queue : dev_data->queueMap) {
3093        RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
3094    }
3095}
3096
3097VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
3098    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3099    unique_lock_t lock(global_lock);
3100    bool skip = PreCallValidateDeviceWaitIdle(dev_data);
3101    lock.unlock();
3102    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3103    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
3104    if (VK_SUCCESS == result) {
3105        lock.lock();
3106        PostCallRecordDeviceWaitIdle(dev_data);
3107        lock.unlock();
3108    }
3109    return result;
3110}
3111
3112static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
3113    *fence_node = GetFenceNode(dev_data, fence);
3114    *obj_struct = {HandleToUint64(fence), kVulkanObjectTypeFence};
3115    if (dev_data->instance_data->disabled.destroy_fence) return false;
3116    bool skip = false;
3117    if (*fence_node) {
3118        if ((*fence_node)->state == FENCE_INFLIGHT) {
3119            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
3120                            HandleToUint64(fence), __LINE__, VALIDATION_ERROR_24e008c0, "DS", "Fence 0x%" PRIx64 " is in use. %s",
3121                            HandleToUint64(fence), validation_error_map[VALIDATION_ERROR_24e008c0]);
3122        }
3123    }
3124    return skip;
3125}
3126
3127static void PostCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }
3128
3129VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
3130    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3131    // Common data objects used pre & post call
3132    FENCE_NODE *fence_node = nullptr;
3133    VK_OBJECT obj_struct;
3134    unique_lock_t lock(global_lock);
3135    bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);
3136
3137    if (!skip) {
3138        lock.unlock();
3139        dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
3140        lock.lock();
3141        PostCallRecordDestroyFence(dev_data, fence);
3142    }
3143}
3144
3145static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
3146                                            VK_OBJECT *obj_struct) {
3147    *sema_node = GetSemaphoreNode(dev_data, semaphore);
3148    *obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
3149    if (dev_data->instance_data->disabled.destroy_semaphore) return false;
3150    bool skip = false;
3151    if (*sema_node) {
3152        skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, VALIDATION_ERROR_268008e2);
3153    }
3154    return skip;
3155}
3156
3157static void PostCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }
3158
3159VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
3160    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3161    SEMAPHORE_NODE *sema_node;
3162    VK_OBJECT obj_struct;
3163    unique_lock_t lock(global_lock);
3164    bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
3165    if (!skip) {
3166        lock.unlock();
3167        dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
3168        lock.lock();
3169        PostCallRecordDestroySemaphore(dev_data, semaphore);
3170    }
3171}
3172
3173static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
3174    *event_state = GetEventNode(dev_data, event);
3175    *obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent};
3176    if (dev_data->instance_data->disabled.destroy_event) return false;
3177    bool skip = false;
3178    if (*event_state) {
3179        skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, VALIDATION_ERROR_24c008f2);
3180    }
3181    return skip;
3182}
3183
3184static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
3185    invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
3186    dev_data->eventMap.erase(event);
3187}
3188
3189VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
3190    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3191    EVENT_STATE *event_state = nullptr;
3192    VK_OBJECT obj_struct;
3193    unique_lock_t lock(global_lock);
3194    bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
3195    if (!skip) {
3196        lock.unlock();
3197        dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
3198        lock.lock();
3199        if (event != VK_NULL_HANDLE) {
3200            PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
3201        }
3202    }
3203}
3204
3205static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
3206                                            VK_OBJECT *obj_struct) {
3207    *qp_state = GetQueryPoolNode(dev_data, query_pool);
3208    *obj_struct = {HandleToUint64(query_pool), kVulkanObjectTypeQueryPool};
3209    if (dev_data->instance_data->disabled.destroy_query_pool) return false;
3210    bool skip = false;
3211    if (*qp_state) {
3212        skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, VALIDATION_ERROR_26200632);
3213    }
3214    return skip;
3215}
3216
3217static void PostCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
3218                                           VK_OBJECT obj_struct) {
3219    invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
3220    dev_data->queryPoolMap.erase(query_pool);
3221}
3222
3223VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
3224    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3225    QUERY_POOL_NODE *qp_state = nullptr;
3226    VK_OBJECT obj_struct;
3227    unique_lock_t lock(global_lock);
3228    bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
3229    if (!skip) {
3230        lock.unlock();
3231        dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
3232        lock.lock();
3233        if (queryPool != VK_NULL_HANDLE) {
3234            PostCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
3235        }
3236    }
3237}

static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
3239                                               uint32_t query_count, VkQueryResultFlags flags,
3240                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
3241    // TODO: clean this up, it's insanely wasteful.
3242    for (auto cmd_buffer : dev_data->commandBufferMap) {
3243        if (cmd_buffer.second->in_use.load()) {
3244            for (auto query_state_pair : cmd_buffer.second->queryToStateMap) {
                (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer.first);
3247            }
3248        }
3249    }
3250    if (dev_data->instance_data->disabled.get_query_pool_results) return false;
3251    bool skip = false;
3252    for (uint32_t i = 0; i < query_count; ++i) {
3253        QueryObject query = {query_pool, first_query + i};
3254        auto qif_pair = queries_in_flight->find(query);
3255        auto query_state_pair = dev_data->queryToStateMap.find(query);
        if (query_state_pair != dev_data->queryToStateMap.end()) {
            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
                // Available and in flight
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = GetCBNode(dev_data, cmd_buffer);
                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
                    if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                        "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
                                        HandleToUint64(query_pool), first_query + i);
                    }
                }
            } else if (qif_pair != queries_in_flight->end() && !query_state_pair->second) {
                // Unavailable and in flight
                // TODO : Can there be the same query in use by multiple command buffers in flight?
                bool make_available = false;
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = GetCBNode(dev_data, cmd_buffer);
                    make_available |= cb->queryToStateMap[query];
                }
                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                    "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                    HandleToUint64(query_pool), first_query + i);
                }
            } else if (!query_state_pair->second) {
                // Unavailable and not in flight
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                HandleToUint64(query_pool), first_query + i);
            }
        } else {
            // Uninitialized -- no data has been collected for this query
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                            "Cannot get query results on queryPool 0x%" PRIx64
                            " with index %d as data has not been collected for this index.",
                            HandleToUint64(query_pool), first_query + i);
        }
3300    }
3301    return skip;
3302}
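// Example (illustrative): if query 3 of the pool was written by a still-executing command buffer
// and is not yet available, the checks above only accept the call when some in-flight command
// buffer will make it available and the caller set VK_QUERY_RESULT_WAIT_BIT or
// VK_QUERY_RESULT_PARTIAL_BIT.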
3303
3304static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
3305                                              uint32_t query_count,
3306                                              unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
3307    for (uint32_t i = 0; i < query_count; ++i) {
3308        QueryObject query = {query_pool, first_query + i};
3309        auto qif_pair = queries_in_flight->find(query);
3310        auto query_state_pair = dev_data->queryToStateMap.find(query);
3311        if (query_state_pair != dev_data->queryToStateMap.end()) {
3312            // Available and in flight
            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
3315                for (auto cmd_buffer : qif_pair->second) {
3316                    auto cb = GetCBNode(dev_data, cmd_buffer);
3317                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
3318                    if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
3319                        for (auto event : query_event_pair->second) {
3320                            dev_data->eventMap[event].needsSignaled = true;
3321                        }
3322                    }
3323                }
3324            }
3325        }
3326    }
3327}
3328
3329VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
3330                                                   size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
3331    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3332    unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
3333    unique_lock_t lock(global_lock);
3334    bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
3335    lock.unlock();
3336    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3337    VkResult result =
3338        dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
3339    lock.lock();
3340    PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
3341    lock.unlock();
3342    return result;
3343}
3344
// Return true if the given ranges intersect, else false
// Prereq : For both ranges, range->end >= range->start (i.e. the range is non-empty). A violation of
//  that should already have been flagged as an error, so it is not re-checked here
// When one range is linear and the other is non-linear, both are padded out to the device's
//  bufferImageGranularity before comparison, per the Buffer-Image Granularity rules
// In that padded case, if an alias is encountered then a warning is logged and skip may be set by
//  the debug callback, so callers on the validation path should merge in the skip value
// The warning (but not the intersection test itself) is suppressed by passing skip_checks=true,
//  for call sites outside the validation path
3352static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip,
3353                            bool skip_checks) {
3354    *skip = false;
3355    auto r1_start = range1->start;
3356    auto r1_end = range1->end;
3357    auto r2_start = range2->start;
3358    auto r2_end = range2->end;
3359    VkDeviceSize pad_align = 1;
3360    if (range1->linear != range2->linear) {
3361        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
3362    }
3363    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
3364    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;
3365
3366    if (!skip_checks && (range1->linear != range2->linear)) {
3367        // In linear vs. non-linear case, warn of aliasing
3368        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
3369        const char *r1_type_str = range1->image ? "image" : "buffer";
3370        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
3371        const char *r2_type_str = range2->image ? "image" : "buffer";
3372        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
3373        *skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, 0,
3374                         MEMTRACK_INVALID_ALIASING, "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
3375                                                           " which may indicate a bug. For further info refer to the "
3376                                                           "Buffer-Image Granularity section of the Vulkan specification. "
3377                                                           "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/"
3378                                                           "xhtml/vkspec.html#resources-bufferimagegranularity)",
3379                         r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
3380    }
3381    // Ranges intersect
3382    return true;
3383}
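// Worked example (illustrative values): with bufferImageGranularity = 0x400, a linear buffer at
// [0x000, 0x3FF] and a non-linear image at [0x400, 0x7FF] mask to granularity pages 0x000 and
// 0x400, so the early-out comparisons above report no intersection. If the buffer instead ended
// at 0x400, it would share the image's page: both masked compares fail and the ranges alias.
// (The masking assumes bufferImageGranularity is a power of two.)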
// Simplified rangesIntersect that calls the above function to check range1 for intersection with
// the [offset, end] address range
bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
    // Create a local MEMORY_RANGE struct to wrap offset/end
    MEMORY_RANGE range_wrap;
    // Sync linear with range1 to avoid the padding path and its potential validation warning
    range_wrap.linear = range1->linear;
3390    range_wrap.start = offset;
3391    range_wrap.end = end;
3392    bool tmp_bool;
3393    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool, true);
3394}
// For given mem_info, mark as valid all bound ranges that intersect the [offset, end] range
3396// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
3397static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
3398    bool tmp_bool = false;
3399    MEMORY_RANGE map_range = {};
3400    map_range.linear = true;
3401    map_range.start = offset;
3402    map_range.end = end;
3403    for (auto &handle_range_pair : mem_info->bound_ranges) {
3404        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool, false)) {
3405            // TODO : WARN here if tmp_bool true?
3406            handle_range_pair.second.valid = true;
3407        }
3408    }
3409}
3410
3411static bool ValidateInsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
3412                                      VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image,
3413                                      bool is_linear, const char *api_name) {
3414    bool skip = false;
3415
3416    MEMORY_RANGE range;
3417    range.image = is_image;
3418    range.handle = handle;
3419    range.linear = is_linear;
3420    range.valid = mem_info->global_valid;
3421    range.memory = mem_info->mem;
3422    range.start = memoryOffset;
3423    range.size = memRequirements.size;
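    // Bound ranges are tracked as inclusive [start, end] byte intervals, hence the -1 below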
3424    range.end = memoryOffset + memRequirements.size - 1;
3425    range.aliases.clear();
3426
3427    // Check for aliasing problems.
3428    for (auto &obj_range_pair : mem_info->bound_ranges) {
3429        auto check_range = &obj_range_pair.second;
3430        bool intersection_error = false;
3431        if (rangesIntersect(dev_data, &range, check_range, &intersection_error, false)) {
3432            skip |= intersection_error;
3433            range.aliases.insert(check_range);
3434        }
3435    }
3436
3437    if (memoryOffset >= mem_info->alloc_info.allocationSize) {
3438        UNIQUE_VALIDATION_ERROR_CODE error_code = is_image ? VALIDATION_ERROR_1740082c : VALIDATION_ERROR_1700080e;
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        HandleToUint64(mem_info->mem), __LINE__, error_code, "MEM",
                        "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                        "), memoryOffset=0x%" PRIxLEAST64 " must be less than the memory allocation size 0x%" PRIxLEAST64 ". %s",
                        api_name, HandleToUint64(mem_info->mem), handle, memoryOffset, mem_info->alloc_info.allocationSize,
                        validation_error_map[error_code]);
3445    }
3446
3447    return skip;
3448}
3449
// Object with given handle is being bound to memory w/ given mem_info struct.
//  Track the newly bound memory range with given memoryOffset, and link it with any previously
//  bound ranges it aliases. The linear vs. non-linear overlap error checking is done separately
//  in ValidateInsertMemoryRange(), so this record-only function returns nothing.
// is_image indicates an image object, otherwise handle is for a buffer
// is_linear indicates a buffer or linear image
3457static void InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
3458                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
3459    MEMORY_RANGE range;
3460
3461    range.image = is_image;
3462    range.handle = handle;
3463    range.linear = is_linear;
3464    range.valid = mem_info->global_valid;
3465    range.memory = mem_info->mem;
3466    range.start = memoryOffset;
3467    range.size = memRequirements.size;
3468    range.end = memoryOffset + memRequirements.size - 1;
3469    range.aliases.clear();
    // Update memory aliasing
    // Save aliased ranges so we can copy them into the final map entry below. We can't do that in the
    // loop because we don't yet have the final pointer; if we inserted into the map before the loop to
    // get that pointer, we could end up checking the new range against itself.
3473    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
3474    for (auto &obj_range_pair : mem_info->bound_ranges) {
3475        auto check_range = &obj_range_pair.second;
3476        bool intersection_error = false;
3477        if (rangesIntersect(dev_data, &range, check_range, &intersection_error, true)) {
3478            range.aliases.insert(check_range);
3479            tmp_alias_ranges.insert(check_range);
3480        }
3481    }
3482    mem_info->bound_ranges[handle] = std::move(range);
3483    for (auto tmp_range : tmp_alias_ranges) {
3484        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
3485    }
3486    if (is_image)
3487        mem_info->bound_images.insert(handle);
3488    else
3489        mem_info->bound_buffers.insert(handle);
3490}
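// Illustrative scenario: bind buffer B at bytes [0x0, 0xFF] of a VkDeviceMemory, then image I at
// [0x80, 0x17F] of the same memory. After the second insert, B's range lists I in its aliases set
// and vice versa, so later invalidation of either binding can walk the alias links to the other.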
3491
3492static bool ValidateInsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
3493                                           VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
3494                                           const char *api_name) {
3495    return ValidateInsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear, api_name);
3496}
3497static void InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
3498                                   VkMemoryRequirements mem_reqs, bool is_linear) {
3499    InsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear);
3500}
3501
3502static bool ValidateInsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
3503                                            VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) {
3504    return ValidateInsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true, api_name);
3505}
3506static void InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
3507                                    VkMemoryRequirements mem_reqs) {
3508    InsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true);
3509}
3510
// Remove MEMORY_RANGE struct for given handle from bound_ranges of mem_info
//  is_image indicates if handle is for image or buffer
//  This function will also remove the handle from the appropriate bound_images or bound_buffers
//  set and clean up any aliases for the range being removed.
3515static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
3516    auto erase_range = &mem_info->bound_ranges[handle];
3517    for (auto alias_range : erase_range->aliases) {
3518        alias_range->aliases.erase(erase_range);
3519    }
3520    erase_range->aliases.clear();
3521    mem_info->bound_ranges.erase(handle);
3522    if (is_image) {
3523        mem_info->bound_images.erase(handle);
3524    } else {
3525        mem_info->bound_buffers.erase(handle);
3526    }
3527}
3528
3529void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
3530
3531void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
3532
3533VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
3534    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3535    BUFFER_STATE *buffer_state = nullptr;
3536    VK_OBJECT obj_struct;
3537    unique_lock_t lock(global_lock);
3538    bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
3539    if (!skip) {
3540        lock.unlock();
3541        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
3542        lock.lock();
3543        if (buffer != VK_NULL_HANDLE) {
3544            PostCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
3545        }
3546    }
3547}
3548
3549VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
3550    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3551    // Common data objects used pre & post call
3552    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
3553    VK_OBJECT obj_struct;
3554    unique_lock_t lock(global_lock);
3555    // Validate state before calling down chain, update common data if we'll be calling down chain
3556    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
3557    if (!skip) {
3558        lock.unlock();
3559        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
3560        lock.lock();
3561        if (bufferView != VK_NULL_HANDLE) {
3562            PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
3563        }
3564    }
3565}
3566
3567VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
3568    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3569    IMAGE_STATE *image_state = nullptr;
3570    VK_OBJECT obj_struct;
3571    unique_lock_t lock(global_lock);
3572    bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
3573    if (!skip) {
3574        lock.unlock();
3575        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
3576        lock.lock();
3577        if (image != VK_NULL_HANDLE) {
3578            PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
3579        }
3580    }
3581}
3582
3583static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
3584                                const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
3585    bool skip = false;
3586    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
3587        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3588                       HandleToUint64(mem_info->mem), __LINE__, msgCode, "MT",
3589                       "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
3590                       "type (0x%X) of this memory object 0x%" PRIx64 ". %s",
3591                       funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, HandleToUint64(mem_info->mem),
3592                       validation_error_map[msgCode]);
3593    }
3594    return skip;
3595}
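// Worked example (illustrative): memory_type_bits = 0x5 permits memory type indices 0 and 2 (bits
// 0 and 2 set). Binding to an allocation whose memoryTypeIndex is 1 fails the check above, since
// (1 << 1) & 0x5 == 0.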
3596
3597static bool PreCallValidateBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
3598                                            VkDeviceSize memoryOffset) {
3599    bool skip = false;
3600    if (buffer_state) {
3601        unique_lock_t lock(global_lock);
3602        // Track objects tied to memory
3603        uint64_t buffer_handle = HandleToUint64(buffer);
3604        skip = ValidateSetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, "vkBindBufferMemory()");
3605        if (!buffer_state->memory_requirements_checked) {
            // There's no explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
            // BindBufferMemory, but it is implied: the memory being bound must conform to the VkMemoryRequirements
            // returned by vkGetBufferMemoryRequirements()
3609            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3610                            buffer_handle, __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
3611                            "vkBindBufferMemory(): Binding memory to buffer 0x%" PRIxLEAST64
3612                            " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
3613                            buffer_handle);
3614            // Make the call for them so we can verify the state
3615            lock.unlock();
3616            dev_data->dispatch_table.GetBufferMemoryRequirements(dev_data->device, buffer, &buffer_state->requirements);
3617            lock.lock();
3618        }
3619
3620        // Validate bound memory range information
3621        auto mem_info = GetMemObjInfo(dev_data, mem);
3622        if (mem_info) {
3623            skip |= ValidateInsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements,
3624                                                    "vkBindBufferMemory()");
3625            skip |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, "vkBindBufferMemory()",
3626                                        VALIDATION_ERROR_17000816);
3627        }
3628
3629        // Validate memory requirements alignment
3630        if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
3631            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3632                            buffer_handle, __LINE__, VALIDATION_ERROR_17000818, "DS",
3633                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64
3634                            " but must be an integer multiple of the "
3635                            "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
3636                            ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
3637                            memoryOffset, buffer_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_17000818]);
3638        }
3639
        // Validate memory requirements size (mem_info may be null if an invalid memory handle was passed in)
        if (mem_info && (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset))) {
3642            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3643                            buffer_handle, __LINE__, VALIDATION_ERROR_1700081a, "DS",
3644                            "vkBindBufferMemory(): memory size minus memoryOffset is 0x%" PRIxLEAST64
3645                            " but must be at least as large as "
3646                            "VkMemoryRequirements::size value 0x%" PRIxLEAST64
3647                            ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
3648                            mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size,
3649                            validation_error_map[VALIDATION_ERROR_1700081a]);
3650        }
3651
3652        // Validate device limits alignments
3653        static const VkBufferUsageFlagBits usage_list[3] = {
3654            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
3655            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
3656        static const char *memory_type[3] = {"texel", "uniform", "storage"};
3657        static const char *offset_name[3] = {"minTexelBufferOffsetAlignment", "minUniformBufferOffsetAlignment",
3658                                             "minStorageBufferOffsetAlignment"};
3659
3660        // TODO:  vk_validation_stats.py cannot abide braces immediately preceding or following a validation error enum
3661        // clang-format off
3662        static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = { VALIDATION_ERROR_17000810, VALIDATION_ERROR_17000812,
3663            VALIDATION_ERROR_17000814 };
3664        // clang-format on
3665
        // Intentionally not static: these alignment limits come from the current device's properties
3667        const VkDeviceSize offset_requirement[3] = {
3668            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
3669            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
3670            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment};
        VkBufferUsageFlags usage = buffer_state->createInfo.usage;
3672
3673        for (int i = 0; i < 3; i++) {
3674            if (usage & usage_list[i]) {
3675                if (SafeModulo(memoryOffset, offset_requirement[i]) != 0) {
3676                    skip |= log_msg(
3677                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
3678                        __LINE__, msgCode[i], "DS", "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64
3679                                                    " but must be a multiple of "
3680                                                    "device limit %s 0x%" PRIxLEAST64 ". %s",
3681                        memory_type[i], memoryOffset, offset_name[i], offset_requirement[i], validation_error_map[msgCode[i]]);
3682                }
3683            }
3684        }
3685    }
3686    return skip;
3687}
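// Sketch of the call order the warning above expects from the application (illustrative app-side
// code, not part of this layer):
//     VkMemoryRequirements reqs;
//     vkGetBufferMemoryRequirements(device, buffer, &reqs);
//     // choose a memory type allowed by reqs.memoryTypeBits, allocate at least reqs.size bytes,
//     // then bind at an offset that is a multiple of reqs.alignment:
//     vkBindBufferMemory(device, buffer, memory, offset);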
3688
3689static void PostCallRecordBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
3690                                           VkDeviceSize memoryOffset) {
3691    if (buffer_state) {
3692        unique_lock_t lock(global_lock);
3693        // Track bound memory range information
3694        auto mem_info = GetMemObjInfo(dev_data, mem);
3695        if (mem_info) {
3696            InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
3697        }
3698
3699        // Track objects tied to memory
3700        uint64_t buffer_handle = HandleToUint64(buffer);
3701        SetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, "vkBindBufferMemory()");
3702
3703        buffer_state->binding.mem = mem;
3704        buffer_state->binding.offset = memoryOffset;
3705        buffer_state->binding.size = buffer_state->requirements.size;
3706    }
3707}
3708
3709VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
3710    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3711    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3712    auto buffer_state = GetBufferState(dev_data, buffer);
3713    bool skip = PreCallValidateBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset);
3714    if (!skip) {
3715        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
3716        if (result == VK_SUCCESS) {
3717            PostCallRecordBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset);
3718        }
3719    }
3720    return result;
3721}
3722
3723VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
3724                                                       VkMemoryRequirements *pMemoryRequirements) {
3725    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3726    dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
3727    auto buffer_state = GetBufferState(dev_data, buffer);
3728    if (buffer_state) {
3729        buffer_state->requirements = *pMemoryRequirements;
3730        buffer_state->memory_requirements_checked = true;
3731    }
3732}
3733
3734VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
3735    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3736    dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
3737    auto image_state = GetImageState(dev_data, image);
3738    if (image_state) {
3739        image_state->requirements = *pMemoryRequirements;
3740        image_state->memory_requirements_checked = true;
3741    }
3742}
3743
3744VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
3745    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3746    // Common data objects used pre & post call
3747    IMAGE_VIEW_STATE *image_view_state = nullptr;
3748    VK_OBJECT obj_struct;
3749    unique_lock_t lock(global_lock);
3750    bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
3751    if (!skip) {
3752        lock.unlock();
3753        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
3754        lock.lock();
3755        if (imageView != VK_NULL_HANDLE) {
3756            PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
3757        }
3758    }
3759}
3760
3761VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
3762                                               const VkAllocationCallbacks *pAllocator) {
3763    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3764
3765    unique_lock_t lock(global_lock);
3766    dev_data->shaderModuleMap.erase(shaderModule);
3767    lock.unlock();
3768
3769    dev_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
3770}
3771
3772static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
3773                                           VK_OBJECT *obj_struct) {
3774    *pipeline_state = getPipelineState(dev_data, pipeline);
3775    *obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline};
3776    if (dev_data->instance_data->disabled.destroy_pipeline) return false;
3777    bool skip = false;
3778    if (*pipeline_state) {
3779        skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, VALIDATION_ERROR_25c005fa);
3780    }
3781    return skip;
3782}
3783
3784static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
3785                                          VK_OBJECT obj_struct) {
3786    // Any bound cmd buffers are now invalid
3787    invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
    delete pipeline_state;
3789    dev_data->pipelineMap.erase(pipeline);
3790}
3791
3792VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
3793    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3794    PIPELINE_STATE *pipeline_state = nullptr;
3795    VK_OBJECT obj_struct;
3796    unique_lock_t lock(global_lock);
3797    bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
3798    if (!skip) {
3799        lock.unlock();
3800        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
3801        lock.lock();
3802        if (pipeline != VK_NULL_HANDLE) {
3803            PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
3804        }
3805    }
3806}
3807
3808VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
3809                                                 const VkAllocationCallbacks *pAllocator) {
3810    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3811    unique_lock_t lock(global_lock);
3812    dev_data->pipelineLayoutMap.erase(pipelineLayout);
3813    lock.unlock();
3814
3815    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
3816}
3817
3818static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
3819                                          VK_OBJECT *obj_struct) {
3820    *sampler_state = GetSamplerState(dev_data, sampler);
3821    *obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler};
3822    if (dev_data->instance_data->disabled.destroy_sampler) return false;
3823    bool skip = false;
3824    if (*sampler_state) {
3825        skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, VALIDATION_ERROR_26600874);
3826    }
3827    return skip;
3828}
3829
3830static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
3831                                         VK_OBJECT obj_struct) {
3832    // Any bound cmd buffers are now invalid
3833    if (sampler_state) invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
3834    dev_data->samplerMap.erase(sampler);
3835}
3836
3837VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
3838    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3839    SAMPLER_STATE *sampler_state = nullptr;
3840    VK_OBJECT obj_struct;
3841    unique_lock_t lock(global_lock);
3842    bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
3843    if (!skip) {
3844        lock.unlock();
3845        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
3846        lock.lock();
3847        if (sampler != VK_NULL_HANDLE) {
3848            PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
3849        }
3850    }
3851}
3852
3853static void PostCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
3854    dev_data->descriptorSetLayoutMap.erase(ds_layout);
3855}
3856
3857VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
3858                                                      const VkAllocationCallbacks *pAllocator) {
3859    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3860    dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
3861    unique_lock_t lock(global_lock);
3862    PostCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
3863}
3864
3865static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
3866                                                 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
3867    *desc_pool_state = GetDescriptorPoolState(dev_data, pool);
3868    *obj_struct = {HandleToUint64(pool), kVulkanObjectTypeDescriptorPool};
3869    if (dev_data->instance_data->disabled.destroy_descriptor_pool) return false;
3870    bool skip = false;
3871    if (*desc_pool_state) {
3872        skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, VALIDATION_ERROR_2440025e);
3873    }
3874    return skip;
3875}
3876
3877static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
3878                                                DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
3879    // Any bound cmd buffers are now invalid
3880    invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
3881    // Free sets that were in this pool
3882    for (auto ds : desc_pool_state->sets) {
3883        freeDescriptorSet(dev_data, ds);
3884    }
3885    dev_data->descriptorPoolMap.erase(descriptorPool);
3886    delete desc_pool_state;
3887}
3888
3889VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
3890                                                 const VkAllocationCallbacks *pAllocator) {
3891    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3892    DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
3893    VK_OBJECT obj_struct;
3894    unique_lock_t lock(global_lock);
3895    bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
3896    if (!skip) {
3897        lock.unlock();
3898        dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
3899        lock.lock();
3900        if (descriptorPool != VK_NULL_HANDLE) {
3901            PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
3902        }
3903    }
3904}

// Verify cmdBuffer in given cb_node is not in use, and return skip result
// This function is only valid at a point when cmdBuffer is being reset or freed
3909static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
3910                                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
3911    bool skip = false;
3912    if (cb_node->in_use.load()) {
3913        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3914                        HandleToUint64(cb_node->commandBuffer), __LINE__, error_code, "DS",
3915                        "Attempt to %s command buffer (0x%p) which is in use. %s", action, cb_node->commandBuffer,
3916                        validation_error_map[error_code]);
3917    }
3918    return skip;
3919}
3920
3921// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
3922static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
3923                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
3924    bool skip = false;
3925    for (auto cmd_buffer : pPool->commandBuffers) {
3926        skip |= checkCommandBufferInFlight(dev_data, GetCBNode(dev_data, cmd_buffer), action, error_code);
3927    }
3928    return skip;
3929}
3930
3931VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
3932                                              const VkCommandBuffer *pCommandBuffers) {
3933    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3934    bool skip = false;
3935    unique_lock_t lock(global_lock);
3936
3937    for (uint32_t i = 0; i < commandBufferCount; i++) {
3938        auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
        // Verify that the command buffer is not in flight before freeing it
3940        if (cb_node) {
3941            skip |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_2840005e);
3942        }
3943    }
3944
3945    if (skip) return;
3946
3947    auto pPool = GetCommandPoolNode(dev_data, commandPool);
3948    for (uint32_t i = 0; i < commandBufferCount; i++) {
3949        auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
3950        // Delete CB information structure, and remove from commandBufferMap
3951        if (cb_node) {
3952            // reset prior to delete for data clean-up
3953            // TODO: fix this, it's insane.
3954            resetCB(dev_data, cb_node->commandBuffer);
3955            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
3956            delete cb_node;
3957        }
3958
3959        // Remove commandBuffer reference from commandPoolMap
3960        pPool->commandBuffers.remove(pCommandBuffers[i]);
3961    }
3962    lock.unlock();
3963
3964    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
3965}
3966
3967VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
3968                                                 const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
3969    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3970
3971    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
3972
3973    if (VK_SUCCESS == result) {
3974        lock_guard_t lock(global_lock);
3975        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
3976        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
3977    }
3978    return result;
3979}
3980
3981VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
3982                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
3983    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3984    bool skip = false;
3985    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
3986        if (!dev_data->enabled_features.pipelineStatisticsQuery) {
3987            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3988                            __LINE__, VALIDATION_ERROR_11c0062e, "DS",
3989                            "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device "
3990                            "with VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE. %s",
3991                            validation_error_map[VALIDATION_ERROR_11c0062e]);
3992        }
3993    }
3994
3995    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3996    if (!skip) {
3997        result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
3998    }
3999    if (result == VK_SUCCESS) {
4000        lock_guard_t lock(global_lock);
4001        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
4002        qp_node->createInfo = *pCreateInfo;
4003    }
4004    return result;
4005}
4006
static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE **cp_state) {
    *cp_state = GetCommandPoolNode(dev_data, pool);
    if (dev_data->instance_data->disabled.destroy_command_pool) return false;
    bool skip = false;
    if (*cp_state) {
        // Verify that command buffers in pool are complete (not in-flight)
        skip |= checkCommandBuffersInFlight(dev_data, *cp_state, "destroy command pool with", VALIDATION_ERROR_24000052);
    }
    return skip;
}

static void PostCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE *cp_state) {
    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
    for (auto cb : cp_state->commandBuffers) {
        auto cb_node = GetCBNode(dev_data, cb);
        clear_cmd_buf_and_mem_references(dev_data, cb_node);
        // Remove references to this cb_node prior to delete
        // TODO : Need better solution here, resetCB?
        for (auto obj : cb_node->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, cb_node);
        }
        for (auto framebuffer : cb_node->framebuffers) {
            auto fb_state = GetFramebufferState(dev_data, framebuffer);
            if (fb_state) fb_state->cb_bindings.erase(cb_node);
        }
        dev_data->commandBufferMap.erase(cb);  // Remove this command buffer
        delete cb_node;                        // delete CB info structure
    }
    dev_data->commandPoolMap.erase(pool);
}

// Destroy commandPool along with all of the commandBuffers allocated from that pool
VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    COMMAND_POOL_NODE *cp_state = nullptr;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool, &cp_state);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
        lock.lock();
        if (commandPool != VK_NULL_HANDLE) {
            PostCallRecordDestroyCommandPool(dev_data, commandPool, cp_state);
        }
    }
}

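// Illustrative sketch (not part of the layer): destroying a pool whose command buffers may
// still be executing trips VALIDATION_ERROR_24000052 in the PreCallValidate above. The
// handles below are hypothetical application objects:
//
//     vkQueueSubmit(queue, 1, &submit_info, fence);
//     // Wrong: pool destroyed while the submitted command buffer may still be in flight:
//     //     vkDestroyCommandPool(device, command_pool, nullptr);
//     // Right: wait for completion first, then destroy:
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
//     vkDestroyCommandPool(device, command_pool, nullptr);
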
VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;

    unique_lock_t lock(global_lock);
    auto pPool = GetCommandPoolNode(dev_data, commandPool);
    // Guard against an unknown pool handle before walking its command buffer list
    if (pPool) {
        skip |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_32800050);
    }
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result && pPool) {
        lock.lock();
        for (auto cmdBuffer : pPool->commandBuffers) {
            resetCB(dev_data, cmdBuffer);
        }
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    unique_lock_t lock(global_lock);
    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto pFence = GetFenceNode(dev_data, pFences[i]);
        if (pFence && pFence->state == FENCE_INFLIGHT) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        HandleToUint64(pFences[i]), __LINE__, VALIDATION_ERROR_32e008c6, "DS", "Fence 0x%" PRIx64 " is in use. %s",
                        HandleToUint64(pFences[i]), validation_error_map[VALIDATION_ERROR_32e008c6]);
        }
    }
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);

    if (result == VK_SUCCESS) {
        lock.lock();
        for (uint32_t i = 0; i < fenceCount; ++i) {
            auto pFence = GetFenceNode(dev_data, pFences[i]);
            if (pFence) {
                pFence->state = FENCE_UNSIGNALED;
            }
        }
        lock.unlock();
    }

    return result;
}

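// Illustrative sketch (not part of the layer): VALIDATION_ERROR_32e008c6 above fires when a
// fence is reset between submission and completion. The safe pattern, with hypothetical
// application handles, is wait-then-reset:
//
//     vkQueueSubmit(queue, 1, &submit_info, fence);             // fence becomes FENCE_INFLIGHT
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);  // fence retires
//     vkResetFences(device, 1, &fence);                         // now legal; state -> FENCE_UNSIGNALED
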
// For given cb_nodes, invalidate them and track object causing invalidation
void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
    for (auto cb_node : cb_nodes) {
        if (cb_node->state == CB_RECORDING) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "Invalidating a command buffer that's currently being recorded: 0x%p.", cb_node->commandBuffer);
            cb_node->state = CB_INVALID_INCOMPLETE;
        } else {
            cb_node->state = CB_INVALID_COMPLETE;
        }
        cb_node->broken_bindings.push_back(obj);

        // If secondary, then propagate the invalidation to the primaries that will call us.
        if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
            invalidateCommandBuffers(dev_data, cb_node->linkedCommandBuffers, obj);
        }
    }
}

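// Illustrative sketch (not part of the layer): one way the invalidation above is reached.
// Destroying a resource that a recorded command buffer references marks that buffer
// CB_INVALID_*, and the damage propagates from a secondary to the primaries that recorded
// vkCmdExecuteCommands() against it. Handle names are hypothetical:
//
//     vkCmdExecuteCommands(primary_cb, 1, &secondary_cb);  // links primary to secondary
//     vkEndCommandBuffer(primary_cb);
//     vkDestroyFramebuffer(device, fb_used_by_secondary, nullptr);  // invalidates secondary AND primary
//     // A later submit of primary_cb would then be flagged as using an invalid command buffer.
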
static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
                                              FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
    *framebuffer_state = GetFramebufferState(dev_data, framebuffer);
    *obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer};
    if (dev_data->instance_data->disabled.destroy_framebuffer) return false;
    bool skip = false;
    if (*framebuffer_state) {
        skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, VALIDATION_ERROR_250006f8);
    }
    return skip;
}

static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
                                             VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
    dev_data->frameBufferMap.erase(framebuffer);
}

VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    FRAMEBUFFER_STATE *framebuffer_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
        lock.lock();
        if (framebuffer != VK_NULL_HANDLE) {
            PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
        }
    }
}

static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
                                             VK_OBJECT *obj_struct) {
    *rp_state = GetRenderPassState(dev_data, render_pass);
    *obj_struct = {HandleToUint64(render_pass), kVulkanObjectTypeRenderPass};
    if (dev_data->instance_data->disabled.destroy_renderpass) return false;
    bool skip = false;
    if (*rp_state) {
        skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, VALIDATION_ERROR_264006d2);
    }
    return skip;
}

static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
                                            VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
    dev_data->renderPassMap.erase(render_pass);
}

VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    RENDER_PASS_STATE *rp_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
        lock.lock();
        if (renderPass != VK_NULL_HANDLE) {
            PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCreateBuffer(dev_data, pCreateInfo);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateBuffer(dev_data, pCreateInfo, pBuffer);
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateBufferView(dev_data, pCreateInfo, pView);
        lock.unlock();
    }
    return result;
}

// Access helper functions for external modules
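// TODO: The two Get*FormatProperties helpers below heap-allocate their results and hand back
// owning raw pointers; nothing in this file frees them, so as written each call appears to
// leak. Returning by value (or filling a caller-provided struct) would fix this, but that
// requires touching every external caller, so it is only noted here.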
const VkFormatProperties *GetFormatProperties(core_validation::layer_data *device_data, VkFormat format) {
    VkFormatProperties *format_properties = new VkFormatProperties;
    instance_layer_data *instance_data =
        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
    instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, format_properties);
    return format_properties;
}

const VkImageFormatProperties *GetImageFormatProperties(core_validation::layer_data *device_data, VkFormat format,
                                                        VkImageType image_type, VkImageTiling tiling, VkImageUsageFlags usage,
                                                        VkImageCreateFlags flags) {
    VkImageFormatProperties *image_format_properties = new VkImageFormatProperties;
    instance_layer_data *instance_data =
        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
    instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties(device_data->physical_device, format, image_type, tiling,
                                                                         usage, flags, image_format_properties);
    return image_format_properties;
}

const debug_report_data *GetReportData(const core_validation::layer_data *device_data) { return device_data->report_data; }

const VkPhysicalDeviceProperties *GetPhysicalDeviceProperties(core_validation::layer_data *device_data) {
    return &device_data->phys_dev_props;
}

const CHECK_DISABLED *GetDisables(core_validation::layer_data *device_data) { return &device_data->instance_data->disabled; }

std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *device_data) {
    return &device_data->imageMap;
}

std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(core_validation::layer_data *device_data) {
    return &device_data->imageSubresourceMap;
}

std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *device_data) {
    return &device_data->imageLayoutMap;
}

std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *GetImageLayoutMap(layer_data const *device_data) {
    return &device_data->imageLayoutMap;
}

std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *GetBufferMap(layer_data *device_data) {
    return &device_data->bufferMap;
}

std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *GetBufferViewMap(layer_data *device_data) {
    return &device_data->bufferViewMap;
}

std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *GetImageViewMap(layer_data *device_data) {
    return &device_data->imageViewMap;
}

const PHYS_DEV_PROPERTIES_NODE *GetPhysDevProperties(const layer_data *device_data) {
    return &device_data->phys_dev_properties;
}

const VkPhysicalDeviceFeatures *GetEnabledFeatures(const layer_data *device_data) {
    return &device_data->enabled_features;
}

const DeviceExtensions *GetDeviceExtensions(const layer_data *device_data) { return &device_data->extensions; }

VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = PreCallValidateCreateImage(dev_data, pCreateInfo, pAllocator, pImage);
    if (!skip) {
        result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
    }
    if (VK_SUCCESS == result) {
        lock_guard_t lock(global_lock);
        PostCallRecordCreateImage(dev_data, pCreateInfo, pImage);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
        lock.unlock();
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
    if (VK_SUCCESS == result) {
        lock_guard_t lock(global_lock);
        auto &fence_node = dev_data->fenceMap[*pFence];
        fence_node.fence = *pFence;
        fence_node.createInfo = *pCreateInfo;
        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
    }
    return result;
}

// TODO handle pipeline caches
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
                                                const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize,
                                                    void *pData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
                                                   const VkPipelineCache *pSrcCaches) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
    return result;
}

// utility function to set collective state for pipeline
void set_pipeline_state(PIPELINE_STATE *pPipe) {
    // If any enabled attachment uses a constant-color blend factor, record that at the pipeline level
    if (pPipe->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
                    pPipe->blendConstantsEnabled = true;
                }
            }
        }
    }
}

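// Illustrative sketch (not part of the layer): the blendConstantsEnabled flag recorded above
// is consumed by draw-time checks elsewhere in this layer, which expect blend constants to be
// set (e.g. via vkCmdSetBlendConstants) when the pipeline made them dynamic. A pipeline that
// lands in that state looks like this (hypothetical application code):
//
//     VkPipelineColorBlendAttachmentState att = {};
//     att.blendEnable = VK_TRUE;
//     att.srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;  // falls in the range tested above
//     att.dstColorBlendFactor = VK_BLEND_FACTOR_ONE;
//     // ...create a graphics pipeline with this attachment; set_pipeline_state() then
//     // records blendConstantsEnabled = true for it.
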
bool validate_dual_src_blend_feature(layer_data *device_data, PIPELINE_STATE *pipe_state) {
    bool skip = false;
    if (pipe_state->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pipe_state->attachments.size(); ++i) {
            if (!device_data->enabled_features.dualSrcBlend) {
                // Any of the four blend factors may reference a second source, so check the color
                // factors as well as the alpha factors
                if ((pipe_state->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
                    (pipe_state->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                    (pipe_state->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
                    (pipe_state->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                    (pipe_state->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
                    (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                    (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
                    skip |=
                        log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                HandleToUint64(pipe_state->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
                                "CmdBindPipeline: vkPipeline (0x%" PRIxLEAST64 ") attachment[" PRINTF_SIZE_T_SPECIFIER
                                "] has a dual-source blend factor but this device feature is not enabled.",
                                HandleToUint64(pipe_state->pipeline), i);
                }
            }
        }
    }
    return skip;
}

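// Illustrative sketch (not part of the layer): a blend state that trips the dualSrcBlend
// check above when the feature was not enabled at device creation (hypothetical code):
//
//     VkPipelineColorBlendAttachmentState att = {};
//     att.blendEnable = VK_TRUE;
//     att.srcColorBlendFactor = VK_BLEND_FACTOR_SRC1_COLOR;  // any *_SRC1_* factor needs dualSrcBlend
//     att.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR;
//     att.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC1_ALPHA;
//     att.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA;
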
VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    // TODO What to do with pipelineCache?
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    bool skip = false;
    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_STATE *> pipe_state(count);
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    unique_lock_t lock(global_lock);

    for (i = 0; i < count; i++) {
        pipe_state[i] = new PIPELINE_STATE;
        pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i]);
        pipe_state[i]->render_pass_ci.initialize(GetRenderPassState(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
        pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
    }

    for (i = 0; i < count; i++) {
        skip |= ValidatePipelineLocked(dev_data, pipe_state, i);
    }

    lock.unlock();

    for (i = 0; i < count; i++) {
        skip |= ValidatePipelineUnlocked(dev_data, pipe_state, i);
    }

    if (skip) {
        for (i = 0; i < count; i++) {
            delete pipe_state[i];
            pPipelines[i] = VK_NULL_HANDLE;
        }
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    auto result =
        dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    for (i = 0; i < count; i++) {
        if (pPipelines[i] == VK_NULL_HANDLE) {
            delete pipe_state[i];
        } else {
            pipe_state[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pipe_state[i]->pipeline] = pipe_state[i];
        }
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                      const VkComputePipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    bool skip = false;

    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_STATE *> pPipeState(count);
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    unique_lock_t lock(global_lock);
    for (i = 0; i < count; i++) {
        // TODO: Verify compute stage bits

        // Create and initialize internal tracking data structure
        pPipeState[i] = new PIPELINE_STATE;
        pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
        pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);

        // TODO: Add Compute Pipeline Verification
        skip |= validate_compute_pipeline(dev_data, pPipeState[i]);
    }

    if (skip) {
        for (i = 0; i < count; i++) {
            // Clean up any locally allocated data structures
            delete pPipeState[i];
            pPipelines[i] = VK_NULL_HANDLE;
        }
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    lock.unlock();
    auto result =
        dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    for (i = 0; i < count; i++) {
        if (pPipelines[i] == VK_NULL_HANDLE) {
            delete pPipeState[i];
        } else {
            pPipeState[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
        }
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (VK_SUCCESS == result) {
        lock_guard_t lock(global_lock);
        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
    }
    return result;
}

static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
    if (dev_data->instance_data->disabled.create_descriptor_set_layout) return false;
    return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info);
}

static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
                                                    VkDescriptorSetLayout set_layout) {
    dev_data->descriptorSetLayoutMap[set_layout] = std::make_shared<cvdescriptorset::DescriptorSetLayout>(create_info, set_layout);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                                         const VkAllocationCallbacks *pAllocator,
                                                         VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
    if (!skip) {
        lock.unlock();
        result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
        if (VK_SUCCESS == result) {
            lock.lock();
            PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
        }
    }
    return result;
}

// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                      const char *caller_name, uint32_t index = 0) {
    if (dev_data->instance_data->disabled.push_constant_range) return false;
    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
    bool skip = false;
    // Check that offset + size don't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (offset >= maxPushConstantsSize) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_11a0024c, "DS",
                                "%s call has push constants index %u with offset %u that "
                                "exceeds this device's maxPushConstantsSize of %u. %s",
                                caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_11a0024c]);
            }
            if (size > maxPushConstantsSize - offset) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_11a00254, "DS",
                                "%s call has push constants index %u with offset %u and size %u that "
                                "exceeds this device's maxPushConstantsSize of %u. %s",
                                caller_name, index, offset, size, maxPushConstantsSize,
                                validation_error_map[VALIDATION_ERROR_11a00254]);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (offset >= maxPushConstantsSize) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_1bc002e4, "DS",
                                "%s call has push constants index %u with offset %u that "
                                "exceeds this device's maxPushConstantsSize of %u. %s",
                                caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_1bc002e4]);
            }
            if (size > maxPushConstantsSize - offset) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_1bc002e6, "DS",
                                "%s call has push constants index %u with offset %u and size %u that "
                                "exceeds this device's maxPushConstantsSize of %u. %s",
                                caller_name, index, offset, size, maxPushConstantsSize,
                                validation_error_map[VALIDATION_ERROR_1bc002e6]);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // size needs to be non-zero and a multiple of 4.
    if ((size == 0) || ((size & 0x3) != 0)) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (size == 0) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_11a00250, "DS",
                                "%s call has push constants index %u with "
                                "size %u. Size must be greater than zero. %s",
                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_11a00250]);
            }
            if (size & 0x3) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_11a00252, "DS",
                                "%s call has push constants index %u with "
                                "size %u. Size must be a multiple of 4. %s",
                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_11a00252]);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (size == 0) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_1bc2c21b, "DS",
                                "%s call has push constants index %u with "
                                "size %u. Size must be greater than zero. %s",
                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_1bc2c21b]);
            }
            if (size & 0x3) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_1bc002e2, "DS",
                                "%s call has push constants index %u with "
                                "size %u. Size must be a multiple of 4. %s",
                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_1bc002e2]);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // offset needs to be a multiple of 4.
    if ((offset & 0x3) != 0) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_11a0024e, "DS",
                            "%s call has push constants index %u with "
                            "offset %u. Offset must be a multiple of 4. %s",
                            caller_name, index, offset, validation_error_map[VALIDATION_ERROR_11a0024e]);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_1bc002e0, "DS",
                            "%s call has push constants with "
                            "offset %u. Offset must be a multiple of 4. %s",
                            caller_name, offset, validation_error_map[VALIDATION_ERROR_1bc002e0]);
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    return skip;
}

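// Illustrative sketch (not part of the layer): how the offset/size rules above apply,
// assuming a hypothetical device limit maxPushConstantsSize of 128:
//
//     VkPushConstantRange ok   = {VK_SHADER_STAGE_VERTEX_BIT, 0, 128};  // fits exactly: valid
//     VkPushConstantRange bad1 = {VK_SHADER_STAGE_VERTEX_BIT, 4, 128};  // 4 + 128 > 128: rejected
//     VkPushConstantRange bad2 = {VK_SHADER_STAGE_VERTEX_BIT, 2, 8};    // offset not a multiple of 4
//     VkPushConstantRange bad3 = {VK_SHADER_STAGE_VERTEX_BIT, 0, 6};    // size not a multiple of 4
//     VkPushConstantRange bad4 = {VK_SHADER_STAGE_VERTEX_BIT, 0, 0};    // zero size: rejected
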
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // TODO : Add checks for VALIDATION_ERRORS 865-870
    // Push Constant Range checks
    uint32_t i, j;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skip |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
                                          pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_11a2dc03, "DS", "vkCreatePipelineLayout() call has no stageFlags set. %s",
                            validation_error_map[VALIDATION_ERROR_11a2dc03]);
        }
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    // As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
            if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_0fe00248, "DS",
                                "vkCreatePipelineLayout(): Duplicate stage flags found in ranges %d and %d. %s", i, j,
                                validation_error_map[VALIDATION_ERROR_0fe00248]);
            }
        }
    }

    VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
    if (VK_SUCCESS == result) {
        lock_guard_t lock(global_lock);
        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
        plNode.layout = *pPipelineLayout;
        plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
            plNode.set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
        }
        plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
            plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
        }
    }
    return result;
}

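// Illustrative sketch (not part of the layer): two ranges that share a stage bit, which the
// duplicate-stage check above rejects per VALIDATION_ERROR_0fe00248 (hypothetical code):
//
//     VkPushConstantRange ranges[2] = {
//         {VK_SHADER_STAGE_VERTEX_BIT, 0, 16},
//         {VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 16, 16},  // VERTEX repeats
//     };
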
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        HandleToUint64(*pDescriptorPool), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            lock_guard_t lock(global_lock);
            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
        }
    } else {
        // TODO: Is any clean-up needed if pool creation fails?
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                   VkDescriptorPoolResetFlags flags) {
    // TODO : Add checks for VALIDATION_ERROR_32a00272
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        lock_guard_t lock(global_lock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
    }
    return result;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills common_data with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // Always update common data
    cvdescriptorset::UpdateAllocateDescriptorSetsData(dev_data, pAllocateInfo, common_data);
    if (dev_data->instance_data->disabled.allocate_descriptor_sets) return false;
    // All state checks for AllocateDescriptorSets are done in a single function
    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data, pAllocateInfo, common_data);
}
// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                 VkDescriptorSet *pDescriptorSets,
                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // All the updates are contained in a single cvdescriptorset function
    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
                                                   &dev_data->setMap, dev_data);
}

// TODO: PostCallRecord routine is dependent on data generated in PreCallValidate -- needs to be moved out
VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                      VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
    bool skip = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
        lock.unlock();
    }
    return result;
}
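
// Illustrative sketch (not part of the layer): the pre-call validation above checks the
// request against the pool's remaining capacity, so a pool created with maxSets == 1 can
// satisfy only one allocation (hypothetical handles):
//
//     VkDescriptorSetAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO};
//     alloc_info.descriptorPool = pool;          // created with maxSets = 1
//     alloc_info.descriptorSetCount = 1;
//     alloc_info.pSetLayouts = &layout;
//     vkAllocateDescriptorSets(device, &alloc_info, &set);   // OK
//     vkAllocateDescriptorSets(device, &alloc_info, &set2);  // exceeds pool capacity: flagged
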
// Verify state before freeing DescriptorSets
static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                              const VkDescriptorSet *descriptor_sets) {
    if (dev_data->instance_data->disabled.free_descriptor_sets) return false;
    bool skip = false;
    // First make sure sets being destroyed are not currently in-use
    for (uint32_t i = 0; i < count; ++i) {
        if (descriptor_sets[i] != VK_NULL_HANDLE) {
            skip |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
        }
    }

    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
    if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
        // Can't Free from a NON_FREE pool
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        HandleToUint64(pool), __LINE__, VALIDATION_ERROR_28600270, "DS",
                        "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                        "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
                        validation_error_map[VALIDATION_ERROR_28600270]);
    }
    return skip;
}
// Sets have been removed from the pool so update underlying state
static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                             const VkDescriptorSet *descriptor_sets) {
    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
    // Update available descriptor sets in pool
    pool_state->availableSets += count;

    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
    for (uint32_t i = 0; i < count; ++i) {
        if (descriptor_sets[i] != VK_NULL_HANDLE) {
            auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
            uint32_t type_index = 0, descriptor_count = 0;
            for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
                type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
                descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
                pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
            }
            freeDescriptorSet(dev_data, descriptor_set);
            pool_state->sets.erase(descriptor_set);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                                  const VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
        lock.unlock();
    }
    return result;
}
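
// Illustrative sketch (not part of the layer): vkFreeDescriptorSets() is only legal on pools
// created with VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, as validated above
// (hypothetical handles; pool_size is a previously filled VkDescriptorPoolSize):
//
//     VkDescriptorPoolCreateInfo pool_ci = {VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO};
//     pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;  // without this, freeing is rejected
//     pool_ci.maxSets = 8;
//     pool_ci.poolSizeCount = 1;
//     pool_ci.pPoolSizes = &pool_size;
//     vkCreateDescriptorPool(device, &pool_ci, nullptr, &pool);
//     // ... allocate and use a set ...
//     vkFreeDescriptorSets(device, pool, 1, &set);  // OK only because of the flag above
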
// TODO : This is a Proof-of-concept for core validation architecture
//  Really we'll want to break out these functions to separate files but
//  keeping it all together here to prove out design
// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    if (dev_data->instance_data->disabled.update_descriptor_sets) return false;
    // First thing to do is perform map look-ups.
    // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets
    //  so we can't just do a single map look-up up-front, but do them individually in functions below

    // Now make call(s) that validate state, but don't perform state updates in this function
    // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the
    //  namespace which will parse params and make calls into specific class instances
    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
                                                         descriptorCopyCount, pDescriptorCopies);
}
// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                               const VkCopyDescriptorSet *pDescriptorCopies) {
    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                 pDescriptorCopies);
}

VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    // Only map look-up at top level is for device-level layer_data
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                    pDescriptorCopies);
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                      pDescriptorCopies);
        lock.lock();
        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                           pDescriptorCopies);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
                                                      VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        unique_lock_t lock(global_lock);
        auto pPool = GetCommandPoolNode(dev_data, pCreateInfo->commandPool);

        if (pPool) {
            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                pPool->commandBuffers.push_back(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                resetCB(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pCreateInfo;
                pCB->device = device;
            }
        }
        lock.unlock();
    }
    return result;
}

// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
    addCommandBufferBinding(&fb_state->cb_bindings, {HandleToUint64(fb_state->framebuffer), kVulkanObjectTypeFramebuffer},
                            cb_state);
    // The render pass look-up is loop-invariant, so bind it once here rather than once per attachment
    auto rp_state = GetRenderPassState(dev_data, fb_state->createInfo.renderPass);
    if (rp_state) {
        addCommandBufferBinding(&rp_state->cb_bindings, {HandleToUint64(rp_state->renderPass), kVulkanObjectTypeRenderPass},
                                cb_state);
    }
    for (auto attachment : fb_state->attachments) {
        auto view_state = attachment.view_state;
        if (view_state) {
            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
        }
    }
}

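// Illustrative sketch (not part of the layer): a secondary command buffer begin that satisfies
// the inheritance checks in BeginCommandBuffer() below (hypothetical handles):
//
//     VkCommandBufferInheritanceInfo inherit = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO};
//     inherit.renderPass = render_pass;   // must be compatible with the framebuffer's render pass
//     inherit.subpass = 0;                // must be < the render pass's subpassCount
//     inherit.framebuffer = framebuffer;  // optional, but checked for compatibility when given
//     VkCommandBufferBeginInfo begin = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;  // omitting this on a secondary is flagged
//     vkBeginCommandBuffer(secondary_cb, &begin);
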
VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    // Validate command buffer level
    GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
    if (cb_node) {
        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
        if (cb_node->in_use.load()) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00062, "MEM",
                            "Calling vkBeginCommandBuffer() on active command buffer %p before it has completed. "
                            "You must check command buffer fence before this call. %s",
                            commandBuffer, validation_error_map[VALIDATION_ERROR_16e00062]);
        }
        clear_cmd_buf_and_mem_references(dev_data, cb_node);
        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
            // Secondary Command Buffer
            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
            if (!pInfo) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00066, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info. %s", commandBuffer,
                            validation_error_map[VALIDATION_ERROR_16e00066]);
            } else {
                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                    assert(pInfo->renderPass);
                    string errorString = "";
                    auto framebuffer = GetFramebufferState(dev_data, pInfo->framebuffer);
                    if (framebuffer) {
                        if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
                            !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
                                                             GetRenderPassState(dev_data, pInfo->renderPass)->createInfo.ptr(),
                                                             errorString)) {
                            // renderPass that framebuffer was created with must be compatible with local renderPass
                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
                                            VALIDATION_ERROR_0280006e, "DS",
                                            "vkBeginCommandBuffer(): Secondary Command "
                                            "Buffer (0x%p) renderPass (0x%" PRIxLEAST64
                                            ") is incompatible w/ framebuffer "
                                            "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s. %s",
                                            commandBuffer, HandleToUint64(pInfo->renderPass), HandleToUint64(pInfo->framebuffer),
                                            HandleToUint64(framebuffer->createInfo.renderPass), errorString.c_str(),
                                            validation_error_map[VALIDATION_ERROR_0280006e]);
                        }
                        // Connect this framebuffer and its children to this cmdBuffer
                        AddFramebufferBinding(dev_data, cb_node, framebuffer);
                    }
                }
                if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
                                    VALIDATION_ERROR_16e00068, "DS",
                                    "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
                                    "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQueryEnable is VK_FALSE or the device does not "
                                    "support precise occlusion queries. %s",
                                    commandBuffer, validation_error_map[VALIDATION_ERROR_16e00068]);
                }
            }
            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
                auto renderPass = GetRenderPassState(dev_data, pInfo->renderPass);
                if (renderPass) {
                    if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
                                        VALIDATION_ERROR_0280006c, "DS",
                                        "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must have a subpass index (%d) "
                                        "that is less than the number of subpasses (%d). %s",
                                        commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount,
                                        validation_error_map[VALIDATION_ERROR_0280006c]);
                    }
                }
            }
        }
        if (CB_RECORDING == cb_node->state) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00062, "DS",
                            "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%p"
                            ") in the RECORDING state. Must first call vkEndCommandBuffer(). %s",
                            commandBuffer, validation_error_map[VALIDATION_ERROR_16e00062]);
        } else if (CB_RECORDED == cb_node->state || CB_INVALID_COMPLETE == cb_node->state) {
            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
            auto pPool = GetCommandPoolNode(dev_data, cmdPool);
            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00064, "DS",
                            "Call to vkBeginCommandBuffer() on command buffer (0x%p"
                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
                            commandBuffer, HandleToUint64(cmdPool), validation_error_map[VALIDATION_ERROR_16e00064]);
5034            }
5035            resetCB(dev_data, commandBuffer);
5036        }
5037        // Set updated state here in case implicit reset occurs above
5038        cb_node->state = CB_RECORDING;
5039        cb_node->beginInfo = *pBeginInfo;
5040        if (cb_node->beginInfo.pInheritanceInfo) {
5041            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
5042            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
5043            // If we are a secondary command-buffer and inheriting.  Update the items we should inherit.
5044            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
5045                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
5046                cb_node->activeRenderPass = GetRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
5047                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
5048                cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
5049                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
5050            }
5051        }
5052    }
5053    lock.unlock();
5054    if (skip) {
5055        return VK_ERROR_VALIDATION_FAILED_EXT;
5056    }
5057    VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
5058
5059    return result;
5060}
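
// Illustrative sketch (not part of the layer): a vkBeginCommandBuffer call that satisfies the
// secondary-command-buffer checks above. The render_pass, framebuffer, and secondary_cb handles are
// assumed to have been created by the application, with render_pass compatible with the
// framebuffer's render pass.
//
//     VkCommandBufferInheritanceInfo inherit = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO};
//     inherit.renderPass = render_pass;   // required when RENDER_PASS_CONTINUE_BIT is set
//     inherit.subpass = 0;                // must be < subpassCount of render_pass
//     inherit.framebuffer = framebuffer;  // optional; VK_NULL_HANDLE is also legal here
//     VkCommandBufferBeginInfo begin_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//     begin_info.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin_info.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondary_cb, &begin_info);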

VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
            // This needs spec clarification to update valid usage, see comments in PR:
            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
            skip |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_27400078);
        }
        skip |= ValidateCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
        for (auto query : pCB->activeQueries) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_2740007a, "DS",
                            "Ending command buffer with in-progress query: queryPool 0x%" PRIx64 ", index %d. %s",
                            HandleToUint64(query.pool), query.index, validation_error_map[VALIDATION_ERROR_2740007a]);
        }
    }
    if (!skip) {
        lock.unlock();
        auto result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
        lock.lock();
        // Guard on pCB: if the node lookup failed above we must not dereference it here
        if (pCB && (VK_SUCCESS == result)) {
            pCB->state = CB_RECORDED;
        }
        return result;
    } else {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    assert(pCB);  // pCB is dereferenced unconditionally below
    VkCommandPool cmdPool = pCB->createInfo.commandPool;
    auto pPool = GetCommandPoolNode(dev_data, cmdPool);
    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_3260005c, "DS",
                        "Attempt to reset command buffer (0x%p) created from command pool (0x%" PRIxLEAST64
                        ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
                        commandBuffer, HandleToUint64(cmdPool), validation_error_map[VALIDATION_ERROR_3260005c]);
    }
    skip |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_3260005a);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
    if (VK_SUCCESS == result) {
        lock.lock();
        resetCB(dev_data, commandBuffer);
        lock.unlock();
    }
    return result;
}
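
// Illustrative sketch (assumptions: device and queue_family_index exist): creating a command pool
// whose buffers may be individually reset, which avoids both VALIDATION_ERROR_3260005c above and
// the implicit-reset error in vkBeginCommandBuffer().
//
//     VkCommandPoolCreateInfo pool_info = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO};
//     pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
//     pool_info.queueFamilyIndex = queue_family_index;
//     VkCommandPool pool;
//     vkCreateCommandPool(device, &pool_info, nullptr, &pool);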

VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                           VkPipeline pipeline) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_18002415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (cb_state->activeRenderPass)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
                        HandleToUint64(pipeline), HandleToUint64(cb_state->activeRenderPass->renderPass));
        }
        // TODO: VALIDATION_ERROR_18000612 VALIDATION_ERROR_18000616

        PIPELINE_STATE *pipe_state = getPipelineState(dev_data, pipeline);
        if (pipe_state) {
            cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
            set_cb_pso_status(cb_state, pipe_state);
            set_pipeline_state(pipe_state);
            skip |= validate_dual_src_blend_feature(dev_data, pipe_state);
            // Only track bindings for pipelines that exist; pipe_state is dereferenced here, so this
            // must stay inside the null check to avoid a crash on an unknown pipeline handle
            addCommandBufferBinding(&pipe_state->cb_bindings, {HandleToUint64(pipeline), kVulkanObjectTypePipeline}, cb_state);
            if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
                // Add binding for child renderpass
                auto rp_state = GetRenderPassState(dev_data, pipe_state->graphicsPipelineCI.renderPass);
                if (rp_state) {
                    addCommandBufferBinding(&rp_state->cb_bindings,
                                            {HandleToUint64(rp_state->renderPass), kVulkanObjectTypeRenderPass}, cb_state);
                }
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pipeline), __LINE__, VALIDATION_ERROR_18027e01, "DS",
                            "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist! %s", HandleToUint64(pipeline),
                            validation_error_map[VALIDATION_ERROR_18027e01]);
        }
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}

VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
                                          const VkViewport *pViewports) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1e002415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
        pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}
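
// Worked example of the viewportMask update above (a note, not layer code): firstViewport = 2 and
// viewportCount = 3 yields ((1u << 3) - 1u) << 2 == 0b11100, marking viewports 2, 3, and 4 as set.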

VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
                                         const VkRect2D *pScissors) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d802415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
        pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}

VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d602415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
        pCB->status |= CBSTATUS_LINE_WIDTH_SET;

        PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1d600626, "DS",
                            "vkCmdSetLineWidth() called but the bound pipeline was not created with the "
                            "VK_DYNAMIC_STATE_LINE_WIDTH dynamic state. This is undefined behavior and the new "
                            "line width may be ignored. %s",
                            validation_error_map[VALIDATION_ERROR_1d600626]);
        } else {
            skip |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, kVulkanObjectTypeCommandBuffer, HandleToUint64(commandBuffer),
                                    lineWidth);
        }
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
}
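
// Illustrative sketch (assumes the usual VkGraphicsPipelineCreateInfo setup, named
// pipeline_create_info here): declaring line width as dynamic at pipeline creation time, so that
// vkCmdSetLineWidth() takes effect without the warning above.
//
//     VkDynamicState dynamic_states[] = {VK_DYNAMIC_STATE_LINE_WIDTH};
//     VkPipelineDynamicStateCreateInfo dynamic_info = {VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO};
//     dynamic_info.dynamicStateCount = 1;
//     dynamic_info.pDynamicStates = dynamic_states;
//     pipeline_create_info.pDynamicState = &dynamic_info;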

VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
                                           float depthBiasSlopeFactor) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1cc02415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
        if ((depthBiasClamp != 0.0) && (!dev_data->enabled_features.depthBiasClamp)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1cc0062c, "DS",
                            "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp "
                            "parameter must be set to 0.0. %s",
                            validation_error_map[VALIDATION_ERROR_1cc0062c]);
        }
        if (!skip) {
            pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
        }
    }
    lock.unlock();
    if (!skip)
        dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
}
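
// Note (illustrative, not layer code): when the depthBiasClamp feature was not enabled at device
// creation, the clamp argument must be exactly zero; the constant and slope factors remain free.
//
//     vkCmdSetDepthBias(cb, 1.25f /* constant */, 0.0f /* clamp, feature disabled */, 1.75f /* slope */);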

VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ca02415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
}

VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ce02415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
}

VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                    uint32_t compareMask) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |=
            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1da02415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
}

VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |=
            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1de02415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
}

VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |=
            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1dc02415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
}

VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                 VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
                                                 const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                                                 const uint32_t *pDynamicOffsets) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_17c02415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
        // Track total count of dynamic descriptor types to make sure we have an offset for each one
        uint32_t total_dynamic_descriptors = 0;
        string error_string = "";
        uint32_t last_set_index = firstSet + setCount - 1;
        if (last_set_index >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
            cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
            cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(last_set_index + 1);
        }
        auto old_final_bound_set = cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[last_set_index];
        auto pipeline_layout = getPipelineLayout(dev_data, layout);
        for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
            cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(dev_data, pDescriptorSets[set_idx]);
            if (descriptor_set) {
                cb_state->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
                cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[set_idx + firstSet] = descriptor_set;
                if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
                                    __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                    "Descriptor Set 0x%" PRIxLEAST64
                                    " bound but it was never updated. You may want to either update it or not bind it.",
                                    HandleToUint64(pDescriptorSets[set_idx]));
                }
                // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
                if (!verify_set_layout_compatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
                                    __LINE__, VALIDATION_ERROR_17c002cc, "DS",
                                    "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
                                    "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s. %s",
                                    set_idx, set_idx + firstSet, HandleToUint64(layout), error_string.c_str(),
                                    validation_error_map[VALIDATION_ERROR_17c002cc]);
                }

                auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();

                cb_state->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx].clear();

                if (set_dynamic_descriptor_count) {
                    // First make sure we won't overstep bounds of pDynamicOffsets array
                    if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(pDescriptorSets[set_idx]), __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                            "descriptorSet #%u (0x%" PRIxLEAST64
                            ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
                            "array. There must be one dynamic offset for each dynamic descriptor being bound.",
                            set_idx, HandleToUint64(pDescriptorSets[set_idx]), descriptor_set->GetDynamicDescriptorCount(),
                            (dynamicOffsetCount - total_dynamic_descriptors));
                    } else {  // Validate and store dynamic offsets with the set
                        // Validate Dynamic Offset Minimums
                        uint32_t cur_dyn_offset = total_dynamic_descriptors;
                        for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
                            if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
                                if (SafeModulo(
                                        pDynamicOffsets[cur_dyn_offset],
                                        dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
                                    skip |=
                                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
                                                VALIDATION_ERROR_17c002d4, "DS",
                                                "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
                                                "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
                                                cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                                dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
                                                validation_error_map[VALIDATION_ERROR_17c002d4]);
                                }
                                cur_dyn_offset++;
                            } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                                if (SafeModulo(
                                        pDynamicOffsets[cur_dyn_offset],
                                        dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
                                    skip |=
                                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
                                                VALIDATION_ERROR_17c002d4, "DS",
                                                "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
                                                "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
                                                cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                                dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment,
                                                validation_error_map[VALIDATION_ERROR_17c002d4]);
                                }
                                cur_dyn_offset++;
                            }
                        }

                        cb_state->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx] =
                            std::vector<uint32_t>(pDynamicOffsets + total_dynamic_descriptors,
                                                  pDynamicOffsets + total_dynamic_descriptors + set_dynamic_descriptor_count);
                        // Keep running total of dynamic descriptor count to verify at the end
                        total_dynamic_descriptors += set_dynamic_descriptor_count;
                    }
                }
            } else {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(pDescriptorSets[set_idx]), __LINE__, DRAWSTATE_INVALID_SET, "DS",
                            "Attempt to bind descriptor set 0x%" PRIxLEAST64 " that doesn't exist!",
                            HandleToUint64(pDescriptorSets[set_idx]));
            }
            // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
            if (firstSet > 0) {  // Check set #s below the first bound set
                for (uint32_t i = 0; i < firstSet; ++i) {
                    if (cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
                        !verify_set_layout_compatibility(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i],
                                                         pipeline_layout, i, error_string)) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i]), __LINE__, DRAWSTATE_NONE,
                            "DS", "DescriptorSet 0x%" PRIxLEAST64
                                  " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                            HandleToUint64(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i]), i,
                            HandleToUint64(layout));
                        // boundDescriptorSets holds DescriptorSet pointers, so clear with nullptr
                        cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i] = nullptr;
                    }
                }
            }
            // Check if newly last bound set invalidates any remaining bound sets
            if ((cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (last_set_index)) {
                if (old_final_bound_set &&
                    !verify_set_layout_compatibility(old_final_bound_set, pipeline_layout, last_set_index, error_string)) {
                    auto old_set = old_final_bound_set->GetSet();
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(old_set), __LINE__,
                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                    HandleToUint64(old_set), last_set_index,
                                    HandleToUint64(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[last_set_index]),
                                    last_set_index, last_set_index + 1, HandleToUint64(layout));
                    cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
                }
            }
        }
        // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
        if (total_dynamic_descriptors != dynamicOffsetCount) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_17c002ce, "DS",
                        "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
                        "is %u. It should exactly match the number of dynamic descriptors. %s",
                        setCount, total_dynamic_descriptors, dynamicOffsetCount, validation_error_map[VALIDATION_ERROR_17c002ce]);
        }
    }
    lock.unlock();
    if (!skip)
        dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
                                                       pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}
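
// Illustrative sketch (handles cb, layout, and set assumed; one dynamic uniform buffer in the set):
// each dynamic descriptor consumes exactly one entry of pDynamicOffsets, and each offset must be a
// multiple of the corresponding min*BufferOffsetAlignment device limit, as enforced above.
//
//     uint32_t dynamic_offset = 256;  // 256 is a typical minUniformBufferOffsetAlignment value
//     vkCmdBindDescriptorSets(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, layout,
//                             0 /* firstSet */, 1 /* setCount */, &set,
//                             1 /* dynamicOffsetCount */, &dynamic_offset);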

VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                              VkIndexType indexType) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO : Somewhere need to verify that IBs have correct usage state flagged
    unique_lock_t lock(global_lock);

    auto buffer_state = GetBufferState(dev_data, buffer);
    auto cb_node = GetCBNode(dev_data, commandBuffer);
    if (cb_node && buffer_state) {
        skip |=
            ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_17e02415);
        skip |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
        skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_17e00364);
        std::function<bool()> function = [=]() {
            return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        VkDeviceSize offset_align = 0;
        switch (indexType) {
            case VK_INDEX_TYPE_UINT16:
                offset_align = 2;
                break;
            case VK_INDEX_TYPE_UINT32:
                offset_align = 4;
                break;
            default:
                // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
                break;
        }
        if (!offset_align || (offset % offset_align)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
                            "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
                            string_VkIndexType(indexType));
        }
        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}
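
// Note (illustrative, not layer code): the offset alignment enforced above is the size of one
// index, i.e. 2 bytes for VK_INDEX_TYPE_UINT16 and 4 bytes for VK_INDEX_TYPE_UINT32.
//
//     vkCmdBindIndexBuffer(cb, index_buffer, 4 /* multiple of 4 */, VK_INDEX_TYPE_UINT32);  // OK
//     vkCmdBindIndexBuffer(cb, index_buffer, 2 /* not */, VK_INDEX_TYPE_UINT32);            // flagged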

void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
    uint32_t end = firstBinding + bindingCount;
    if (pCB->currentDrawData.buffers.size() < end) {
        pCB->currentDrawData.buffers.resize(end);
    }
    for (uint32_t i = 0; i < bindingCount; ++i) {
        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
    }
}

static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }

VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
                                                const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO : Somewhere need to verify that VBs have correct usage state flagged
    unique_lock_t lock(global_lock);

    auto cb_node = GetCBNode(dev_data, commandBuffer);
    if (cb_node) {
        skip |=
            ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_18202415);
        skip |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
        for (uint32_t i = 0; i < bindingCount; ++i) {
            auto buffer_state = GetBufferState(dev_data, pBuffers[i]);
            assert(buffer_state);
            skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_182004e8);
            std::function<bool()> function = [=]() {
                return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
            };
            cb_node->validate_functions.push_back(function);
            if (pOffsets[i] >= buffer_state->createInfo.size) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                HandleToUint64(buffer_state->buffer), __LINE__, VALIDATION_ERROR_182004e4, "DS",
                                "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer. %s",
                                pOffsets[i], validation_error_map[VALIDATION_ERROR_182004e4]);
            }
        }
        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}
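
// Illustrative sketch (vb0 and vb1 assumed to be valid VkBuffers with memory bound): every offset
// passed to vkCmdBindVertexBuffers() must fall inside the corresponding buffer, as checked above.
//
//     VkBuffer buffers[] = {vb0, vb1};
//     VkDeviceSize offsets[] = {0, 64};  // each must be < the respective buffer's createInfo.size
//     vkCmdBindVertexBuffers(cb, 0 /* firstBinding */, 2 /* bindingCount */, buffers, offsets);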

// Expects global_lock to be held by caller
static void MarkStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    for (auto imageView : pCB->updateImages) {
        auto view_state = GetImageViewState(dev_data, imageView);
        if (!view_state) continue;

        auto image_state = GetImageState(dev_data, view_state->create_info.image);
        assert(image_state);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, image_state, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    for (auto buffer : pCB->updateBuffers) {
        auto buffer_state = GetBufferState(dev_data, buffer);
        assert(buffer_state);
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, buffer_state, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
}

// Generic function to handle validation for all CmdDraw* type functions
static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
                                CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller, VkQueueFlags queue_flags,
                                UNIQUE_VALIDATION_ERROR_CODE queue_flag_code, UNIQUE_VALIDATION_ERROR_CODE msg_code,
                                UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
    bool skip = false;
    *cb_state = GetCBNode(dev_data, cmd_buffer);
    if (*cb_state) {
        skip |= ValidateCmdQueueFlags(dev_data, *cb_state, caller, queue_flags, queue_flag_code);
        skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
        skip |= ValidateDrawState(dev_data, *cb_state, indexed, bind_point, caller, dynamic_state_msg_code);
        skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
                                                                : insideRenderPass(dev_data, *cb_state, caller, msg_code);
    }
    return skip;
}

// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateDrawState(dev_data, cb_state, bind_point);
    MarkStoreImagesAndBuffersAsWritten(dev_data, cb_state);
}

// Generic function to handle state update for all CmdDraw* type functions
static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
    updateResourceTrackingOnDraw(cb_state);
    cb_state->hasDrawCmd = true;
}

static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
                                   GLOBAL_CB_NODE **cb_state, const char *caller) {
    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
                               VALIDATION_ERROR_1a202415, VALIDATION_ERROR_1a200017, VALIDATION_ERROR_1a200376);
}

static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
}

VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                   uint32_t firstVertex, uint32_t firstInstance) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
        lock.lock();
        PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
                                          VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
                               VALIDATION_ERROR_1a402415, VALIDATION_ERROR_1a400017, VALIDATION_ERROR_1a40039c);
}

static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
                                          uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
                                              "vkCmdDrawIndexed()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
        lock.lock();
        PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
                                           VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
                                           const char *caller) {
    bool skip =
        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
                            VALIDATION_ERROR_1aa02415, VALIDATION_ERROR_1aa00017, VALIDATION_ERROR_1aa003cc);
    *buffer_state = GetBufferState(dev_data, buffer);
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1aa003b4);
    // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
    // VkDrawIndirectCommand structures accessed by this command must be 0, which will require access to the contents of 'buffer'.
    return skip;
}

static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                          BUFFER_STATE *buffer_state) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
                                           uint32_t stride) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
                                               &buffer_state, "vkCmdDrawIndirect()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
        lock.lock();
        PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
        lock.unlock();
    }
}
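
// Illustrative sketch (indirect_buffer assumed created with VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT and
// memory bound, as ValidateMemoryIsBoundToBuffer() requires): the buffer holds VkDrawIndirectCommand
// records that the GPU reads at draw time. firstInstance must be 0 unless the
// drawIndirectFirstInstance feature is enabled; the TODO above notes that this is not yet validated.
//
//     VkDrawIndirectCommand cmd = {};
//     cmd.vertexCount = 3;
//     cmd.instanceCount = 1;
//     cmd.firstVertex = 0;
//     cmd.firstInstance = 0;
//     // ... copy cmd into indirect_buffer at offset 0, then:
//     vkCmdDrawIndirect(cb, indirect_buffer, 0 /* offset */, 1 /* drawCount */, sizeof(cmd) /* stride */);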

static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
                                                  VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
                                                  BUFFER_STATE **buffer_state, const char *caller) {
    bool skip =
        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
                            VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1a602415, VALIDATION_ERROR_1a600017, VALIDATION_ERROR_1a600434);
    *buffer_state = GetBufferState(dev_data, buffer);
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a60041c);
    // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
    // VkDrawIndexedIndirectCommand structures accessed by this command must be 0, which will require access to the contents of
    // 'buffer'.
    return skip;
}

static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                                 BUFFER_STATE *buffer_state) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                  uint32_t count, uint32_t stride) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                      &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
        lock.lock();
        PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
                                       VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
                               VALIDATION_ERROR_19c02415, VALIDATION_ERROR_19c00017, VALIDATION_ERROR_UNDEFINED);
}

static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
}

VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    unique_lock_t lock(global_lock);
    bool skip =
        PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
        lock.lock();
        PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
                                               VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
                                               BUFFER_STATE **buffer_state, const char *caller) {
    bool skip =
        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
                            VALIDATION_ERROR_1a002415, VALIDATION_ERROR_1a000017, VALIDATION_ERROR_UNDEFINED);
    *buffer_state = GetBufferState(dev_data, buffer);
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a000322);
    return skip;
}

static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                              BUFFER_STATE *buffer_state) {
    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}

VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
                                                   &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
        lock.lock();
        PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
        lock.unlock();
    }
}
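
// Illustrative sketch (dispatch_buffer assumed to have indirect usage and bound memory): the three
// uint32_t workgroup counts are read from a VkDispatchIndirectCommand at the given offset.
//
//     VkDispatchIndirectCommand dispatch = {64, 1, 1};  // x, y, z workgroup counts
//     // ... copy dispatch into dispatch_buffer at offset 0, then:
//     vkCmdDispatchIndirect(cb, dispatch_buffer, 0 /* offset, must be a multiple of 4 */);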

VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto src_buffer_state = GetBufferState(device_data, srcBuffer);
    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);

    if (cb_node && src_buffer_state && dst_buffer_state) {
        bool skip = PreCallValidateCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
        if (!skip) {
            PreCallRecordCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
            lock.unlock();
            device_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
        }
    } else {
        lock.unlock();
        assert(0);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                        const VkImageCopy *pRegions) {
    bool skip = false;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto src_image_state = GetImageState(device_data, srcImage);
    auto dst_image_state = GetImageState(device_data, dstImage);
    if (cb_node && src_image_state && dst_image_state) {
        skip = PreCallValidateCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
                                           srcImageLayout, dstImageLayout);
        if (!skip) {
            PreCallRecordCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
                                      dstImageLayout);
            lock.unlock();
            device_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                                     pRegions);
        }
    } else {
        lock.unlock();
        assert(0);
    }
}
5860
5861// Validate that an image's sampleCount matches the requirement for a specific API call
5862bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
5863                              const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
5864    bool skip = false;
5865    if (image_state->createInfo.samples != sample_count) {
5866        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
5867                       HandleToUint64(image_state->image), 0, msgCode, "DS",
5868                       "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s. %s", location,
5869                       HandleToUint64(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
5870                       string_VkSampleCountFlagBits(sample_count), validation_error_map[msgCode]);
5871    }
5872    return skip;
5873}
5874
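// Illustrative use of ValidateImageSampleCount (a sketch; the real call sites live in the
// copy/resolve validation paths, and MSG_CODE stands in for the appropriate error enum).
// Buffer<->image copies, for instance, require single-sampled images:
//
//     skip |= ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT,
//                                      "vkCmdCopyBufferToImage(): dstImage", MSG_CODE);
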
VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                        const VkImageBlit *pRegions, VkFilter filter) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    auto cb_node = GetCBNode(dev_data, commandBuffer);
    auto src_image_state = GetImageState(dev_data, srcImage);
    auto dst_image_state = GetImageState(dev_data, dstImage);

    bool skip = PreCallValidateCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, filter);

    if (!skip) {
        PreCallRecordCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state);
        lock.unlock();
        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                              pRegions, filter);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                                VkImageLayout dstImageLayout, uint32_t regionCount,
                                                const VkBufferImageCopy *pRegions) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = false;
    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto src_buffer_state = GetBufferState(device_data, srcBuffer);
    auto dst_image_state = GetImageState(device_data, dstImage);
    if (cb_node && src_buffer_state && dst_image_state) {
        skip = PreCallValidateCmdCopyBufferToImage(device_data, dstImageLayout, cb_node, src_buffer_state, dst_image_state,
                                                   regionCount, pRegions, "vkCmdCopyBufferToImage()");
    } else {
        lock.unlock();
        assert(0);
        // TODO: report VU01244 here, or put in object tracker?
        return;  // Avoid dereferencing null state pointers and double-unlocking below
    }
    if (!skip) {
        PreCallRecordCmdCopyBufferToImage(device_data, cb_node, src_buffer_state, dst_image_state, regionCount, pRegions,
                                          dstImageLayout);
        lock.unlock();
        device_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip = false;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto src_image_state = GetImageState(device_data, srcImage);
    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
    if (cb_node && src_image_state && dst_buffer_state) {
        skip = PreCallValidateCmdCopyImageToBuffer(device_data, srcImageLayout, cb_node, src_image_state, dst_buffer_state,
                                                   regionCount, pRegions, "vkCmdCopyImageToBuffer()");
    } else {
        lock.unlock();
        assert(0);
        // TODO: report VU01262 here, or put in object tracker?
        return;  // Avoid dereferencing null state pointers and double-unlocking below
    }
    if (!skip) {
        PreCallRecordCmdCopyImageToBuffer(device_data, cb_node, src_image_state, dst_buffer_state, regionCount, pRegions,
                                          srcImageLayout);
        lock.unlock();
        device_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
    }
}

static bool PreCallValidateCmdUpdateBuffer(layer_data *device_data, const GLOBAL_CB_NODE *cb_state,
                                           const BUFFER_STATE *dst_buffer_state) {
    bool skip = false;
    skip |= ValidateMemoryIsBoundToBuffer(device_data, dst_buffer_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400046);
    // Validate that DST buffer has correct usage flags set
    skip |= ValidateBufferUsageFlags(device_data, dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                     VALIDATION_ERROR_1e400044, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdUpdateBuffer()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1e402415);
    skip |= ValidateCmd(device_data, cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
    skip |= insideRenderPass(device_data, cb_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400017);
    return skip;
}

static void PostCallRecordCmdUpdateBuffer(layer_data *device_data, GLOBAL_CB_NODE *cb_state, BUFFER_STATE *dst_buffer_state) {
    // Update bindings between buffer and cmd buffer
    AddCommandBufferBindingBuffer(device_data, cb_state, dst_buffer_state);
    std::function<bool()> function = [=]() {
        SetBufferMemoryValid(device_data, dst_buffer_state, true);
        return false;
    };
    cb_state->validate_functions.push_back(function);
}

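// The lambda pushed onto validate_functions above does not run here; it is drained when the
// command buffer is submitted, so the buffer's contents are only marked valid once the update
// is actually queued. A sketch of how such deferred checks are consumed (simplified; the real
// draining happens in the queue-submit path):
//
//     for (auto &fn : cb_state->validate_functions) {
//         skip |= fn();
//     }
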
VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                           VkDeviceSize dataSize, const uint32_t *pData) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    auto cb_state = GetCBNode(dev_data, commandBuffer);
    assert(cb_state);
    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
    assert(dst_buff_state);
    skip |= PreCallValidateCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
        lock.lock();
        PostCallRecordCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
        lock.unlock();
    }
}

VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                         VkDeviceSize size, uint32_t data) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto buffer_state = GetBufferState(device_data, dstBuffer);

    if (cb_node && buffer_state) {
        bool skip = PreCallValidateCmdFillBuffer(device_data, cb_node, buffer_state);
        if (!skip) {
            PreCallRecordCmdFillBuffer(device_data, cb_node, buffer_state);
            lock.unlock();
            device_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
        }
    } else {
        lock.unlock();
        assert(0);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
                                               const VkClearRect *pRects) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    {
        lock_guard_t lock(global_lock);
        skip = PreCallValidateCmdClearAttachments(dev_data, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
    }
    if (!skip) dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}

VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                              const VkClearColorValue *pColor, uint32_t rangeCount,
                                              const VkImageSubresourceRange *pRanges) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    bool skip = PreCallValidateCmdClearColorImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
    if (!skip) {
        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
        lock.unlock();
        dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                                     const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                                                     const VkImageSubresourceRange *pRanges) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    bool skip = PreCallValidateCmdClearDepthStencilImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
    if (!skip) {
        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
        lock.unlock();
        dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                           VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                           const VkImageResolve *pRegions) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    auto cb_node = GetCBNode(dev_data, commandBuffer);
    auto src_image_state = GetImageState(dev_data, srcImage);
    auto dst_image_state = GetImageState(dev_data, dstImage);

    bool skip = PreCallValidateCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions);

    if (!skip) {
        PreCallRecordCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state);
        lock.unlock();
        dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                                 pRegions);
    }
}

VKAPI_ATTR void VKAPI_CALL GetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
                                                     VkSubresourceLayout *pLayout) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    bool skip = PreCallValidateGetImageSubresourceLayout(device_data, image, pSubresource);
    if (!skip) {
        device_data->dispatch_table.GetImageSubresourceLayout(device, image, pSubresource, pLayout);
    }
}

bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->eventToStageMap[event] = stageMask;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
    }
    return false;
}

VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1d402415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
        skip |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_1d400017);
        skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()", VALIDATION_ERROR_1d4008fc,
                                             VALIDATION_ERROR_1d4008fe);
        auto event_state = GetEventNode(dev_data, event);
        if (event_state) {
            addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
            event_state->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        pCB->eventUpdates.emplace_back([=](VkQueue q) { return setEventStageMask(q, commandBuffer, event, stageMask); });
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
}

VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1c402415);
        skip |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
        skip |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_1c400017);
        skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdResetEvent()", VALIDATION_ERROR_1c400904,
                                             VALIDATION_ERROR_1c400906);
        auto event_state = GetEventNode(dev_data, event);
        if (event_state) {
            addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
            event_state->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        // TODO : Add check for VALIDATION_ERROR_32c008f8
        pCB->eventUpdates.emplace_back(
            [=](VkQueue q) { return setEventStageMask(q, commandBuffer, event, VkPipelineStageFlags(0)); });
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
}

// Return the input pipeline stage flags, expanded into individual bits if VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT is set
static VkPipelineStageFlags ExpandPipelineStageFlags(VkPipelineStageFlags inflags) {
    return (inflags != VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT)
               ? inflags
               : (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
                  VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
                  VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
                  VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
                  VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
                  VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
}

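// For example, ExpandPipelineStageFlags(VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) returns the OR of
// the individual graphics stages listed above, so the subset comparisons below see concrete
// bits. Any other input, including a mask that merely contains the ALL_GRAPHICS bit among
// others, is returned unchanged:
//
//     ExpandPipelineStageFlags(VK_PIPELINE_STAGE_TRANSFER_BIT);  // == TRANSFER_BIT, unchanged
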
// Validate VUs for Pipeline Barriers that are within a renderPass
// Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
static bool ValidateRenderPassPipelineBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE const *cb_state,
                                               VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
                                               VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
                                               const VkMemoryBarrier *mem_barriers, uint32_t bufferBarrierCount,
                                               const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                                               const VkImageMemoryBarrier *pImageMemBarriers) {
    bool skip = false;
    auto rp_state = cb_state->activeRenderPass;
    auto rp_handle = HandleToUint64(rp_state->renderPass);
    if (!rp_state->hasSelfDependency[cb_state->activeSubpass]) {
        skip |=
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
                    __LINE__, VALIDATION_ERROR_1b800928, "CORE",
                    "%s: Barriers cannot be set during subpass %d of renderPass 0x%" PRIx64
                    " with no self-dependency specified. %s",
                    funcName, cb_state->activeSubpass, rp_handle, validation_error_map[VALIDATION_ERROR_1b800928]);
    } else {
        assert(rp_state->subpass_to_dependency_index[cb_state->activeSubpass] != -1);
        const auto &sub_dep = rp_state->createInfo.pDependencies[rp_state->subpass_to_dependency_index[cb_state->activeSubpass]];
        const auto &sub_src_stage_mask = ExpandPipelineStageFlags(sub_dep.srcStageMask);
        const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(sub_dep.dstStageMask);
        if ((sub_src_stage_mask != VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) &&
            (src_stage_mask != (sub_src_stage_mask & src_stage_mask))) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, __LINE__, VALIDATION_ERROR_1b80092a, "CORE",
                            "%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask(0x%X) of "
                            "subpass %d of renderPass 0x%" PRIx64 ". %s",
                            funcName, src_stage_mask, sub_src_stage_mask, cb_state->activeSubpass, rp_handle,
                            validation_error_map[VALIDATION_ERROR_1b80092a]);
        }
        if ((sub_dst_stage_mask != VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) &&
            (dst_stage_mask != (sub_dst_stage_mask & dst_stage_mask))) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, __LINE__, VALIDATION_ERROR_1b80092c, "CORE",
                            "%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask(0x%X) of "
                            "subpass %d of renderPass 0x%" PRIx64 ". %s",
                            funcName, dst_stage_mask, sub_dst_stage_mask, cb_state->activeSubpass, rp_handle,
                            validation_error_map[VALIDATION_ERROR_1b80092c]);
        }
        const auto &sub_src_access_mask = sub_dep.srcAccessMask;
        const auto &sub_dst_access_mask = sub_dep.dstAccessMask;
        for (uint32_t i = 0; i < mem_barrier_count; ++i) {
            const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask;
            if (mb_src_access_mask != (sub_src_access_mask & mb_src_access_mask)) {
                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, __LINE__, VALIDATION_ERROR_1b80092e, "CORE",
                                "%s: Barrier pMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
                                "srcAccessMask(0x%X) of "
                                "subpass %d of renderPass 0x%" PRIx64 ". %s",
                                funcName, i, mb_src_access_mask, sub_src_access_mask, cb_state->activeSubpass, rp_handle,
                                validation_error_map[VALIDATION_ERROR_1b80092e]);
            }
            const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask;
            if (mb_dst_access_mask != (sub_dst_access_mask & mb_dst_access_mask)) {
                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, __LINE__, VALIDATION_ERROR_1b800930, "CORE",
                                "%s: Barrier pMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
                                "dstAccessMask(0x%X) of "
                                "subpass %d of renderPass 0x%" PRIx64 ". %s",
                                funcName, i, mb_dst_access_mask, sub_dst_access_mask, cb_state->activeSubpass, rp_handle,
                                validation_error_map[VALIDATION_ERROR_1b800930]);
            }
        }
        if (sub_dep.dependencyFlags != dependency_flags) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, __LINE__, VALIDATION_ERROR_1b800932, "CORE",
                            "%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency "
                            "dependencyFlags value (0x%X) for "
                            "subpass %d of renderPass 0x%" PRIx64 ". %s",
                            funcName, dependency_flags, sub_dep.dependencyFlags, cb_state->activeSubpass, rp_handle,
                            validation_error_map[VALIDATION_ERROR_1b800932]);
        }
    }
    return skip;
}

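// The subset comparisons above rely on the idiom `mask != (allowed & mask)`, which is true
// exactly when `mask` carries a bit outside `allowed`. Worked example (illustrative values):
//
//     src_stage_mask     = VERTEX_SHADER | TRANSFER
//     sub_src_stage_mask = VERTEX_SHADER | FRAGMENT_SHADER
//     sub_src_stage_mask & src_stage_mask == VERTEX_SHADER, which != src_stage_mask,
//     so TRANSFER is not covered by the subpass dependency and an error is logged.
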
static bool ValidateBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE const *cb_state,
                             VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                             const VkImageMemoryBarrier *pImageMemBarriers) {
    bool skip = false;
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        auto image_data = GetImageState(device_data, mem_barrier->image);
        if (image_data) {
            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
                // be VK_QUEUE_FAMILY_IGNORED
                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
                    skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer),
                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                    "%s: Image Barrier for image 0x%" PRIx64
                                    " was created with sharingMode of "
                                    "VK_SHARING_MODE_CONCURRENT. Src and dst "
                                    "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
                                    funcName, HandleToUint64(mem_barrier->image));
                }
            } else {
                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
                // or both be a valid queue family
                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
                    (src_q_f_index != dst_q_f_index)) {
                    skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer),
                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                    "%s: Image 0x%" PRIx64
                                    " was created with sharingMode "
                                    "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
                                    "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
                                    "must be.",
                                    funcName, HandleToUint64(mem_barrier->image));
                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
                           ((src_q_f_index >= device_data->phys_dev_properties.queue_family_properties.size()) ||
                            (dst_q_f_index >= device_data->phys_dev_properties.queue_family_properties.size()))) {
                    skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer),
                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                    "%s: Image 0x%" PRIx64
                                    " was created with sharingMode "
                                    "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
                                    " or dstQueueFamilyIndex %d is greater than the number of "
                                    "queueFamilies (" PRINTF_SIZE_T_SPECIFIER ") created for this device.",
                                    funcName, HandleToUint64(mem_barrier->image), src_q_f_index, dst_q_f_index,
                                    device_data->phys_dev_properties.queue_family_properties.size());
                }
            }
        }

        if (mem_barrier->oldLayout != mem_barrier->newLayout) {
            if (cb_state->activeRenderPass) {
                skip |=
                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_1b80093a, "DS",
                            "%s: As the Image Barrier for image 0x%" PRIx64
                            " is being executed within a render pass instance, oldLayout must equal newLayout yet they are "
                            "%s and %s. %s",
                            funcName, HandleToUint64(mem_barrier->image), string_VkImageLayout(mem_barrier->oldLayout),
                            string_VkImageLayout(mem_barrier->newLayout), validation_error_map[VALIDATION_ERROR_1b80093a]);
            }
            skip |= ValidateMaskBitsFromLayouts(device_data, cb_state->commandBuffer, mem_barrier->srcAccessMask,
                                                mem_barrier->oldLayout, "Source");
            skip |= ValidateMaskBitsFromLayouts(device_data, cb_state->commandBuffer, mem_barrier->dstAccessMask,
                                                mem_barrier->newLayout, "Dest");
        }
        if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                            "%s: Image Layout cannot be transitioned to UNDEFINED or "
                            "PREINITIALIZED.",
                            funcName);
        }
        if (image_data) {
            auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
            skip |= ValidateImageAspectMask(device_data, image_data->image, image_data->createInfo.format, aspect_mask, funcName);

            std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
            skip |= ValidateImageSubresourceRange(device_data, image_data, false, mem_barrier->subresourceRange, funcName,
                                                  param_name.c_str());
        }
    }

    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        auto mem_barrier = &pBufferMemBarriers[i];
        if (cb_state->activeRenderPass) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                            "%s: Buffer Barriers cannot be used during a render pass.", funcName);
        }
        if (!mem_barrier) continue;

        // Validate buffer barrier queue family indices
        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->srcQueueFamilyIndex >= device_data->phys_dev_properties.queue_family_properties.size()) ||
            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->dstQueueFamilyIndex >= device_data->phys_dev_properties.queue_family_properties.size())) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                            "%s: Buffer Barrier 0x%" PRIx64
                            " has QueueFamilyIndex greater "
                            "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
                            funcName, HandleToUint64(mem_barrier->buffer),
                            device_data->phys_dev_properties.queue_family_properties.size());
        }

        auto buffer_state = GetBufferState(device_data, mem_barrier->buffer);
        if (buffer_state) {
            auto buffer_size = buffer_state->requirements.size;
            if (mem_barrier->offset >= buffer_size) {
                skip |= log_msg(
                    device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
                    funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
                    HandleToUint64(buffer_size));
            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
                skip |=
                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                            "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
                            " whose sum is greater than total size 0x%" PRIx64 ".",
                            funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
                            HandleToUint64(mem_barrier->size), HandleToUint64(buffer_size));
            }
        }
    }
    return skip;
}

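// For reference, a buffer memory barrier that satisfies the checks above might look like the
// following sketch (handles hypothetical; assumes an EXCLUSIVE-sharing buffer, used outside a
// render pass):
//
//     VkBufferMemoryBarrier bmb = {};
//     bmb.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
//     bmb.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//     bmb.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
//     bmb.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;  // no ownership transfer
//     bmb.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     bmb.buffer = buffer;
//     bmb.offset = 0;
//     bmb.size = VK_WHOLE_SIZE;  // exempt from the offset+size overflow check above
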
bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                            VkPipelineStageFlags sourceStageMask) {
    bool skip = false;
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event = pCB->events[firstEventIndex + i];
        auto queue_data = dev_data->queueMap.find(queue);
        if (queue_data == dev_data->queueMap.end()) return false;
        auto event_data = queue_data->second.eventToStageMap.find(event);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        } else {
            auto global_event_data = GetEventNode(dev_data, event);
            if (!global_event_data) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                HandleToUint64(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                                "Event 0x%" PRIx64 " cannot be waited on if it has never been set.", HandleToUint64(event));
            } else {
                stageMask |= global_event_data->stageMask;
            }
        }
    }
    // TODO: Need to validate that VK_PIPELINE_STAGE_HOST_BIT is only set if vkSetEvent was used,
    // but vkSetEvent can be called at any time.
    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_1e62d401, "DS",
                        "Submitting cmdbuffer with call to vkCmdWaitEvents "
                        "using srcStageMask 0x%X which must be the bitwise "
                        "OR of the stageMask parameters used in calls to "
                        "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
                        "used with vkSetEvent but instead is 0x%X. %s",
                        sourceStageMask, stageMask, validation_error_map[VALIDATION_ERROR_1e62d401]);
    }
    return skip;
}

// Note that we only check bits that HAVE required queueflags -- don't care entries are skipped
static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
    {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
    {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};

static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
                                                            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
                                                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                                                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                                            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
                                                            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                                            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};

bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
                                      VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
                                      UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    // Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags
    for (const auto &item : stage_flag_bit_array) {
        if (stage_mask & item) {
            if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(command_buffer), __LINE__, error_code, "DL",
                            "%s(): %s flag %s is not compatible with the queue family properties of this "
                            "command buffer. %s",
                            function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)),
                            validation_error_map[error_code]);
            }
        }
    }
    return skip;
}

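// Illustrative failure case for CheckStageMaskQueueCompatibility (a sketch; cmd_buffer and
// error_code are hypothetical stand-ins): on a transfer-only queue family, a stage mask
// containing VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT fails the table lookup above, since that
// stage requires VK_QUEUE_COMPUTE_BIT, and one message is emitted per offending bit:
//
//     skip |= CheckStageMaskQueueCompatibility(dev_data, cmd_buffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
//                                              VK_QUEUE_TRANSFER_BIT, "vkCmdPipelineBarrier",
//                                              "srcStageMask", error_code);  // logs one error
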
bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE const *cb_state,
                                                VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
                                                const char *function, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
    auto physical_device_state = GetPhysicalDeviceState(instance_data, dev_data->physical_device);

    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.

    if (queue_family_index < physical_device_state->queue_family_properties.size()) {
        VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;

        if ((source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
                                                     function, "srcStageMask", error_code);
        }
        if ((dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
                                                     function, "dstStageMask", error_code);
        }
    }
    return skip;
}

VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                         VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
                                         uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                         uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                         uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, "vkCmdWaitEvents",
                                                           VALIDATION_ERROR_1e600918);
        skip |= ValidateStageMaskGsTsEnables(dev_data, sourceStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e60090e,
                                             VALIDATION_ERROR_1e600912);
        skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e600910,
                                             VALIDATION_ERROR_1e600914);
        auto first_event_index = cb_state->events.size();
        for (uint32_t i = 0; i < eventCount; ++i) {
            auto event_state = GetEventNode(dev_data, pEvents[i]);
            if (event_state) {
                addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(pEvents[i]), kVulkanObjectTypeEvent}, cb_state);
                event_state->cb_bindings.insert(cb_state);
            }
            cb_state->waitedEvents.insert(pEvents[i]);
            cb_state->events.push_back(pEvents[i]);
        }
        cb_state->eventUpdates.emplace_back([=](VkQueue q) {
            return validateEventStageMask(q, cb_state, eventCount, first_event_index, sourceStageMask);
        });
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1e602415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
        skip |= ValidateBarriersToImages(dev_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
        if (!skip) {
            TransitionImageLayouts(dev_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        }
        skip |= ValidateBarriers(dev_data, "vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount,
                                 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                                 pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skip)
        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                               imageMemoryBarrierCount, pImageMemoryBarriers);
}

static bool PreCallValidateCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE const *cb_state,
                                              VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                                              VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount,
                                              const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
                                              const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
                                              const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip = false;
    skip |= ValidateStageMasksAgainstQueueCapabilities(device_data, cb_state, srcStageMask, dstStageMask, "vkCmdPipelineBarrier",
                                                       VALIDATION_ERROR_1b80093e);
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdPipelineBarrier()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b802415);
    skip |= ValidateCmd(device_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
    skip |= ValidateStageMaskGsTsEnables(device_data, srcStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800920,
                                         VALIDATION_ERROR_1b800924);
    skip |= ValidateStageMaskGsTsEnables(device_data, dstStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800922,
                                         VALIDATION_ERROR_1b800926);
    skip |=
        ValidateBarriersToImages(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()");
    if (cb_state->activeRenderPass) {
        skip |= ValidateRenderPassPipelineBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask,
                                                   dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                   pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    skip |= ValidateBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount,
                             pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                             pImageMemoryBarriers);
    return skip;
}

static void PreCallRecordCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
                                            uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    TransitionImageLayouts(device_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
}

VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip = false;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
    if (cb_state) {
        skip |= PreCallValidateCmdPipelineBarrier(device_data, cb_state, srcStageMask, dstStageMask, dependencyFlags,
                                                  memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                  pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
        if (!skip) {
            PreCallRecordCmdPipelineBarrier(device_data, cb_state, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip) {
        device_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
                                                       pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                                       imageMemoryBarrierCount, pImageMemoryBarriers);
    }
}

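// For context, an application-side barrier that would flow through the entry point above
// (sketch; `cmd` is a hypothetical command buffer handle) -- a transfer write-to-read hazard
// guarded by a single global memory barrier:
//
//     VkMemoryBarrier mb = {VK_STRUCTURE_TYPE_MEMORY_BARRIER, nullptr,
//                           VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT};
//     vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                          0, 1, &mb, 0, nullptr, 0, nullptr);
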
static bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->queryToStateMap[object] = value;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.queryToStateMap[object] = value;
    }
    return false;
}

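// setQueryState mirrors setEventStageMask above: query availability is tracked both per
// command buffer and per queue, with updates applied lazily. For example, the lambda recorded
// by CmdEndQuery further below only marks its query available when it runs at submit time:
//
//     cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, true); });
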
VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdBeginQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_17802415);
        skip |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);

    lock.lock();
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->activeQueries.insert(query);
        pCB->startedQueries.insert(query);
        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, pCB);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    QueryObject query = {queryPool, slot};
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        if (!cb_state->activeQueries.count(query)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1ae00652, "DS",
                            "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d. %s",
                            HandleToUint64(queryPool), slot, validation_error_map[VALIDATION_ERROR_1ae00652]);
        }
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdEndQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1ae02415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_ENDQUERY, "vkCmdEndQuery()");
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);

    lock.lock();
    if (cb_state) {
        cb_state->activeQueries.erase(query);
        cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, true); });
        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                             uint32_t queryCount) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= insideRenderPass(dev_data, cb_state, "vkCmdResetQueryPool()", VALIDATION_ERROR_1c600017);
        skip |= ValidateCmd(dev_data, cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1c602415);
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);

    lock.lock();
    if (cb_state) {
        for (uint32_t i = 0; i < queryCount; i++) {
            QueryObject query = {queryPool, firstQuery + i};
            cb_state->waitedEventsBeforeQueryReset[query] = cb_state->waitedEvents;
            cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, false); });
        }
        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
    }
}
6694
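// Illustrative sketch (not part of the layer): queries must be reset before reuse, and the reset must happen
// outside a render pass instance, which is what the insideRenderPass() check above enforces.
//
//     vkCmdResetQueryPool(cmd_buf, query_pool, 0 /*firstQuery*/, 2 /*queryCount*/);
//     vkCmdBeginQuery(cmd_buf, query_pool, 0 /*slot*/, 0 /*flags*/);
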
static bool IsQueryInvalid(layer_data *dev_data, QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) {
    QueryObject query = {queryPool, queryIndex};
    auto query_data = queue_data->queryToStateMap.find(query);
    if (query_data != queue_data->queryToStateMap.end()) {
        if (!query_data->second) return true;
    } else {
        auto it = dev_data->queryToStateMap.find(query);
        if (it == dev_data->queryToStateMap.end() || !it->second) return true;
    }

    return false;
}

static bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    auto queue_data = GetQueueState(dev_data, queue);
    if (!queue_data) return false;
    for (uint32_t i = 0; i < queryCount; i++) {
        if (IsQueryInvalid(dev_data, queue_data, queryPool, firstQuery + i)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                            "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
                            HandleToUint64(queryPool), firstQuery + i);
        }
    }
    return skip;
}

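// Note: validateQuery() runs at queue-submission time rather than at record time. CmdCopyQueryPoolResults() below
// defers it by capturing its arguments in a queryUpdates lambda; the lambdas recorded on a command buffer are
// invoked when that command buffer is submitted, once actual query availability is known.
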
VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                   uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                   VkDeviceSize stride, VkQueryResultFlags flags) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    auto cb_node = GetCBNode(dev_data, commandBuffer);
    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
    if (cb_node && dst_buff_state) {
        skip |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400674);
        // Validate that DST buffer has correct usage flags set
        skip |=
            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_19400672,
                                     "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdCopyQueryPoolResults()",
                                      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_19402415);
        skip |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
        skip |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400017);
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
                                                     stride, flags);

    lock.lock();
    if (cb_node && dst_buff_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        cb_node->validate_functions.emplace_back([=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        });
        cb_node->queryUpdates.emplace_back([=](VkQueue q) {
            return validateQuery(q, cb_node, queryPool, firstQuery, queryCount);
        });
        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_node);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
                                            uint32_t offset, uint32_t size, const void *pValues) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1bc02415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
    }
    skip |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
    if (0 == stageFlags) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1bc2dc03, "DS",
                        "vkCmdPushConstants() call has no stageFlags set. %s", validation_error_map[VALIDATION_ERROR_1bc2dc03]);
    }

    // Check if specified push constant range falls within a pipeline-defined range which has matching stageFlags.
    // The spec doesn't seem to disallow having multiple push constant ranges with the
    // same offset and size, but different stageFlags.  So we can't just check the
    // stageFlags in the first range with matching offset and size.
    if (!skip) {
        const auto &ranges = getPipelineLayout(dev_data, layout)->push_constant_ranges;
        bool found_matching_range = false;
        for (const auto &range : ranges) {
            if ((stageFlags == range.stageFlags) && (offset >= range.offset) && (offset + size <= range.offset + range.size)) {
                found_matching_range = true;
                break;
            }
        }
        if (!found_matching_range) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1bc002de, "DS",
                            "vkCmdPushConstants() stageFlags = 0x%" PRIx32
                            " do not match the stageFlags in any of the ranges with"
                            " offset = %d and size = %d in pipeline layout 0x%" PRIx64 ". %s",
                            (uint32_t)stageFlags, offset, size, HandleToUint64(layout),
                            validation_error_map[VALIDATION_ERROR_1bc002de]);
        }
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}

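// Illustrative sketch (not part of the layer): for the range check above to pass, the vkCmdPushConstants() call
// must fall entirely within a VkPushConstantRange of the pipeline layout whose stageFlags match exactly:
//
//     VkPushConstantRange range = {VK_SHADER_STAGE_VERTEX_BIT, 0 /*offset*/, 64 /*size*/};
//     // ... pipeline layout created with this range ...
//     vkCmdPushConstants(cmd_buf, layout, VK_SHADER_STAGE_VERTEX_BIT, 16, 32, data);  // OK: [16, 48) is inside [0, 64)
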
VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                             VkQueryPool queryPool, uint32_t slot) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWriteTimestamp()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1e802415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);

    lock.lock();
    if (cb_state) {
        QueryObject query = {queryPool, slot};
        cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, true); });
    }
}

static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;

    for (uint32_t attach = 0; attach < count; attach++) {
        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
            // Attachment counts are verified elsewhere, but prevent an invalid access
            if (attachments[attach].attachment < fbci->attachmentCount) {
                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
                auto view_state = GetImageViewState(dev_data, *image_view);
                if (view_state) {
                    // Check the image state pointer for null before dereferencing it
                    auto image_state = GetImageState(dev_data, view_state->create_info.image);
                    if (image_state) {
                        const VkImageCreateInfo *ici = &image_state->createInfo;
                        if ((ici->usage & usage_flag) == 0) {
                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, error_code, "DS",
                                            "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
                                            "IMAGE_USAGE flags (%s). %s",
                                            attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag),
                                            validation_error_map[error_code]);
                        }
                    }
                }
            }
        }
    }
    return skip;
}

// Validate VkFramebufferCreateInfo which includes:
// 1. attachmentCount equals renderPass attachmentCount
// 2. corresponding framebuffer and renderpass attachments have matching formats
// 3. corresponding framebuffer and renderpass attachments have matching sample counts
// 4. fb attachments only have a single mip level
// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
// 8. fb dimensions are within physical device limits
// (An illustrative create-info sketch follows the function body below.)
static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    bool skip = false;

    auto rp_state = GetRenderPassState(dev_data, pCreateInfo->renderPass);
    if (rp_state) {
        const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
            skip |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006d8, "DS",
                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer. %s",
                pCreateInfo->attachmentCount, rpci->attachmentCount, HandleToUint64(pCreateInfo->renderPass),
                validation_error_map[VALIDATION_ERROR_094006d8]);
        } else {
            // attachmentCounts match, so make sure corresponding attachment details line up
            const VkImageView *image_views = pCreateInfo->pAttachments;
            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = GetImageViewState(dev_data, image_views[i]);
                // Guard against an unknown view handle; invalid handles are reported by object tracking
                if (!view_state) continue;
                auto &ivci = view_state->create_info;
                if (ivci.format != rpci->pAttachments[i].format) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006e0, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
                        "the format of %s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
                        HandleToUint64(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_094006e0]);
                }
                const VkImageCreateInfo *ici = &GetImageState(dev_data, ivci.image)->createInfo;
                if (ici->samples != rpci->pAttachments[i].samples) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006e2, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
                        "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
                        HandleToUint64(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_094006e2]);
                }
                // Verify that view only has a single mip level
                if (ivci.subresourceRange.levelCount != 1) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, __LINE__, VALIDATION_ERROR_094006e6, "DS",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
                                    "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer. %s",
                                    i, ivci.subresourceRange.levelCount, validation_error_map[VALIDATION_ERROR_094006e6]);
                }
                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
                    (mip_height < pCreateInfo->height)) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006e4, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
                        "than the corresponding framebuffer dimensions. Here are the respective dimensions for attachment #%u, "
                        "framebuffer:\n"
                        "width: %u, %u\n"
                        "height: %u, %u\n"
                        "layerCount: %u, %u\n%s",
                        i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height, pCreateInfo->height,
                        ivci.subresourceRange.layerCount, pCreateInfo->layers, validation_error_map[VALIDATION_ERROR_094006e4]);
                }
                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006e8, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a non-identity swizzle. All framebuffer "
                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
                        "r swizzle = %s\n"
                        "g swizzle = %s\n"
                        "b swizzle = %s\n"
                        "a swizzle = %s\n"
                        "%s",
                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a),
                        validation_error_map[VALIDATION_ERROR_094006e8]);
                }
            }
        }
        // Verify correct attachment usage flags
        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
            // Verify input attachments:
            skip |=
                MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
                           pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_094006de);
            // Verify color attachments:
            skip |=
                MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
                           pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_094006da);
            // Verify depth/stencil attachments:
            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
                skip |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
                                   VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_094006dc);
            }
        }
    }
    // Verify FB dimensions are within physical device limits
    if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006ec, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. "
                        "Requested width: %u, device max: %u\n"
                        "%s",
                        pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
                        validation_error_map[VALIDATION_ERROR_094006ec]);
    }
    if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006f0, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. "
                        "Requested height: %u, device max: %u\n"
                        "%s",
                        pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
                        validation_error_map[VALIDATION_ERROR_094006f0]);
    }
    if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006f4, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. "
                        "Requested layers: %u, device max: %u\n"
                        "%s",
                        pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers,
                        validation_error_map[VALIDATION_ERROR_094006f4]);
    }
    // Verify FB dimensions are greater than zero (the fields are unsigned, so only the == 0 case can fail)
    if (pCreateInfo->width == 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006ea, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero. %s",
                        validation_error_map[VALIDATION_ERROR_094006ea]);
    }
    if (pCreateInfo->height == 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006ee, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero. %s",
                        validation_error_map[VALIDATION_ERROR_094006ee]);
    }
    if (pCreateInfo->layers == 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006f2, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero. %s",
                        validation_error_map[VALIDATION_ERROR_094006f2]);
    }
    return skip;
}

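// Illustrative sketch (not part of the layer): a VkFramebufferCreateInfo that satisfies the checks above, assuming
// `color_view` is a single-mip, identity-swizzled view whose image was created with
// VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT and whose format and sample count match attachment 0 of `render_pass`:
//
//     VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO};
//     fbci.renderPass = render_pass;
//     fbci.attachmentCount = 1;       // must equal the render pass attachmentCount
//     fbci.pAttachments = &color_view;
//     fbci.width = 1024;              // each dimension > 0 and within device limits
//     fbci.height = 768;
//     fbci.layers = 1;
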
// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
//  Return true if an error is encountered and callback returns true to skip call down chain
//   false indicates that call down chain should proceed
static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    // TODO : Verify that the renderPass the FB is created with is compatible with the FB
    bool skip = false;
    skip |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
    return skip;
}

// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
    // Shadow create info and store in map
    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
        new FRAMEBUFFER_STATE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));

    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        VkImageView view = pCreateInfo->pAttachments[i];
        auto view_state = GetImageViewState(dev_data, view);
        if (!view_state) {
            continue;
        }
        MT_FB_ATTACHMENT_INFO fb_info;
        fb_info.view_state = view_state;
        fb_info.image = view_state->create_info.image;
        fb_state->attachments.push_back(fb_info);
    }
    dev_data->frameBufferMap[fb] = std::move(fb_state);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
        lock.unlock();
    }
    return result;
}

static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node, we have not found a dependency path, so return false.
    if (processed_nodes.count(index)) return false;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists, return true; else recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
        }
    } else {
        return true;
    }
    return false;
}

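// Illustrative sketch (not part of the layer): for a three-subpass render pass with explicit dependencies
// 0 -> 1 and 1 -> 2, CreatePassDAG() below produces
//     node 0: prev = {},  next = {1}
//     node 1: prev = {0}, next = {2}
//     node 2: prev = {1}, next = {}
// FindDependency(2, 0, ...) walks the prev links 2 -> 1 -> 0 and returns true, so subpasses 0 and 2 are
// considered (transitively) ordered.
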
static bool CheckDependencyExists(const layer_data *dev_data, const uint32_t subpass,
                                  const std::vector<uint32_t> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node,
                                  bool &skip) {
    bool result = true;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no direct dependency exists, an implicit (transitive) one still might. If not, report an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                  FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                dependent_subpasses[k]);
                result = false;
            }
        }
    }
    return result;
}

static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment, return true as next nodes need to preserve the attachment.
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment) return true;
    }
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        if (attachment == subpass.pInputAttachments[j].attachment) return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
    }
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
    if (result && depth > 0) {
        bool has_preserved = false;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = true;
                break;
            }
        }
        if (!has_preserved) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}

template <class T>
bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    // Half-open ranges [offset, offset + size) overlap iff each range starts before the other one ends;
    // identical ranges therefore count as overlapping.
    return ((offset1 + size1) > offset2) && ((offset2 + size2) > offset1);
}

bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}

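// Illustrative sketch (not part of the layer): two views of the same image whose mip ranges are
//     range1 = {baseMipLevel = 0, levelCount = 2} and range2 = {baseMipLevel = 1, levelCount = 2}
// overlap at mip level 1: (0 + 2) > 1 and (1 + 2) > 0, so isRangeOverlapping() returns true for that axis.
// The regions only alias if the layer ranges overlap as well.
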
static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
                                 RENDER_PASS_STATE const *renderPass) {
    bool skip = false;
    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
    auto const pCreateInfo = renderPass->createInfo.ptr();
    auto const &subpass_to_node = renderPass->subpassToNode;
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_state_i = GetImageViewState(dev_data, viewi);
            auto view_state_j = GetImageViewState(dev_data, viewj);
            if (!view_state_i || !view_state_j) {
                continue;
            }
            auto view_ci_i = view_state_i->create_info;
            auto view_ci_j = view_state_j->create_info;
            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto image_data_i = GetImageState(dev_data, view_ci_i.image);
            auto image_data_j = GetImageState(dev_data, view_ci_j.image);
            if (!image_data_i || !image_data_j) {
                continue;
            }
            if (image_data_i->binding.mem == image_data_j->binding.mem &&
                isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
                                   image_data_j->binding.size)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
                                HandleToUint64(framebuffer->framebuffer), __LINE__, VALIDATION_ERROR_12200682, "DS",
                                "Attachment %d aliases attachment %d but doesn't "
                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
                                attachment, other_attachment, validation_error_map[VALIDATION_ERROR_12200682]);
            }
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
                                HandleToUint64(framebuffer->framebuffer), __LINE__, VALIDATION_ERROR_12200682, "DS",
                                "Attachment %d aliases attachment %d but doesn't "
                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
                                other_attachment, attachment, validation_error_map[VALIDATION_ERROR_12200682]);
            }
        }
    }
    // For each attachment, find the subpasses that use it.
    unordered_set<uint32_t> attachmentIndices;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        attachmentIndices.clear();
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            input_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
            attachmentIndices.insert(attachment);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }

            if (attachmentIndices.count(attachment)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
            }
        }
    }
    // If there is a dependency needed make sure one exists
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input then all subpasses that output must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
        }
        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
        }
    }
    // Loop through implicit dependencies; if this pass reads, make sure the attachment is preserved for all passes after it was
    // written.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
        }
    }
    return skip;
}

static bool CreatePassDAG(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo,
                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency,
                          std::vector<int32_t> &subpass_to_dep_index) {
    bool skip = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        DAGNode &subpass_node = subpass_to_node[i];
        subpass_node.pass = i;
        subpass_to_dep_index[i] = -1;  // Default to no dependency and overwrite below as needed
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            if (dependency.srcSubpass == dependency.dstSubpass) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
            }
        } else if (dependency.srcSubpass > dependency.dstSubpass) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            has_self_dependency[dependency.srcSubpass] = true;
        } else {
            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
        }
        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
            subpass_to_dep_index[dependency.srcSubpass] = i;
        }
    }
    return skip;
}

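// Illustrative sketch (not part of the layer): a dependency that adds the DAG edge 0 -> 1 in CreatePassDAG() above:
//
//     VkSubpassDependency dep = {};
//     dep.srcSubpass = 0;  // the earlier pass; must not be greater than dstSubpass
//     dep.dstSubpass = 1;
//     dep.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
//     dep.dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
//     dep.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     dep.dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
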
VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool spirv_valid;

    if (PreCallValidateCreateShaderModule(dev_data, pCreateInfo, &spirv_valid)) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);

    if (res == VK_SUCCESS) {
        lock_guard_t lock(global_lock);
        unique_ptr<shader_module> new_shader_module(spirv_valid ? new shader_module(pCreateInfo) : new shader_module());
        dev_data->shaderModuleMap[*pShaderModule] = std::move(new_shader_module);
    }
    return res;
}

static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
    bool skip = false;
    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_12200684, "DS",
                        "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d. %s", type,
                        attachment, attachment_count, validation_error_map[VALIDATION_ERROR_12200684]);
    }
    return skip;
}

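// True iff exactly one bit of x is set: x & (x - 1) clears the lowest set bit, so the result is zero only for
// powers of two, and the leading `x &&` term rejects zero. Used below to detect mixed sample counts, since
// VkSampleCountFlagBits values are single bits and OR-ing equal counts leaves a single bit set.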
static bool IsPowerOfTwo(unsigned x) { return x && !(x & (x - 1)); }

static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
    bool skip = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_14000698, "DS",
                            "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS. %s", i,
                            validation_error_map[VALIDATION_ERROR_14000698]);
        }

        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            uint32_t attachment = subpass.pPreserveAttachments[j];
            if (attachment == VK_ATTACHMENT_UNUSED) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_140006aa, "DS",
                                "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED. %s", j,
                                validation_error_map[VALIDATION_ERROR_140006aa]);
            } else {
                skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");

                bool found = (subpass.pDepthStencilAttachment != NULL && subpass.pDepthStencilAttachment->attachment == attachment);
                for (uint32_t r = 0; !found && r < subpass.inputAttachmentCount; ++r) {
                    found = (subpass.pInputAttachments[r].attachment == attachment);
                }
                for (uint32_t r = 0; !found && r < subpass.colorAttachmentCount; ++r) {
                    found = (subpass.pColorAttachments[r].attachment == attachment) ||
                            (subpass.pResolveAttachments != NULL && subpass.pResolveAttachments[r].attachment == attachment);
                }
                if (found) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_140006ac, "DS",
                        "CreateRenderPass: subpass %u pPreserveAttachments[%u] (%u) must not be used elsewhere in the subpass. %s",
                        i, j, attachment, validation_error_map[VALIDATION_ERROR_140006ac]);
                }
            }
        }

        auto subpass_performs_resolve =
            subpass.pResolveAttachments &&
            std::any_of(subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
                        [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });

        unsigned sample_count = 0;

        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment;
            if (subpass.pResolveAttachments) {
                attachment = subpass.pResolveAttachments[j].attachment;
                skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");

                if (!skip && attachment != VK_ATTACHMENT_UNUSED &&
                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, __LINE__, VALIDATION_ERROR_140006a2, "DS",
                                    "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
                                    "which must have VK_SAMPLE_COUNT_1_BIT but has %s. %s",
                                    i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
                                    validation_error_map[VALIDATION_ERROR_140006a2]);
                }

                if (!skip && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
                    subpass.pColorAttachments[j].attachment == VK_ATTACHMENT_UNUSED) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, __LINE__, VALIDATION_ERROR_1400069e, "DS",
                                    "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
                                    "which has attachment=VK_ATTACHMENT_UNUSED. %s",
                                    i, attachment, validation_error_map[VALIDATION_ERROR_1400069e]);
                }
            }
            attachment = subpass.pColorAttachments[j].attachment;
            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");

            if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;

                if (subpass_performs_resolve && pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, __LINE__, VALIDATION_ERROR_140006a0, "DS",
                                    "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
                                    "which has VK_SAMPLE_COUNT_1_BIT. %s",
                                    i, attachment, validation_error_map[VALIDATION_ERROR_140006a0]);
                }

                if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED) {
                    const auto &color_desc = pCreateInfo->pAttachments[attachment];
                    const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
                    if (color_desc.format != resolve_desc.format) {
                        skip |=
                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, __LINE__, VALIDATION_ERROR_140006a4, "DS",
                                    "CreateRenderPass:  Subpass %u pColorAttachments[%u] resolves to an attachment with a "
                                    "different format. "
                                    "color format: %u, resolve format: %u. %s",
                                    i, j, color_desc.format, resolve_desc.format, validation_error_map[VALIDATION_ERROR_140006a4]);
                    }
                }
            }
        }

        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");

            if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
            }
        }

        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
        }

        if (sample_count && !IsPowerOfTwo(sample_count)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_0082b401, "DS",
                            "CreateRenderPass:  Subpass %u attempts to render to "
                            "attachments with inconsistent sample counts. %s",
                            i, validation_error_map[VALIDATION_ERROR_0082b401]);
        }
    }
    return skip;
}

static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass, uint32_t index, bool is_read) {
    if (index == VK_ATTACHMENT_UNUSED) return;

    if (!render_pass->attachment_first_read.count(index)) render_pass->attachment_first_read[index] = is_read;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    unique_lock_t lock(global_lock);
    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
    //       ValidateLayouts.
    skip |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        skip |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].srcStageMask, "vkCreateRenderPass()",
                                             VALIDATION_ERROR_13e006b8, VALIDATION_ERROR_13e006bc);
        skip |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].dstStageMask, "vkCreateRenderPass()",
                                             VALIDATION_ERROR_13e006ba, VALIDATION_ERROR_13e006be);
    }
    if (!skip) {
        skip |= ValidateLayouts(dev_data, device, pCreateInfo);
    }
    lock.unlock();

    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);

    if (VK_SUCCESS == result) {
        lock.lock();

        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
        std::vector<int32_t> subpass_to_dep_index(pCreateInfo->subpassCount);
        skip |= CreatePassDAG(dev_data, pCreateInfo, subpass_to_node, has_self_dependency, subpass_to_dep_index);

        auto render_pass = unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo));
        render_pass->renderPass = *pRenderPass;
        render_pass->hasSelfDependency = has_self_dependency;
        render_pass->subpassToNode = subpass_to_node;
        render_pass->subpass_to_dependency_index = subpass_to_dep_index;

        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
                MarkAttachmentFirstUse(render_pass.get(), subpass.pColorAttachments[j].attachment, false);

                // resolve attachments are considered to be written
                if (subpass.pResolveAttachments) {
                    MarkAttachmentFirstUse(render_pass.get(), subpass.pResolveAttachments[j].attachment, false);
                }
            }
            if (subpass.pDepthStencilAttachment) {
                MarkAttachmentFirstUse(render_pass.get(), subpass.pDepthStencilAttachment->attachment, false);
            }
            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
                MarkAttachmentFirstUse(render_pass.get(), subpass.pInputAttachments[j].attachment, true);
            }
        }

        dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
    }
    return result;
}

static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, char const *cmd_name,
                                         UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), __LINE__, error_code, "DS",
                        "Cannot execute command %s on a secondary command buffer. %s", cmd_name, validation_error_map[error_code]);
    }
    return skip;
}

7576static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
7577    bool skip = false;
7578    const safe_VkFramebufferCreateInfo *pFramebufferInfo =
7579        &GetFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
7580    if (pRenderPassBegin->renderArea.offset.x < 0 ||
7581        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
7582        pRenderPassBegin->renderArea.offset.y < 0 ||
7583        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
        skip |= static_cast<bool>(log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with renderArea not within the bounds of the "
            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
            "height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
    }
    return skip;
}
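
// Illustrative example (not part of the layer; hypothetical values): with an 800x600 framebuffer, a renderArea of
// offset (100, 100) and extent 800x600 fails the check above because 100 + 800 > 800 and 100 + 600 > 600, while
// offset (0, 0) with extent 800x600 passes.
//
//     VkRenderPassBeginInfo rp_begin = {};
//     rp_begin.renderArea.offset = {100, 100};
//     rp_begin.renderArea.extent = {800, 600};  // exceeds an 800x600 framebuffer -> DRAWSTATE_INVALID_RENDER_AREA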

// If the attachment is stencil-only, check the stencil[Load|Store]Op; if it is a depth-only or color attachment, check the
// [load|store]Op; if it has both depth and stencil aspects, check both ops.
// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
template <typename T>
static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
    if (color_depth_op != op && stencil_op != op) {
        return false;
    }
    bool check_color_depth_load_op = !FormatIsStencilOnly(format);
    bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;

    return ((check_color_depth_load_op && (color_depth_op == op)) ||
            (check_stencil_load_op && (stencil_op == op)));
}
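
// Illustrative example (not part of the layer; hypothetical attachment description): for a combined depth/stencil
// format both aspects are checked, so both of the following queries return true.
//
//     VkAttachmentDescription att = {};
//     att.format = VK_FORMAT_D24_UNORM_S8_UINT;
//     att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;        // applies to the depth aspect
//     att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;  // applies to the stencil aspect
//     FormatSpecificLoadAndStoreOpSettings(att.format, att.loadOp, att.stencilLoadOp,
//                                          VK_ATTACHMENT_LOAD_OP_CLEAR);  // true (depth is cleared)
//     FormatSpecificLoadAndStoreOpSettings(att.format, att.loadOp, att.stencilLoadOp,
//                                          VK_ATTACHMENT_LOAD_OP_LOAD);   // true (stencil is loaded)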

VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                              VkSubpassContents contents) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
    auto render_pass_state = pRenderPassBegin ? GetRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
    auto framebuffer = pRenderPassBegin ? GetFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
    if (cb_node) {
        if (render_pass_state) {
            uint32_t clear_op_size = 0;  // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
            cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
            for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
                auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
                    clear_op_size = static_cast<uint32_t>(i) + 1;
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
                        return false;
                    };
                    cb_node->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
                                                                pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
                        return false;
                    };
                    cb_node->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
                                                                pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_LOAD)) {
                    std::function<bool()> function = [=]() {
                        return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
                                                          "vkCmdBeginRenderPass()");
                    };
                    cb_node->validate_functions.push_back(function);
                }
                if (render_pass_state->attachment_first_read[i]) {
                    std::function<bool()> function = [=]() {
                        return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
                                                          "vkCmdBeginRenderPass()");
                    };
                    cb_node->validate_functions.push_back(function);
                }
            }
            if (clear_op_size > pRenderPassBegin->clearValueCount) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                    HandleToUint64(render_pass_state->renderPass), __LINE__, VALIDATION_ERROR_1200070c, "DS",
                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u, but there must "
                    "be at least %u entries in the pClearValues array to account for the highest-indexed attachment in renderPass "
                    "0x%" PRIx64
                    " that uses VK_ATTACHMENT_LOAD_OP_CLEAR, which requires %u entries. Note that the pClearValues array "
                    "is indexed by attachment number, so even if some pClearValues entries between 0 and %u correspond to "
                    "attachments that aren't cleared, they will be ignored. %s",
                    pRenderPassBegin->clearValueCount, clear_op_size, HandleToUint64(render_pass_state->renderPass), clear_op_size,
                    clear_op_size - 1, validation_error_map[VALIDATION_ERROR_1200070c]);
            }
            skip |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
            skip |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin,
                                                          GetFramebufferState(dev_data, pRenderPassBegin->framebuffer));
            skip |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00017);
            skip |= ValidateDependencies(dev_data, framebuffer, render_pass_state);
            skip |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00019);
            skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBeginRenderPass()", VK_QUEUE_GRAPHICS_BIT,
                                          VALIDATION_ERROR_17a02415);
            skip |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
            cb_node->activeRenderPass = render_pass_state;
            // This is a shallow copy as that is all that is needed for now
            cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
            cb_node->activeSubpass = 0;
            cb_node->activeSubpassContents = contents;
            cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
            // Connect this framebuffer and its children to this cmdBuffer
            AddFramebufferBinding(dev_data, cb_node, framebuffer);
            // transition attachments to the correct layouts for beginning of renderPass and first subpass
            TransitionBeginRenderPassLayouts(dev_data, cb_node, render_pass_state, framebuffer);
        }
    }
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    }
}
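
// Illustrative app-side sketch (hypothetical handles, not part of the layer): pClearValues must cover the
// highest-indexed attachment that uses VK_ATTACHMENT_LOAD_OP_CLEAR. If only attachment 2 of three is cleared,
// clearValueCount must still be at least 3; entries 0 and 1 are present but ignored.
//
//     VkClearValue clear_values[3] = {};
//     clear_values[2].color = {{0.0f, 0.0f, 0.0f, 1.0f}};  // only attachment 2 uses LOAD_OP_CLEAR
//     VkRenderPassBeginInfo rp_begin = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO};
//     rp_begin.renderPass = render_pass;
//     rp_begin.framebuffer = framebuffer;
//     rp_begin.clearValueCount = 3;                        // >= clear_op_size computed above
//     rp_begin.pClearValues = clear_values;
//     vkCmdBeginRenderPass(cmd_buf, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);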

VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600019);
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdNextSubpass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b602415);
        skip |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
        skip |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600017);

        if (pCB->activeRenderPass) {  // Guard the deref: outsideRenderPass() above only reports, it does not return early
            auto subpassCount = pCB->activeRenderPass->createInfo.subpassCount;
            if (pCB->activeSubpass == subpassCount - 1) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
                                VALIDATION_ERROR_1b60071a, "DS",
                                "vkCmdNextSubpass(): Attempted to advance beyond final subpass. %s",
                                validation_error_map[VALIDATION_ERROR_1b60071a]);
            }
        }
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);

    if (pCB) {
        lock.lock();
        pCB->activeSubpass++;
        pCB->activeSubpassContents = contents;
        TransitionSubpassLayouts(dev_data, pCB, pCB->activeRenderPass, pCB->activeSubpass,
                                 GetFramebufferState(dev_data, pCB->activeRenderPassBeginInfo.framebuffer));
    }
}

VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto pCB = GetCBNode(dev_data, commandBuffer);
    FRAMEBUFFER_STATE *framebuffer = NULL;
    if (pCB) {
        RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
        framebuffer = GetFramebufferState(dev_data, pCB->activeFramebuffer);
        if (rp_state) {
            if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
                                VALIDATION_ERROR_1b00071c, "DS", "vkCmdEndRenderPass(): Called before reaching final subpass. %s",
                                validation_error_map[VALIDATION_ERROR_1b00071c]);
            }

            for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
                auto pAttachment = &rp_state->createInfo.pAttachments[i];
                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp, pAttachment->stencilStoreOp,
                                                         VK_ATTACHMENT_STORE_OP_STORE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
                                                                pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                }
            }
        }
        skip |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_1b000017);
        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_1b000019);
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdEndRenderPass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b002415);
        skip |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);

    if (pCB) {
        lock.lock();
        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, framebuffer);
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpass = 0;
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}

static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
                                        uint32_t secondaryAttach, const char *msg) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   HandleToUint64(secondaryBuffer), __LINE__, VALIDATION_ERROR_1b2000c4, "DS",
                   "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64
                   " which has a render pass "
                   "that is not compatible with the Primary Cmd Buffer's current render pass. "
                   "Attachment %u is not compatible with %u: %s. %s",
                   HandleToUint64(secondaryBuffer), primaryAttach, secondaryAttach, msg,
                   validation_error_map[VALIDATION_ERROR_1b2000c4]);
}

static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
                                            uint32_t secondaryAttach, bool is_multi) {
    bool skip = false;
    if (primaryPassCI->attachmentCount <= primaryAttach) {
        primaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
        secondaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
        return skip;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
        skip |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
                                            "The first is unused while the second is not.");
        return skip;
    }
    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
        skip |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
                                            "The second is unused while the first is not.");
        return skip;
    }
    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
        skip |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
    }
    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
        skip |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
    }
    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
        skip |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
    }
    return skip;
}

static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
    bool skip = false;
    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
                                                secondaryPassCI, secondary_input_attach, is_multi);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
                                                secondaryPassCI, secondary_color_attach, is_multi);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach, secondaryBuffer,
                                                secondaryPassCI, secondary_resolve_attach, is_multi);
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach, secondaryBuffer,
                                            secondaryPassCI, secondary_depthstencil_attach, is_multi);
    return skip;
}

// Verify that the given renderPass CreateInfos for the primary and secondary command buffers are compatible.
//  This function deals directly with the CreateInfo; there are overloaded versions below that can take the renderPass handle and
//  will then feed into this function.
static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
                                            VkRenderPassCreateInfo const *secondaryPassCI) {
    bool skip = false;

    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(primaryBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
                        " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
                        " that has a subpassCount of %u.",
                        HandleToUint64(secondaryBuffer), secondaryPassCI->subpassCount, HandleToUint64(primaryBuffer),
                        primaryPassCI->subpassCount);
    } else {
        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
            skip |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
                                                 primaryPassCI->subpassCount > 1);
        }
    }
    return skip;
}
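
// Illustrative note (not part of the layer): these checks mirror the spec's "Render Pass Compatibility" rules:
// matching subpass counts, and per-attachment matching format and sample count. Attachment flags are only compared
// when a pass has multiple subpasses (is_multi above), so, for example, two single-subpass passes whose color
// attachment is VK_FORMAT_B8G8R8A8_UNORM at VK_SAMPLE_COUNT_1_BIT in both CreateInfos pass these checks
// regardless of their load/store ops or attachment flags.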

static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
    bool skip = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip;
    }
    VkFramebuffer primary_fb = pCB->activeFramebuffer;
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb != VK_NULL_HANDLE) {
        if (primary_fb != secondary_fb) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(primaryBuffer), __LINE__, VALIDATION_ERROR_1b2000c6, "DS",
                            "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64
                            " which has a framebuffer 0x%" PRIx64
                            " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ". %s",
                            HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb), HandleToUint64(primary_fb),
                            validation_error_map[VALIDATION_ERROR_1b2000c6]);
        }
        auto fb = GetFramebufferState(dev_data, secondary_fb);
        if (!fb) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(primaryBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                            "which has invalid framebuffer 0x%" PRIx64 ".",
                            (void *)secondaryBuffer, HandleToUint64(secondary_fb));
            return skip;
        }
        auto cb_renderpass = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
        if (cb_renderpass->renderPass != fb->createInfo.renderPass) {
            skip |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
                                                    cb_renderpass->createInfo.ptr());
        }
    }
    return skip;
}

static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
    bool skip = false;
    unordered_set<int> activeTypes;
    for (auto queryObject : pCB->activeQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end()) {
            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
                pSubCB->beginInfo.pInheritanceInfo) {
                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_1b2000d0, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64
                        ". Pipeline statistics is being queried so the command "
                        "buffer must have all bits set on the queryPool. %s",
                        pCB->commandBuffer, HandleToUint64(queryPoolData->first), validation_error_map[VALIDATION_ERROR_1b2000d0]);
                }
            }
            activeTypes.insert(queryPoolData->second.createInfo.queryType);
        }
    }
    for (auto queryObject : pSubCB->startedQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                            "which has invalid active query pool 0x%" PRIx64
                            " of type %d but a query of that type has been started on "
                            "secondary Cmd Buffer 0x%p.",
                            pCB->commandBuffer, HandleToUint64(queryPoolData->first), queryPoolData->second.createInfo.queryType,
                            pSubCB->commandBuffer);
        }
    }

    auto primary_pool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
    auto secondary_pool = GetCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
        skip |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
                    "vkCmdExecuteCommands(): Primary command buffer 0x%p"
                    " created in queue family %d has secondary command buffer 0x%p created in queue family %d.",
                    pCB->commandBuffer, primary_pool->queueFamilyIndex, pSubCB->commandBuffer, secondary_pool->queueFamilyIndex);
    }

    return skip;
}

VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
                                              const VkCommandBuffer *pCommandBuffers) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        GLOBAL_CB_NODE *pSubCB = NULL;
        for (uint32_t i = 0; i < commandBuffersCount; i++) {
            pSubCB = GetCBNode(dev_data, pCommandBuffers[i]);
            assert(pSubCB);
            if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000b0, "DS",
                            "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
                            "array. All cmd buffers in pCommandBuffers array must be secondary. %s",
                            pCommandBuffers[i], i, validation_error_map[VALIDATION_ERROR_1b2000b0]);
            } else if (pCB->activeRenderPass) {  // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
                if (pSubCB->beginInfo.pInheritanceInfo != nullptr) {
                    auto secondary_rp_state = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
                    if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000c0, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
                            ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT "
                            "set. %s",
                            pCommandBuffers[i], HandleToUint64(pCB->activeRenderPass->renderPass),
                            validation_error_map[VALIDATION_ERROR_1b2000c0]);
                    } else {
                        // Make sure render pass is compatible with parent command buffer pass if has continue
                        if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
                            skip |=
                                validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
                                                                pCommandBuffers[i], secondary_rp_state->createInfo.ptr());
                        }
                        //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
                        skip |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
                    }
                    string errorString = "";
                    // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
                    if ((pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) &&
                        !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
                                                         secondary_rp_state->createInfo.ptr(), errorString)) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
                            ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                            pCommandBuffers[i], HandleToUint64(pSubCB->beginInfo.pInheritanceInfo->renderPass), commandBuffer,
                            HandleToUint64(pCB->activeRenderPass->renderPass), errorString.c_str());
                    }
                }
            }
            // TODO(mlentine): Move more logic into this method
            skip |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
            skip |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()", 0, VALIDATION_ERROR_1b2000b2);
            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                if (pSubCB->in_use.load() || pCB->linkedCommandBuffers.count(pSubCB)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), __LINE__,
                                    VALIDATION_ERROR_1b2000b4, "DS",
                                    "Attempt to simultaneously execute command buffer 0x%p"
                                    " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set! %s",
                                    pCB->commandBuffer, validation_error_map[VALIDATION_ERROR_1b2000b4]);
                }
                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) "
                        "does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
                        "(0x%p) to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set, even though it does.",
                        pCommandBuffers[i], pCB->commandBuffer);
                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
                }
            }
            if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000ca, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer "
                            "(0x%p) cannot be submitted with a query in "
                            "flight and inherited queries not "
                            "supported on this device. %s",
                            pCommandBuffers[i], validation_error_map[VALIDATION_ERROR_1b2000ca]);
            }
            // TODO: separate validate from update! This is very tangled.
            // Propagate layout transitions to the primary cmd buffer
            for (auto ilm_entry : pSubCB->imageLayoutMap) {
                SetLayout(dev_data, pCB, ilm_entry.first, ilm_entry.second);
            }
            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
            pCB->linkedCommandBuffers.insert(pSubCB);
            pSubCB->linkedCommandBuffers.insert(pCB);
            for (auto &function : pSubCB->queryUpdates) {
                pCB->queryUpdates.push_back(function);
            }
        }
        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands()", VALIDATION_ERROR_1b200019);
        skip |=
            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdExecuteCommands()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b202415);
        skip |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
}
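
// Illustrative app-side sketch (hypothetical handles, not part of the layer): a secondary command buffer executed
// inside a render pass must be recorded with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT and inheritance info
// naming a compatible render pass, which is what the checks above enforce.
//
//     VkCommandBufferInheritanceInfo inherit = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO};
//     inherit.renderPass = render_pass;    // must be compatible with the primary's active render pass
//     inherit.subpass = 0;
//     inherit.framebuffer = framebuffer;   // may be VK_NULL_HANDLE, but must match the primary's FB if set
//     VkCommandBufferBeginInfo begin = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondary_cb, &begin);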

VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
                                         void **ppData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    bool skip = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    unique_lock_t lock(global_lock);
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        // TODO : This could be more fine-grained to track just the region that is valid
        mem_info->global_valid = true;
        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
        skip |= ValidateMapImageLayouts(dev_data, device, mem_info, offset, end_offset);
        // TODO : Do we need to create new "bound_range" for the mapped range?
        SetMemRangesValid(dev_data, mem_info, offset, end_offset);
        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem), __LINE__, VALIDATION_ERROR_31200554, "MEM",
                            "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64 ". %s",
                            HandleToUint64(mem), validation_error_map[VALIDATION_ERROR_31200554]);
        }
    }
    skip |= ValidateMapMemRange(dev_data, mem, offset, size);
    lock.unlock();

    if (!skip) {
        result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
        if (VK_SUCCESS == result) {
            lock.lock();
            // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
            storeMemRanges(dev_data, mem, offset, size);
            initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
            lock.unlock();
        }
    }
    return result;
}
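
// Illustrative app-side sketch (hypothetical handles, not part of the layer): mapping is only legal for memory
// allocated from a VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT type, which is what the check above reports.
//
//     void *data = nullptr;
//     vkMapMemory(device, host_visible_mem, 0, VK_WHOLE_SIZE, 0, &data);
//     memcpy(data, src, static_cast<size_t>(copy_size));
//     vkUnmapMemory(device, host_visible_mem);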

VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;

    unique_lock_t lock(global_lock);
    skip |= deleteMemRanges(dev_data, mem);
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.UnmapMemory(device, mem);
    }
}

static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
                                   const VkMappedMemoryRange *pMemRanges) {
    bool skip = false;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_info = GetMemObjInfo(dev_data, pMemRanges[i].memory);
        if (mem_info) {
            if (pMemRanges[i].size == VK_WHOLE_SIZE) {
                if (mem_info->mem_range.offset > pMemRanges[i].offset) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(pMemRanges[i].memory), __LINE__, VALIDATION_ERROR_0c20055c, "MEM",
                                "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
                                ") is less than Memory Object's offset "
                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
                                funcName, static_cast<size_t>(pMemRanges[i].offset),
                                static_cast<size_t>(mem_info->mem_range.offset), validation_error_map[VALIDATION_ERROR_0c20055c]);
                }
            } else {
                const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
                                              ? mem_info->alloc_info.allocationSize
                                              : (mem_info->mem_range.offset + mem_info->mem_range.size);
                if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
                    (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(pMemRanges[i].memory), __LINE__, VALIDATION_ERROR_0c20055a, "MEM",
                                "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
                                ") exceeds the Memory Object's upper-bound "
                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
                                funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
                                static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end),
                                validation_error_map[VALIDATION_ERROR_0c20055a]);
                }
            }
        }
    }
    return skip;
}

static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
                                                     const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
        if (mem_info) {
            if (mem_info->shadow_copy) {
                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
                                        ? mem_info->mem_range.size
                                        : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
                char *data = static_cast<char *>(mem_info->shadow_copy);
                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges[i].memory), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "Memory underflow was detected on mem obj 0x%" PRIxLEAST64, HandleToUint64(mem_ranges[i].memory));
                    }
                }
                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges[i].memory), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "Memory overflow was detected on mem obj 0x%" PRIxLEAST64, HandleToUint64(mem_ranges[i].memory));
                    }
                }
                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
            }
        }
    }
    return skip;
}
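
// Illustrative note (not part of the layer): the shadow copy scanned above is laid out as
//
//     [ shadow_pad_size guard bytes | size mapped bytes | shadow_pad_size guard bytes ]
//
// with every guard byte initialized to NoncoherentMemoryFillValue. A guard byte that no longer holds that value
// means the app wrote outside its mapped range: before the data (underflow) or after it (overflow).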

static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
        if (mem_info && mem_info->shadow_copy) {
            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
                                    ? mem_info->mem_range.size
                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
            char *data = static_cast<char *>(mem_info->shadow_copy);
            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
        }
    }
}

static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
                                                  const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
        if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges[i].memory), __LINE__, VALIDATION_ERROR_0c20055e, "MEM",
                            "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
                            func_name, i, mem_ranges[i].offset, atom_size, validation_error_map[VALIDATION_ERROR_0c20055e]);
        }
        if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges[i].memory), __LINE__, VALIDATION_ERROR_0c200560, "MEM",
                            "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
                            func_name, i, mem_ranges[i].size, atom_size, validation_error_map[VALIDATION_ERROR_0c200560]);
        }
    }
    return skip;
}
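
// Illustrative example (hypothetical values, not part of the layer): if nonCoherentAtomSize is 64, an offset of 96
// fails the check above (96 % 64 == 32) while offsets 0, 64, or 128 pass; a size of 100 likewise fails unless it is
// VK_WHOLE_SIZE, which is exempt.
//
//     VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE};
//     range.memory = mem;
//     range.offset = 64;             // multiple of nonCoherentAtomSize
//     range.size = VK_WHOLE_SIZE;    // always satisfies the size check
//     vkFlushMappedMemoryRanges(device, 1, &range);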

static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                   const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    lock_guard_t lock(global_lock);
    skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
    skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                       const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
        result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}

static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                        const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    lock_guard_t lock(global_lock);
    skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
    return skip;
}

static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                       const VkMappedMemoryRange *mem_ranges) {
    lock_guard_t lock(global_lock);
    // Update our shadow copy with modified driver data
    CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
}

VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                            const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
        result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
        if (result == VK_SUCCESS) {
            PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
        }
    }
    return result;
}

static bool PreCallValidateBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
                                           VkDeviceSize memoryOffset) {
    bool skip = false;
    if (image_state) {
        unique_lock_t lock(global_lock);
        // Track objects tied to memory
        uint64_t image_handle = HandleToUint64(image);
        skip = ValidateSetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, "vkBindImageMemory()");
        if (!image_state->memory_requirements_checked) {
            // There's no explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
            // BindImageMemory, but it's implied: the memory being bound must conform with the VkMemoryRequirements returned by
            // vkGetImageMemoryRequirements()
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            image_handle, __LINE__, DRAWSTATE_INVALID_IMAGE, "DS",
                            "vkBindImageMemory(): Binding memory to image 0x%" PRIxLEAST64
                            " but vkGetImageMemoryRequirements() has not been called on that image.",
                            image_handle);
            // Make the call for them so we can verify the state
            lock.unlock();
            dev_data->dispatch_table.GetImageMemoryRequirements(dev_data->device, image, &image_state->requirements);
            lock.lock();
        }

        // Validate bound memory range information
        auto mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            skip |= ValidateInsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
                                                   image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, "vkBindImageMemory()");
            skip |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, "vkBindImageMemory()",
                                        VALIDATION_ERROR_1740082e);
        }

        // Validate memory requirements alignment
        if (SafeModulo(memoryOffset, image_state->requirements.alignment) != 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            image_handle, __LINE__, VALIDATION_ERROR_17400830, "DS",
                            "vkBindImageMemory(): memoryOffset is 0x%" PRIxLEAST64
                            " but must be an integer multiple of the "
                            "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetImageMemoryRequirements with image. %s",
                            memoryOffset, image_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_17400830]);
        }

        // Validate memory requirements size (skip if mem_info was not found above, avoiding a null dereference)
        if (mem_info && (image_state->requirements.size > mem_info->alloc_info.allocationSize - memoryOffset)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            image_handle, __LINE__, VALIDATION_ERROR_17400832, "DS",
                            "vkBindImageMemory(): memory size minus memoryOffset is 0x%" PRIxLEAST64
                            " but must be at least as large as "
                            "VkMemoryRequirements::size value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetImageMemoryRequirements with image. %s",
                            mem_info->alloc_info.allocationSize - memoryOffset, image_state->requirements.size,
                            validation_error_map[VALIDATION_ERROR_17400832]);
        }
    }
    return skip;
}

static void PostCallRecordBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
                                          VkDeviceSize memoryOffset) {
    if (image_state) {
        unique_lock_t lock(global_lock);
        // Track bound memory range information
        auto mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
                                   image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
        }

        // Track objects tied to memory
        uint64_t image_handle = HandleToUint64(image);
        SetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, "vkBindImageMemory()");

        image_state->binding.mem = mem;
        image_state->binding.offset = memoryOffset;
        image_state->binding.size = image_state->requirements.size;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    auto image_state = GetImageState(dev_data, image);
    bool skip = PreCallValidateBindImageMemory(dev_data, image, image_state, mem, memoryOffset);
    if (!skip) {
        result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
        if (result == VK_SUCCESS) {
            PostCallRecordBindImageMemory(dev_data, image, image_state, mem, memoryOffset);
        }
    }
    return result;
}
8415
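// vkSetEvent signals an event from the host. A host signal is visible to all
// queues immediately, so the tracked stage mask for this event is updated with
// VK_PIPELINE_STAGE_HOST_BIT on every queue below.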
8416VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
8417    bool skip = false;
8418    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8419    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8420    unique_lock_t lock(global_lock);
8421    auto event_state = GetEventNode(dev_data, event);
8422    if (event_state) {
8423        event_state->needsSignaled = false;
8424        event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
8425        if (event_state->write_in_use) {
8426            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8427                            HandleToUint64(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
8428                            "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
8429                            HandleToUint64(event));
8430        }
8431    }
8432    lock.unlock();
8433    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
8434    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
8435    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
8436    for (auto &queue_data : dev_data->queueMap) {  // by reference, so the stageMask update below sticks
8437        auto event_entry = queue_data.second.eventToStageMap.find(event);
8438        if (event_entry != queue_data.second.eventToStageMap.end()) {
8439            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
8440        }
8441    }
8442    if (!skip) result = dev_data->dispatch_table.SetEvent(device, event);
8443    return result;
8444}
8445
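// vkQueueBindSparse is treated much like vkQueueSubmit: the fence must not
// already be in use, sparse buffer/opaque-image/image binds are recorded as
// memory bindings, and each VkBindSparseInfo is enqueued as a submission with
// its semaphore waits and signals so forward-progress checks cover it.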
8446VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
8447                                               VkFence fence) {
8448    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
8449    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8450    bool skip = false;
8451    unique_lock_t lock(global_lock);
8452    auto pFence = GetFenceNode(dev_data, fence);
8453    auto pQueue = GetQueueState(dev_data, queue);
8454
8455    // First verify that fence is not in use
8456    skip |= ValidateFenceForSubmit(dev_data, pFence);
8457
8458    if (pFence) {
8459        SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount));
8460    }
8461
8462    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
8463        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
8464        // Track objects tied to memory
8465        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
8466            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
8467                auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
8468                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
8469                                        HandleToUint64(bindInfo.pBufferBinds[j].buffer), kVulkanObjectTypeBuffer))
8470                    skip = true;
8471            }
8472        }
8473        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
8474            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
8475                auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
8476                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
8477                                        HandleToUint64(bindInfo.pImageOpaqueBinds[j].image), kVulkanObjectTypeImage))
8478                    skip = true;
8479            }
8480        }
8481        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
8482            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
8483                auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
8484                // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
8485                VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
8486                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
8487                                        HandleToUint64(bindInfo.pImageBinds[j].image), kVulkanObjectTypeImage))
8488                    skip = true;
8489            }
8490        }
8491
8492        std::vector<SEMAPHORE_WAIT> semaphore_waits;
8493        std::vector<VkSemaphore> semaphore_signals;
8494        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
8495            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
8496            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
8497            if (pSemaphore) {
8498                if (pSemaphore->signaled) {
8499                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
8500                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
8501                        pSemaphore->in_use.fetch_add(1);
8502                    }
8503                    pSemaphore->signaler.first = VK_NULL_HANDLE;
8504                    pSemaphore->signaled = false;
8505                } else {
8506                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
8507                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
8508                                    "vkQueueBindSparse: Queue 0x%p is waiting on semaphore 0x%" PRIx64
8509                                    " that has no way to be signaled.",
8510                                    queue, HandleToUint64(semaphore));
8511                }
8512            }
8513        }
8514        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
8515            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
8516            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
8517            if (pSemaphore) {
8518                if (pSemaphore->signaled) {
8519                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
8520                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
8521                                    "vkQueueBindSparse: Queue 0x%p is signaling semaphore 0x%" PRIx64
8522                                    ", but that semaphore is already signaled.",
8523                                    queue, HandleToUint64(semaphore));
8524                } else {
8525                    pSemaphore->signaler.first = queue;
8526                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
8527                    pSemaphore->signaled = true;
8528                    pSemaphore->in_use.fetch_add(1);
8529                    semaphore_signals.push_back(semaphore);
8530                }
8531            }
8532        }
8533
8534        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals,
8535                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
8536    }
8537
8538    if (pFence && !bindInfoCount) {
8539        // No work to do, just dropping a fence in the queue by itself.
8540        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(),
8541                                         fence);
8542    }
8543
8544    lock.unlock();
8545
8546    if (!skip) return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
8547
8548    return result;
8549}
8550
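// Create the semaphore through the dispatch chain, then track it as unsignaled
// with no known signaler.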
8551VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
8552                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
8553    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8554    VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
8555    if (result == VK_SUCCESS) {
8556        lock_guard_t lock(global_lock);
8557        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
8558        sNode->signaler.first = VK_NULL_HANDLE;
8559        sNode->signaler.second = 0;
8560        sNode->signaled = false;
8561    }
8562    return result;
8563}
8564
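// Create the event through the dispatch chain, then track it as unsignaled with
// no writes in flight and an empty stage mask.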
8565VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
8566                                           const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
8567    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8568    VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
8569    if (result == VK_SUCCESS) {
8570        lock_guard_t lock(global_lock);
8571        dev_data->eventMap[*pEvent].needsSignaled = false;
8572        dev_data->eventMap[*pEvent].write_in_use = 0;
8573        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
8574    }
8575    return result;
8576}
8577
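// Validate a swapchain create request against surface data the application is
// expected to have queried beforehand (a sketch of the assumed sequence):
//   VkSurfaceCapabilitiesKHR caps;
//   vkGetPhysicalDeviceSurfaceCapabilitiesKHR(gpu, surface, &caps);
//   vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &format_count, formats);
//   vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, modes);
//   VkBool32 supported;
//   vkGetPhysicalDeviceSurfaceSupportKHR(gpu, queue_family_index, surface, &supported);
// minImageCount, imageExtent, preTransform, compositeAlpha, imageFormat/
// imageColorSpace, and presentMode are each checked against those results.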
8578static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
8579                                              VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
8580                                              SWAPCHAIN_NODE *old_swapchain_state) {
8581    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
8582
8583    // TODO: revisit this. some of these rules are being relaxed.
8584
8585    // All physical devices and queue families are required to be able
8586    // to present to any native window on Android; require the
8587    // application to have established support on any other platform.
8588    if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
8589        auto support_predicate = [dev_data](decltype(surface_state->gpu_queue_support)::const_reference qs) -> bool {
8590            // TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
8591            return (qs.first.gpu == dev_data->physical_device) && qs.second;
8592        };
8593        const auto& support = surface_state->gpu_queue_support;
8594        bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);
8595
8596        if (!is_supported) {
8597            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8598                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009ec, "DS",
8599                        "%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. "
8600                        "vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE "
8601                        "for this surface with at least one queue family of this device. %s",
8602                        func_name, validation_error_map[VALIDATION_ERROR_146009ec]))
8603                return true;
8604        }
8605    }
8606
8607    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
8608        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8609                    HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
8610                    "%s: surface has an existing swapchain other than oldSwapchain", func_name))
8611            return true;
8612    }
8613    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
8614        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
8615                    HandleToUint64(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE, "DS",
8616                    "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
8617            return true;
8618    }
8619    auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
8620    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
8621        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
8622                    HandleToUint64(dev_data->physical_device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
8623                    "%s: surface capabilities not retrieved for this physical device", func_name))
8624            return true;
8625    } else {  // have valid capabilities
8626        auto &capabilities = physical_device_state->surfaceCapabilities;
8627        // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
8628        if (pCreateInfo->minImageCount < capabilities.minImageCount) {
8629            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8630                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009ee, "DS",
8631                        "%s called with minImageCount = %d, which is outside the bounds returned "
8632                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
8633                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
8634                        validation_error_map[VALIDATION_ERROR_146009ee]))
8635                return true;
8636        }
8637
8638        if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
8639            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8640                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f0, "DS",
8641                        "%s called with minImageCount = %d, which is outside the bounds returned "
8642                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
8643                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
8644                        validation_error_map[VALIDATION_ERROR_146009f0]))
8645                return true;
8646        }
8647
8648        // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
8649        if ((capabilities.currentExtent.width == kSurfaceSizeFromSwapchain) &&
8650            ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
8651             (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
8652             (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
8653             (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height))) {
8654            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8655                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f4, "DS",
8656                        "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
8657                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
8658                        "maxImageExtent = (%d,%d). %s",
8659                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
8660                        capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
8661                        capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height,
8662                        validation_error_map[VALIDATION_ERROR_146009f4]))
8663                return true;
8664        }
8665        // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
8666        // VkSurfaceCapabilitiesKHR::supportedTransforms.
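        // A nonzero mask x has exactly one bit set iff (x & (x - 1)) == 0, e.g.
        // 0b0100 & 0b0011 == 0 while 0b0110 & 0b0101 != 0; the test below rejects a
        // zero mask, a multi-bit mask, and a bit the surface does not support.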
8667        if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
8668            !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
8669            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
8670            // it up a little at a time, and then log it:
8671            std::string errorString = "";
8672            char str[1024];
8673            // Here's the first part of the message:
8674            sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s).  Supported values are:\n", func_name,
8675                    string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
8676            errorString += str;
8677            for (int i = 0; i < 32; i++) {
8678                // Build up the rest of the message (1u avoids undefined behavior when shifting into bit 31):
8679                if ((1u << i) & capabilities.supportedTransforms) {
8680                    const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1u << i));
8681                    sprintf(str, "    %s\n", newStr);
8682                    errorString += str;
8683                }
8684            }
8685            // Log the message that we've built up:
8686            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8687                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009fe, "DS", "%s. %s", errorString.c_str(),
8688                        validation_error_map[VALIDATION_ERROR_146009fe]))
8689                return true;
8690        }
8691
8692        // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
8693        // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
8694        if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
8695            !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
8696            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
8697            // it up a little at a time, and then log it:
8698            std::string errorString = "";
8699            char str[1024];
8700            // Here's the first part of the message:
8701            sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s).  Supported values are:\n",
8702                    func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
8703            errorString += str;
8704            for (int i = 0; i < 32; i++) {
8705                // Build up the rest of the message (1u avoids undefined behavior when shifting into bit 31):
8706                if ((1u << i) & capabilities.supportedCompositeAlpha) {
8707                    const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1u << i));
8708                    sprintf(str, "    %s\n", newStr);
8709                    errorString += str;
8710                }
8711            }
8712            // Log the message that we've built up:
8713            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8714                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600a00, "DS", "%s. %s", errorString.c_str(),
8715                        validation_error_map[VALIDATION_ERROR_14600a00]))
8716                return true;
8717        }
8718        // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
8719        if ((pCreateInfo->imageArrayLayers < 1) || (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers)) {
8720            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8721                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f6, "DS",
8722                        "%s called with a non-supported imageArrayLayers (i.e. %d).  Minimum value is 1, maximum value is %d. %s",
8723                        func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers,
8724                        validation_error_map[VALIDATION_ERROR_146009f6]))
8725                return true;
8726        }
8727        // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
8728        if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
8729            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8730                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f8, "DS",
8731                        "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x).  Supported flag bits are 0x%08x. %s",
8732                        func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags,
8733                        validation_error_map[VALIDATION_ERROR_146009f8]))
8734                return true;
8735        }
8736    }
8737
8738    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
8739    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
8740        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8741                    HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
8742                    "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
8743            return true;
8744    } else {
8745        // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
8746        bool foundFormat = false;
8747        bool foundColorSpace = false;
8748        bool foundMatch = false;
8749        for (auto const &format : physical_device_state->surface_formats) {
8750            if (pCreateInfo->imageFormat == format.format) {
8751                // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
8752                foundFormat = true;
8753                if (pCreateInfo->imageColorSpace == format.colorSpace) {
8754                    foundMatch = true;
8755                    break;
8756                }
8757            } else {
8758                if (pCreateInfo->imageColorSpace == format.colorSpace) {
8759                    foundColorSpace = true;
8760                }
8761            }
8762        }
8763        if (!foundMatch) {
8764            if (!foundFormat) {
8765                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8766                            HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f2, "DS",
8767                            "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d). %s", func_name,
8768                            pCreateInfo->imageFormat, validation_error_map[VALIDATION_ERROR_146009f2]))
8769                    return true;
8770            }
8771            if (!foundColorSpace) {
8772                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8773                            HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f2, "DS",
8774                            "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d). %s", func_name,
8775                            pCreateInfo->imageColorSpace, validation_error_map[VALIDATION_ERROR_146009f2]))
8776                    return true;
8777            }
8778        }
8779    }
8780
8781    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
8782    if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
8783        // FIFO is required to always be supported
8784        if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
8785            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8786                        HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
8787                        "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
8788                return true;
8789        }
8790    } else {
8791        // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
8792        bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
8793                                    pCreateInfo->presentMode) != physical_device_state->present_modes.end();
8794        if (!foundMatch) {
8795            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8796                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600a02, "DS",
8797                        "%s called with a non-supported presentMode (i.e. %s). %s", func_name,
8798                        string_VkPresentModeKHR(pCreateInfo->presentMode), validation_error_map[VALIDATION_ERROR_14600a02]))
8799                return true;
8800        }
8801    }
8802    // Validate state for shared presentable case
8803    if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
8804        VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
8805        if (!dev_data->extensions.vk_khr_shared_presentable_image) {
8806            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8807                        HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_EXTENSION_NOT_ENABLED, "DS",
8808                        "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
8809                        "been enabled.",
8810                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
8811                return true;
8812        } else if (pCreateInfo->minImageCount != 1) {
8813            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8814                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600ace, "DS",
8815                        "%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
8816                        "must be 1. %s",
8817                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount,
8818                        validation_error_map[VALIDATION_ERROR_14600ace]))
8819                return true;
8820        }
8821    }
8822
8823    return false;
8824}
8825
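// Record the outcome of vkCreateSwapchainKHR. Note the asymmetry required by the
// spec: oldSwapchain is retired even when creation fails, so it is marked
// replaced on both paths.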
8826static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
8827                                             VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
8828                                             SWAPCHAIN_NODE *old_swapchain_state) {
8829    if (VK_SUCCESS == result) {
8830        lock_guard_t lock(global_lock);
8831        auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
8832        if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
8833            VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
8834            swapchain_state->shared_presentable = true;
8835        }
8836        surface_state->swapchain = swapchain_state.get();
8837        dev_data->swapchainMap[*pSwapchain] = std::move(swapchain_state);
8838    } else {
8839        surface_state->swapchain = nullptr;
8840    }
8841    // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
8842    if (old_swapchain_state) {
8843        old_swapchain_state->replaced = true;
8844    }
8845    surface_state->old_swapchain = old_swapchain_state;
8846    return;
8847}
8848
8849VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
8850                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
8851    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8852    auto surface_state = GetSurfaceState(dev_data->instance_data, pCreateInfo->surface);
8853    auto old_swapchain_state = GetSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
8854
8855    if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
8856        return VK_ERROR_VALIDATION_FAILED_EXT;
8857    }
8858
8859    VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
8860
8861    PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
8862
8863    return result;
8864}
8865
8866VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
8867    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8868    bool skip = false;
8869
8870    unique_lock_t lock(global_lock);
8871    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
8872    if (swapchain_data) {
8873        if (swapchain_data->images.size() > 0) {
8874            for (auto swapchain_image : swapchain_data->images) {
8875                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
8876                if (image_sub != dev_data->imageSubresourceMap.end()) {
8877                    for (auto imgsubpair : image_sub->second) {
8878                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
8879                        if (image_item != dev_data->imageLayoutMap.end()) {
8880                            dev_data->imageLayoutMap.erase(image_item);
8881                        }
8882                    }
8883                    dev_data->imageSubresourceMap.erase(image_sub);
8884                }
8885                skip = ClearMemoryObjectBindings(dev_data, HandleToUint64(swapchain_image), kVulkanObjectTypeSwapchainKHR);
8886                dev_data->imageMap.erase(swapchain_image);
8887            }
8888        }
8889
8890        auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
8891        if (surface_state) {
8892            if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
8893            if (surface_state->old_swapchain == swapchain_data) surface_state->old_swapchain = nullptr;
8894        }
8895
8896        dev_data->swapchainMap.erase(swapchain);
8897    }
8898    lock.unlock();
8899    if (!skip) dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
8900}
8901
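// vkGetSwapchainImagesKHR follows the usual Vulkan two-call idiom; the expected
// application sequence (sketch) is:
//   uint32_t count = 0;
//   vkGetSwapchainImagesKHR(device, swapchain, &count, NULL);           // query count
//   std::vector<VkImage> images(count);
//   vkGetSwapchainImagesKHR(device, swapchain, &count, images.data());  // fetch handles
// The checks below warn when the second call is made without the first, or when
// it asks for more images than the count previously returned.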
8902static bool PreCallValidateGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
8903                                                 uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
8904    bool skip = false;
8905    if (swapchain_state && pSwapchainImages) {
8906        lock_guard_t lock(global_lock);
8907        // Compare the preliminary value of *pSwapchainImageCount with the value this time:
8908        if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) {
8909            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8910                            HandleToUint64(device), __LINE__, SWAPCHAIN_PRIOR_COUNT, "DS",
8911                            "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages; but no prior call has been "
8912                            "made with a NULL pSwapchainImages to query the image count.");
8913        } else if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
8914            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8915                            HandleToUint64(device), __LINE__, SWAPCHAIN_INVALID_COUNT, "DS",
8916                            "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages, and with "
8917                            "*pSwapchainImageCount set to a value (%d) that is greater than the value (%d) that was returned "
8918                            "when pSwapchainImages was NULL.",
8919                            *pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
8920        }
8921    }
8922    return skip;
8923}
8924
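// Record newly retrieved swapchain images. These images are produced by the WSI
// rather than vkCreateImage, so an equivalent VkImageCreateInfo is synthesized
// from the swapchain's create info, and the binding points at the
// MEMTRACKER_SWAP_CHAIN_IMAGE_KEY sentinel instead of a real allocation.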
8925static void PostCallRecordGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
8926                                                uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
8927    lock_guard_t lock(global_lock);
8928
8929    if (*pSwapchainImageCount > swapchain_state->images.size()) swapchain_state->images.resize(*pSwapchainImageCount);
8930
8931    if (pSwapchainImages) {
8932        if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_DETAILS) {
8933            swapchain_state->vkGetSwapchainImagesKHRState = QUERY_DETAILS;
8934        }
8935        for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
8936            if (swapchain_state->images[i] != VK_NULL_HANDLE) continue;  // Already retrieved this.
8937
8938            IMAGE_LAYOUT_NODE image_layout_node;
8939            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
8940            image_layout_node.format = swapchain_state->createInfo.imageFormat;
8941            // Add imageMap entries for each swapchain image
8942            VkImageCreateInfo image_ci = {};
8943            image_ci.flags = 0;
8944            image_ci.imageType = VK_IMAGE_TYPE_2D;
8945            image_ci.format = swapchain_state->createInfo.imageFormat;
8946            image_ci.extent.width = swapchain_state->createInfo.imageExtent.width;
8947            image_ci.extent.height = swapchain_state->createInfo.imageExtent.height;
8948            image_ci.extent.depth = 1;
8949            image_ci.mipLevels = 1;
8950            image_ci.arrayLayers = swapchain_state->createInfo.imageArrayLayers;
8951            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
8952            image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
8953            image_ci.usage = swapchain_state->createInfo.imageUsage;
8954            image_ci.sharingMode = swapchain_state->createInfo.imageSharingMode;
8955            device_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
8956            auto &image_state = device_data->imageMap[pSwapchainImages[i]];
8957            image_state->valid = false;
8958            image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
8959            swapchain_state->images[i] = pSwapchainImages[i];
8960            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
8961            device_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
8962            device_data->imageLayoutMap[subpair] = image_layout_node;
8963        }
8964    }
8965
8966    if (*pSwapchainImageCount) {
8967        if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_COUNT) {
8968            swapchain_state->vkGetSwapchainImagesKHRState = QUERY_COUNT;
8969        }
8970        swapchain_state->get_swapchain_image_count = *pSwapchainImageCount;
8971    }
8972}
8973
8974VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
8975                                                     VkImage *pSwapchainImages) {
8976    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8977    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8978
8979    auto swapchain_state = GetSwapchainNode(device_data, swapchain);
8980    bool skip = PreCallValidateGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
8981
8982    if (!skip) {
8983        result = device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
8984    }
8985
8986    if ((result == VK_SUCCESS || result == VK_INCOMPLETE)) {
8987        PostCallRecordGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
8988    }
8989    return result;
8990}
8991
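// vkQueuePresentKHR validation: every wait semaphore must have a pending signal,
// each presented image must have been acquired and be in a presentable layout
// (PRESENT_SRC_KHR, or SHARED_PRESENT_KHR when the shared-presentable-image
// extension is enabled), the queue must be able to present to the surface, and
// recognized structs on the pNext chain are checked as well.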
8992VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
8993    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
8994    bool skip = false;
8995
8996    lock_guard_t lock(global_lock);
8997    auto queue_state = GetQueueState(dev_data, queue);
8998
8999    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
9000        auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
9001        if (pSemaphore && !pSemaphore->signaled) {
9002            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
9003                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9004                            "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
9005                            HandleToUint64(pPresentInfo->pWaitSemaphores[i]));
9006        }
9007    }
9008
9009    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
9010        auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
9011        if (swapchain_data) {
9012            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
9013                skip |=
9014                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9015                            HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
9016                            "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
9017                            pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
9018            } else {
9019                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
9020                auto image_state = GetImageState(dev_data, image);
9021
9022                if (image_state->shared_presentable) {
9023                    image_state->layout_locked = true;
9024                }
9025
9026                skip |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");
9027
9028                if (!image_state->acquired) {
9029                    skip |= log_msg(
9030                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9031                        HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED, "DS",
9032                        "vkQueuePresentKHR: Swapchain image index %u has not been acquired.", pPresentInfo->pImageIndices[i]);
9033                }
9034
9035                vector<VkImageLayout> layouts;
9036                if (FindLayouts(dev_data, image, layouts)) {
9037                    for (auto layout : layouts) {
9038                        if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) &&
9039                            (!dev_data->extensions.vk_khr_shared_presentable_image ||
9040                             (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
9041                            skip |=
9042                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
9043                                        HandleToUint64(queue), __LINE__, VALIDATION_ERROR_11200a20, "DS",
9044                                        "Images passed to present must be in layout "
9045                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s. %s",
9046                                        string_VkImageLayout(layout), validation_error_map[VALIDATION_ERROR_11200a20]);
9047                        }
9048                    }
9049                }
9050            }
9051
9052            // All physical devices and queue families are required to be able
9053            // to present to any native window on Android; require the
9054            // application to have established support on any other platform.
9055            if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
9056                auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
9057                auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});
9058
9059                if (support_it == surface_state->gpu_queue_support.end()) {
9060                    skip |=
9061                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9062                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE, "DS",
9063                                "vkQueuePresentKHR: Presenting image without calling "
9064                                "vkGetPhysicalDeviceSurfaceSupportKHR");
9065                } else if (!support_it->second) {
9066                    skip |=
9067                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9068                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, VALIDATION_ERROR_31800a18, "DS",
9069                                "vkQueuePresentKHR: Presenting image on queue that cannot "
9070                                "present to this surface. %s",
9071                                validation_error_map[VALIDATION_ERROR_31800a18]);
9072                }
9073            }
9074        }
9075    }
9076    if (pPresentInfo && pPresentInfo->pNext) {
9077        // Verify recognized extension structs on the pNext chain
9078        struct std_header {
9079            VkStructureType sType;
9080            const void *pNext;
9081        };
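        // Every extensible Vulkan struct begins with an sType/pNext pair, so this
        // minimal header is enough to walk an arbitrary pNext chain (the same shape
        // Vulkan 1.1 later standardizes as VkBaseInStructure).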
9082        std_header *pnext = (std_header *)pPresentInfo->pNext;
9083        while (pnext) {
9084            if (VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR == pnext->sType) {
9085                VkPresentRegionsKHR *present_regions = (VkPresentRegionsKHR *)pnext;
9086                for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
9087                    auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
9088                    assert(swapchain_data);
9089                    VkPresentRegionKHR region = present_regions->pRegions[i];
9090                    for (uint32_t j = 0; j < region.rectangleCount; ++j) {
9091                        VkRectLayerKHR rect = region.pRectangles[j];
9092                        // TODO: Need to update these errors to their unique error ids when available
9093                        if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
9094                            skip |= log_msg(
9095                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9096                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
9097                                "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext "
9098                                "chain, pRegion[%i].pRectangles[%i], the sum of offset.x "
9099                                "(%i) and extent.width (%i) is greater than the "
9100                                "corresponding swapchain's imageExtent.width (%i).",
9101                                i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
9102                        }
9103                        if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
9104                            skip |= log_msg(
9105                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9106                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
9107                                "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext "
9108                                "chain, pRegion[%i].pRectangles[%i], the sum of offset.y "
9109                                "(%i) and extent.height (%i) is greater than the "
9110                                "corresponding swapchain's imageExtent.height (%i).",
9111                                i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
9112                        }
9113                        if (rect.layer >= swapchain_data->createInfo.imageArrayLayers) {
9114                            skip |= log_msg(
9115                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9116                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
9117                                "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the "
9118                                "layer (%i) is not less than the corresponding swapchain's imageArrayLayers (%i).",
9119                                i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
9120                        }
9121                    }
9122                }
9123            } else if (VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE == pnext->sType) {
9124                VkPresentTimesInfoGOOGLE *present_times_info = (VkPresentTimesInfoGOOGLE *)pnext;
9125                if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
9126                    skip |=
9127                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9128                                HandleToUint64(pPresentInfo->pSwapchains[0]), __LINE__, VALIDATION_ERROR_118009be, "DS",
9131                                "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but "
9132                                "pPresentInfo->swapchainCount is %i. For VkPresentTimesInfoGOOGLE down pNext "
9133                                "chain of VkPresentInfoKHR, VkPresentTimesInfoGOOGLE.swapchainCount "
9134                                "must equal VkPresentInfoKHR.swapchainCount.",
9135                                present_times_info->swapchainCount, pPresentInfo->swapchainCount);
9136                }
9137            }
9138            pnext = (std_header *)pnext->pNext;
9139        }
9140    }
9141
9142    if (skip) {
9143        return VK_ERROR_VALIDATION_FAILED_EXT;
9144    }
9145
9146    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
9147
9148    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
9149        // Semaphore waits occur before error generation, if the call reached
9150        // the ICD. (Confirm?)
9151        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
9152            auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
9153            if (pSemaphore) {
9154                pSemaphore->signaler.first = VK_NULL_HANDLE;
9155                pSemaphore->signaled = false;
9156            }
9157        }
9158
9159        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
9160            // Note: this is imperfect, in that we can get confused about what
9161            // did or didn't succeed-- but if the app does that, it's confused
9162            // itself just as much.
9163            auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
9164
9165            if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue;  // this present didn't actually happen.
9166
9167            // Mark the image as having been released to the WSI
9168            auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
9169            auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
9170            auto image_state = GetImageState(dev_data, image);
9171            image_state->acquired = false;
9172        }
9173
9174        // Note: even though presentation is directed to a queue, there is no
9175        // direct ordering between QP and subsequent work, so QP (and its
9176        // semaphore waits) /never/ participate in any completion proof.
9177    }
9178
9179    return result;
9180}
9181
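// Shared-swapchain creation applies the same per-swapchain validation used by
// vkCreateSwapchainKHR to each element of pCreateInfos in turn.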
9182static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
9183                                                     const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
9184                                                     std::vector<SURFACE_STATE *> &surface_state,
9185                                                     std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
9186    if (pCreateInfos) {
9187        lock_guard_t lock(global_lock);
9188        for (uint32_t i = 0; i < swapchainCount; i++) {
9189            surface_state.push_back(GetSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
9190            old_swapchain_state.push_back(GetSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
9191            std::stringstream func_name;
9192            func_name << "vkCreateSharedSwapchainsKHR[" << i << "]";
9193            if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i],
9194                                                  old_swapchain_state[i])) {
9195                return true;
9196            }
9197        }
9198    }
9199    return false;
9200}
9201
9202static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
9203                                                    const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
9204                                                    std::vector<SURFACE_STATE *> &surface_state,
9205                                                    std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
9206    if (VK_SUCCESS == result) {
9207        for (uint32_t i = 0; i < swapchainCount; i++) {
9208            auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
9209            if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfos[i].presentMode ||
9210                VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfos[i].presentMode) {
9211                swapchain_state->shared_presentable = true;
9212            }
9213            surface_state[i]->swapchain = swapchain_state.get();
9214            dev_data->swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
9215        }
9216    } else {
9217        for (uint32_t i = 0; i < swapchainCount; i++) {
9218            surface_state[i]->swapchain = nullptr;
9219        }
9220    }
9221    // Spec requires that even if CreateSharedSwapchainKHR fails, oldSwapchain behaves as replaced.
9222    for (uint32_t i = 0; i < swapchainCount; i++) {
9223        if (old_swapchain_state[i]) {
9224            old_swapchain_state[i]->replaced = true;
9225        }
9226        surface_state[i]->old_swapchain = old_swapchain_state[i];
9227    }
9228    return;
9229}
9230
9231VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
9232                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
9233                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
9234    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9235    std::vector<SURFACE_STATE *> surface_state;
9236    std::vector<SWAPCHAIN_NODE *> old_swapchain_state;
9237
9238    if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
9239                                                 old_swapchain_state)) {
9240        return VK_ERROR_VALIDATION_FAILED_EXT;
9241    }
9242
9243    VkResult result =
9244        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
9245
9246    PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
9247                                            old_swapchain_state);
9248
9249    return result;
9250}
9251
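// vkAcquireNextImageKHR validation: at least one of fence/semaphore is required
// to observe completion, the semaphore must not already be signaled, the fence
// must not be in use, and the swapchain must not have been replaced. With n
// swapchain images and a surface minimum of m, the layer flags an acquire once
// more than n - m images are already held, since forward progress is no longer
// guaranteed. (swapchain_data is used unchecked here; an unknown swapchain
// handle would be an application error.)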
9252VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
9253                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
9254    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9255    bool skip = false;
9256
9257    unique_lock_t lock(global_lock);
9258
9259    if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
9260        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9261                        HandleToUint64(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
9262                        "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
9263                        "to determine the completion of this operation.");
9264    }
9265
9266    auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
9267    if (pSemaphore && pSemaphore->signaled) {
9268        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9269                        HandleToUint64(semaphore), __LINE__, VALIDATION_ERROR_16400a0c, "DS",
9270                        "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state. %s",
9271                        validation_error_map[VALIDATION_ERROR_16400a0c]);
9272    }
9273
9274    auto pFence = GetFenceNode(dev_data, fence);
9275    if (pFence) {
9276        skip |= ValidateFenceForSubmit(dev_data, pFence);
9277    }
9278
    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
    if (swapchain_data) {
        if (swapchain_data->replaced) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_REPLACED, "DS",
                            "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still "
                            "present any images it has acquired, but cannot acquire any more.");
        }

        auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
        if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
            uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
                                                     [=](VkImage image) { return GetImageState(dev_data, image)->acquired; });
            // Compare as an addition on the left-hand side to avoid unsigned underflow when
            // minImageCount exceeds the swapchain's image count.
            if (acquired_images + physical_device_state->surfaceCapabilities.minImageCount > swapchain_data->images.size()) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                    HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES, "DS",
                    "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
                    acquired_images);
            }
        }

        if (swapchain_data->images.empty()) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGES_NOT_FOUND, "DS",
                            "vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
                            "vkGetSwapchainImagesKHR after swapchain creation.");
        }
    }
9307
9308    lock.unlock();
9309
9310    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
9311
9312    VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
9313
9314    lock.lock();
9315    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
9316        if (pFence) {
9317            pFence->state = FENCE_INFLIGHT;
9318            pFence->signaler.first = VK_NULL_HANDLE;  // ANI isn't on a queue, so this can't participate in a completion proof.
9319        }
9320
9321        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
9322        if (pSemaphore) {
9323            pSemaphore->signaled = true;
9324            pSemaphore->signaler.first = VK_NULL_HANDLE;
9325        }
9326
        // Mark the image as acquired, guarding against an unknown swapchain handle or an
        // out-of-range index from a non-conformant driver.
        if (swapchain_data && *pImageIndex < swapchain_data->images.size()) {
            auto image = swapchain_data->images[*pImageIndex];
            auto image_state = GetImageState(dev_data, image);
            image_state->acquired = true;
            image_state->shared_presentable = swapchain_data->shared_presentable;
        }
9332    }
9333    lock.unlock();
9334
9335    return result;
9336}
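
// Illustrative application-side pattern that satisfies the vkAcquireNextImageKHR checks
// above -- a sketch for readers of this layer, not code that executes here ('acquire_sem'
// and 'image_index' are placeholder names):
//
//     VkSemaphore acquire_sem;  // previously created with vkCreateSemaphore()
//     uint32_t image_index;
//     // Pass at least one of semaphore/fence so completion can be determined, and keep at
//     // most (imageCount - minImageCount) images acquired at any one time.
//     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, acquire_sem, VK_NULL_HANDLE, &image_index);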
9337
9338VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
9339                                                        VkPhysicalDevice *pPhysicalDevices) {
9340    bool skip = false;
9341    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9342    assert(instance_data);
9343
9344    // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
9345    if (NULL == pPhysicalDevices) {
9346        instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
9347    } else {
9348        if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
9349            // Flag warning here. You can call this without having queried the count, but it may not be
9350            // robust on platforms with multiple physical devices.
9351            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
9352                            0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
9353                            "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
9354                            "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
9355        }  // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
9356        else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
9357            // Having actual count match count from app is not a requirement, so this can be a warning
9358            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9359                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
9360                            "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
9361                            "supported by this instance is %u.",
9362                            *pPhysicalDeviceCount, instance_data->physical_devices_count);
9363        }
9364        instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
9365    }
9366    if (skip) {
9367        return VK_ERROR_VALIDATION_FAILED_EXT;
9368    }
9369    VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
9370    if (NULL == pPhysicalDevices) {
9371        instance_data->physical_devices_count = *pPhysicalDeviceCount;
9372    } else if (result == VK_SUCCESS) {  // Save physical devices
9373        for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
9374            auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
9375            phys_device_state.phys_device = pPhysicalDevices[i];
9376            // Init actual features for each physical device
9377            instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
9378        }
9379    }
9380    return result;
9381}
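
// The state machine above expects the standard two-call enumeration idiom, e.g.
// (application-side illustration only; 'gpus' is a placeholder name):
//
//     uint32_t count = 0;
//     vkEnumeratePhysicalDevices(instance, &count, nullptr);       // records QUERY_COUNT
//     std::vector<VkPhysicalDevice> gpus(count);
//     vkEnumeratePhysicalDevices(instance, &count, gpus.data());   // records QUERY_DETAILS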
9382
9383// Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
9384static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
9385                                                                 PHYSICAL_DEVICE_STATE *pd_state,
9386                                                                 uint32_t requested_queue_family_property_count, bool qfp_null,
9387                                                                 const char *caller_name) {
9388    bool skip = false;
9389    if (!qfp_null) {
9390        // Verify that for each physical device, this command is called first with NULL pQueueFamilyProperties in order to get count
9391        if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
9392            skip |= log_msg(
9393                instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
9394                HandleToUint64(pd_state->phys_device), __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
9395                "%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended "
9396                "to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.",
9397                caller_name, caller_name);
            // Then verify that the pCount passed in on the second call matches what was previously returned
9399        } else if (pd_state->queue_family_count != requested_queue_family_property_count) {
9400            skip |= log_msg(
9401                instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
9402                HandleToUint64(pd_state->phys_device), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
9403                "%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32
9404                ", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32
9405                ". It is recommended to instead receive all the properties by calling %s with pQueueFamilyPropertyCount that was "
9406                "previously obtained by calling %s with NULL pQueueFamilyProperties.",
9407                caller_name, requested_queue_family_property_count, pd_state->queue_family_count, caller_name, caller_name);
9408        }
9409        pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
9410    }
9411
9412    return skip;
9413}
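
// The recommended call sequence that avoids both warnings above, sketched from the
// application's point of view ('qf_props' is a placeholder name):
//
//     uint32_t qf_count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &qf_count, nullptr);
//     std::vector<VkQueueFamilyProperties> qf_props(qf_count);
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &qf_count, qf_props.data());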
9414
9415static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
9416                                                                  PHYSICAL_DEVICE_STATE *pd_state,
9417                                                                  uint32_t *pQueueFamilyPropertyCount,
9418                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
9419    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
9420                                                                (nullptr == pQueueFamilyProperties),
9421                                                                "vkGetPhysicalDeviceQueueFamilyProperties()");
9422}
9423
9424static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_layer_data *instance_data,
9425                                                                      PHYSICAL_DEVICE_STATE *pd_state,
9426                                                                      uint32_t *pQueueFamilyPropertyCount,
9427                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
9428    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
9429                                                                (nullptr == pQueueFamilyProperties),
9430                                                                "vkGetPhysicalDeviceQueueFamilyProperties2KHR()");
9431}
9432
9433// Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
9434static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
9435                                                                    VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
9436    if (!pQueueFamilyProperties) {
9437        if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
9438            pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
9439        pd_state->queue_family_count = count;
9440    } else {  // Save queue family properties
9441        pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
9442        pd_state->queue_family_count = std::max(pd_state->queue_family_count, count);
9443
9444        pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
9445        for (uint32_t i = 0; i < count; ++i) {
9446            pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
9447        }
9448    }
9449}
9450
9451static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
9452                                                                 VkQueueFamilyProperties *pQueueFamilyProperties) {
9453    VkQueueFamilyProperties2KHR *pqfp = nullptr;
    std::vector<VkQueueFamilyProperties2KHR> qfp;
    if (pQueueFamilyProperties) {
        qfp.resize(count);
9457        for (uint32_t i = 0; i < count; ++i) {
9458            qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
9459            qfp[i].pNext = nullptr;
9460            qfp[i].queueFamilyProperties = pQueueFamilyProperties[i];
9461        }
9462        pqfp = qfp.data();
9463    }
9464    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pqfp);
9465}
9466
9467static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
9468                                                                     VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
9469    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pQueueFamilyProperties);
9470}
9471
9472VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
9473                                                                  uint32_t *pQueueFamilyPropertyCount,
9474                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
9475    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9476    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9477    assert(physical_device_state);
9478    unique_lock_t lock(global_lock);
9479
9480    bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state,
9481                                                                      pQueueFamilyPropertyCount, pQueueFamilyProperties);
9482
9483    lock.unlock();
9484
9485    if (skip) return;
9486
9487    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
9488                                                                         pQueueFamilyProperties);
9489
9490    lock.lock();
9491    PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties);
9492}
9493
9494VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
9495                                                                      uint32_t *pQueueFamilyPropertyCount,
9496                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
9497    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9498    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9499    assert(physical_device_state);
9500    unique_lock_t lock(global_lock);
9501
9502    bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_data, physical_device_state,
9503                                                                          pQueueFamilyPropertyCount, pQueueFamilyProperties);
9504
9505    lock.unlock();
9506
9507    if (skip) return;
9508
9509    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
9510                                                                             pQueueFamilyProperties);
9511
9512    lock.lock();
9513    PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(physical_device_state, *pQueueFamilyPropertyCount,
9514                                                             pQueueFamilyProperties);
9515}
9516
9517template <typename TCreateInfo, typename FPtr>
9518static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
9519                              VkSurfaceKHR *pSurface, FPtr fptr) {
9520    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9521
9522    // Call down the call chain:
9523    VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
9524
9525    if (result == VK_SUCCESS) {
9526        unique_lock_t lock(global_lock);
9527        instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
9528        lock.unlock();
9529    }
9530
9531    return result;
9532}
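
// Note: 'fptr' above is a pointer-to-member of VkLayerInstanceDispatchTable, so
// '(instance_data->dispatch_table.*fptr)(...)' calls through the stored entry point.
// Each platform wrapper below instantiates the template with its own member, e.g.:
//
//     CreateSurface(instance, pCreateInfo, pAllocator, pSurface,
//                   &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);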
9533
9534VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
9535    bool skip = false;
9536    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9537    unique_lock_t lock(global_lock);
9538    auto surface_state = GetSurfaceState(instance_data, surface);
9539
    if (surface_state && surface_state->swapchain) {
        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
                        HandleToUint64(instance), __LINE__, VALIDATION_ERROR_26c009e4, "DS",
                        "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed. %s",
                        validation_error_map[VALIDATION_ERROR_26c009e4]);
9545    }
9546    instance_data->surface_map.erase(surface);
9547    lock.unlock();
9548    if (!skip) {
9549        instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
9550    }
9551}
9552
9553VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
9554                                                            const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9555    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
9556}
9557
9558#ifdef VK_USE_PLATFORM_ANDROID_KHR
9559VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
9560                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9561    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
9562}
9563#endif  // VK_USE_PLATFORM_ANDROID_KHR
9564
9565#ifdef VK_USE_PLATFORM_MIR_KHR
9566VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
9567                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9568    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
9569}
9570
9571VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceMirPresentationSupportKHR(VkPhysicalDevice physicalDevice,
9572                                                                          uint32_t queueFamilyIndex, MirConnection *connection) {
9573    bool skip = false;
9574    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9575
9576    unique_lock_t lock(global_lock);
9577    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9578
9579    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2d2009e2,
9580                                              "vkGetPhysicalDeviceMirPresentationSupportKHR", "queueFamilyIndex");
9581
9582    lock.unlock();
9583
9584    if (skip) return VK_FALSE;
9585
9586    // Call down the call chain:
9587    VkBool32 result =
9588        instance_data->dispatch_table.GetPhysicalDeviceMirPresentationSupportKHR(physicalDevice, queueFamilyIndex, connection);
9589
9590    return result;
9591}
9592#endif  // VK_USE_PLATFORM_MIR_KHR
9593
9594#ifdef VK_USE_PLATFORM_WAYLAND_KHR
9595VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
9596                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9597    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
9598}
9599
9600VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
9601                                                                              uint32_t queueFamilyIndex,
9602                                                                              struct wl_display *display) {
9603    bool skip = false;
9604    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9605
9606    unique_lock_t lock(global_lock);
9607    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9608
9609    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f000a34,
9610                                              "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
9611
9612    lock.unlock();
9613
9614    if (skip) return VK_FALSE;
9615
9616    // Call down the call chain:
9617    VkBool32 result =
9618        instance_data->dispatch_table.GetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex, display);
9619
9620    return result;
9621}
9622#endif  // VK_USE_PLATFORM_WAYLAND_KHR
9623
9624#ifdef VK_USE_PLATFORM_WIN32_KHR
9625VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
9626                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9627    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
9628}
9629
9630VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
9631                                                                            uint32_t queueFamilyIndex) {
9632    bool skip = false;
9633    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9634
9635    unique_lock_t lock(global_lock);
9636    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9637
9638    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f200a3a,
9639                                              "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
9640
9641    lock.unlock();
9642
9643    if (skip) return VK_FALSE;
9644
9645    // Call down the call chain:
9646    VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex);
9647
9648    return result;
9649}
9650#endif  // VK_USE_PLATFORM_WIN32_KHR
9651
9652#ifdef VK_USE_PLATFORM_XCB_KHR
9653VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
9654                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9655    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
9656}
9657
9658VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
9659                                                                          uint32_t queueFamilyIndex, xcb_connection_t *connection,
9660                                                                          xcb_visualid_t visual_id) {
9661    bool skip = false;
9662    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9663
9664    unique_lock_t lock(global_lock);
9665    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9666
9667    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f400a40,
9668                                              "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
9669
9670    lock.unlock();
9671
9672    if (skip) return VK_FALSE;
9673
9674    // Call down the call chain:
9675    VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex,
9676                                                                                               connection, visual_id);
9677
9678    return result;
9679}
9680#endif  // VK_USE_PLATFORM_XCB_KHR
9681
9682#ifdef VK_USE_PLATFORM_XLIB_KHR
9683VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
9684                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9685    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
9686}
9687
9688VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
9689                                                                           uint32_t queueFamilyIndex, Display *dpy,
9690                                                                           VisualID visualID) {
9691    bool skip = false;
9692    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9693
9694    unique_lock_t lock(global_lock);
9695    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9696
9697    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f600a46,
9698                                              "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
9699
9700    lock.unlock();
9701
9702    if (skip) return VK_FALSE;
9703
9704    // Call down the call chain:
9705    VkBool32 result =
9706        instance_data->dispatch_table.GetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex, dpy, visualID);
9707
9708    return result;
9709}
9710#endif  // VK_USE_PLATFORM_XLIB_KHR
9711
9712VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
9713                                                                       VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
9714    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9715
9716    unique_lock_t lock(global_lock);
9717    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9718    lock.unlock();
9719
9720    auto result =
9721        instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
9722
    if (result == VK_SUCCESS) {
        // Re-acquire the global lock before updating shared physical-device state, matching
        // the other surface-capability record paths below.
        lock.lock();
        physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
        physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
    }
9727
9728    return result;
9729}
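
// Recording QUERY_DETAILS plus the returned capabilities here is what allows
// vkAcquireNextImageKHR above to check acquired-image counts against the surface's
// minImageCount without re-querying the driver.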
9730
9731static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instance_layer_data *instanceData,
9732                                                                   VkPhysicalDevice physicalDevice,
9733                                                                   VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
9734    unique_lock_t lock(global_lock);
9735    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
9736    physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
9737    physicalDeviceState->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities;
9738}
9739
9740VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
9741                                                                        const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
9742                                                                        VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
9743    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9744
9745    auto result =
9746        instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, pSurfaceInfo, pSurfaceCapabilities);
9747
9748    if (result == VK_SUCCESS) {
9749        PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instanceData, physicalDevice, pSurfaceCapabilities);
9750    }
9751
9752    return result;
9753}
9754
9755static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instance_layer_data *instanceData,
9756                                                                   VkPhysicalDevice physicalDevice,
9757                                                                   VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
9758    unique_lock_t lock(global_lock);
9759    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
9760    physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
9761    physicalDeviceState->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount;
9762    physicalDeviceState->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount;
9763    physicalDeviceState->surfaceCapabilities.currentExtent = pSurfaceCapabilities->currentExtent;
9764    physicalDeviceState->surfaceCapabilities.minImageExtent = pSurfaceCapabilities->minImageExtent;
9765    physicalDeviceState->surfaceCapabilities.maxImageExtent = pSurfaceCapabilities->maxImageExtent;
9766    physicalDeviceState->surfaceCapabilities.maxImageArrayLayers = pSurfaceCapabilities->maxImageArrayLayers;
9767    physicalDeviceState->surfaceCapabilities.supportedTransforms = pSurfaceCapabilities->supportedTransforms;
9768    physicalDeviceState->surfaceCapabilities.currentTransform = pSurfaceCapabilities->currentTransform;
9769    physicalDeviceState->surfaceCapabilities.supportedCompositeAlpha = pSurfaceCapabilities->supportedCompositeAlpha;
9770    physicalDeviceState->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags;
9771}
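
// The EXT capabilities struct is copied field-by-field into the cached KHR-shaped
// capabilities (its extra supportedSurfaceCounters member has no KHR equivalent), so
// downstream checks only ever consult one representation.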
9772
9773VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
9774                                                                        VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
9775    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9776
9777    auto result =
9778        instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities);
9779
9780    if (result == VK_SUCCESS) {
9781        PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instanceData, physicalDevice, pSurfaceCapabilities);
9782    }
9783
9784    return result;
9785}
9786
9787VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
9788                                                                  VkSurfaceKHR surface, VkBool32 *pSupported) {
9789    bool skip = false;
9790    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9791
9792    unique_lock_t lock(global_lock);
9793    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9794    auto surface_state = GetSurfaceState(instance_data, surface);
9795
9796    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2ee009ea,
9797                                              "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
9798
9799    lock.unlock();
9800
9801    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
9802
9803    auto result =
9804        instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
9805
9806    if (result == VK_SUCCESS) {
9807        surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE);
9808    }
9809
9810    return result;
9811}
9812
9813VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
9814                                                                       uint32_t *pPresentModeCount,
9815                                                                       VkPresentModeKHR *pPresentModes) {
9816    bool skip = false;
9817    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9818    unique_lock_t lock(global_lock);
9819    // TODO: this isn't quite right. available modes may differ by surface AND physical device.
9820    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9821    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
9822
9823    if (pPresentModes) {
9824        // Compare the preliminary value of *pPresentModeCount with the value this time:
9825        auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
9826        switch (call_state) {
9827            case UNCALLED:
9828                skip |= log_msg(
9829                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
9830                    HandleToUint64(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                    "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModes, but no prior call with "
                    "NULL pPresentModes has been made to query pPresentModeCount.");
9833                break;
9834            default:
9835                // both query count and query details
9836                if (*pPresentModeCount != prev_mode_count) {
9837                    skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9838                                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), __LINE__,
9839                                    DEVLIMITS_COUNT_MISMATCH, "DL",
9840                                    "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that "
9841                                    "differs from the value "
9842                                    "(%u) that was returned when pPresentModes was NULL.",
9843                                    *pPresentModeCount, prev_mode_count);
9844                }
9845                break;
9846        }
9847    }
9848    lock.unlock();
9849
9850    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
9851
9852    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount,
9853                                                                                        pPresentModes);
9854
9855    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
9856        lock.lock();
9857
9858        if (*pPresentModeCount) {
9859            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
9860            if (*pPresentModeCount > physical_device_state->present_modes.size())
9861                physical_device_state->present_modes.resize(*pPresentModeCount);
9862        }
9863        if (pPresentModes) {
9864            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
9865            for (uint32_t i = 0; i < *pPresentModeCount; i++) {
9866                physical_device_state->present_modes[i] = pPresentModes[i];
9867            }
9868        }
9869    }
9870
9871    return result;
9872}
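
// Two-call idiom this validation encourages (application-side sketch; 'modes' is a
// placeholder name):
//
//     uint32_t mode_count = 0;
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, nullptr);
//     std::vector<VkPresentModeKHR> modes(mode_count);
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, modes.data());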
9873
9874VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
9875                                                                  uint32_t *pSurfaceFormatCount,
9876                                                                  VkSurfaceFormatKHR *pSurfaceFormats) {
9877    bool skip = false;
9878    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9879    unique_lock_t lock(global_lock);
9880    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9881    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
9882
9883    if (pSurfaceFormats) {
9884        auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();
9885
9886        switch (call_state) {
            case UNCALLED:
                // No preliminary value of *pSurfaceFormatCount has been recorded, which likely means the
                // application did not previously call this function with a NULL pSurfaceFormats:
                skip |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                    "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats, but no prior call with "
                    "NULL pSurfaceFormats has been made to query pSurfaceFormatCount.");
                break;
            default:
                if (prev_format_count != *pSurfaceFormatCount) {
                    skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), __LINE__,
                                    DEVLIMITS_COUNT_MISMATCH, "DL",
                                    "vkGetPhysicalDeviceSurfaceFormatsKHR() called with *pSurfaceFormatCount (%u) that differs "
                                    "from the value (%u) that was returned when pSurfaceFormats was NULL.",
                                    *pSurfaceFormatCount, prev_format_count);
                }
                break;
9910        }
9911    }
9912    lock.unlock();
9913
9914    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
9915
9916    // Call down the call chain:
9917    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
9918                                                                                   pSurfaceFormats);
9919
9920    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
9921        lock.lock();
9922
9923        if (*pSurfaceFormatCount) {
9924            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
9925            if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
9926                physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
9927        }
9928        if (pSurfaceFormats) {
9929            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
9930            for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
9931                physical_device_state->surface_formats[i] = pSurfaceFormats[i];
9932            }
9933        }
9934    }
9935    return result;
9936}
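
// VK_INCOMPLETE is accepted alongside VK_SUCCESS above because a caller-supplied count
// smaller than the number of available formats makes the driver write only that many
// entries and return VK_INCOMPLETE; the partial data is still valid to record. E.g.
// (application-side sketch):
//
//     uint32_t n = 1;  // deliberately too small
//     VkSurfaceFormatKHR one_format;
//     VkResult r = vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &n, &one_format);
//     // r == VK_INCOMPLETE; one_format holds the first available format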
9937
9938static void PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instance_layer_data *instanceData, VkPhysicalDevice physicalDevice,
9939                                                              uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats) {
9940    unique_lock_t lock(global_lock);
9941    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
9942    if (*pSurfaceFormatCount) {
9943        if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) {
9944            physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT;
9945        }
9946        if (*pSurfaceFormatCount > physicalDeviceState->surface_formats.size())
9947            physicalDeviceState->surface_formats.resize(*pSurfaceFormatCount);
9948    }
9949    if (pSurfaceFormats) {
9950        if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) {
9951            physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS;
9952        }
9953        for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
9954            physicalDeviceState->surface_formats[i] = pSurfaceFormats[i].surfaceFormat;
9955        }
9956    }
9957}
9958
9959VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
9960                                                                   const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
9961                                                                   uint32_t *pSurfaceFormatCount,
9962                                                                   VkSurfaceFormat2KHR *pSurfaceFormats) {
9963    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9964    auto result = instanceData->dispatch_table.GetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, pSurfaceInfo,
9965                                                                                   pSurfaceFormatCount, pSurfaceFormats);
9966    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
9967        PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instanceData, physicalDevice, pSurfaceFormatCount, pSurfaceFormats);
9968    }
9969    return result;
9970}
9971
9972VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
9973                                                            const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
9974                                                            const VkAllocationCallbacks *pAllocator,
9975                                                            VkDebugReportCallbackEXT *pMsgCallback) {
9976    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9977    VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
9978    if (VK_SUCCESS == res) {
9979        lock_guard_t lock(global_lock);
9980        res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
9981    }
9982    return res;
9983}
9984
9985VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
9986                                                         const VkAllocationCallbacks *pAllocator) {
9987    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9988    instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
9989    lock_guard_t lock(global_lock);
9990    layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
9991}
9992
9993VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
9994                                                 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
9995                                                 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
9996    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9997    instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
9998}
9999
10000VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
10001    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
10002}
10003
10004VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
10005                                                              VkLayerProperties *pProperties) {
10006    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
10007}
10008
10009VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
10010                                                                    VkExtensionProperties *pProperties) {
10011    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
10012        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
10013
10014    return VK_ERROR_LAYER_NOT_PRESENT;
10015}
10016
10017VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
10018                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
10019    if (pLayerName && !strcmp(pLayerName, global_layer.layerName)) return util_GetExtensionProperties(0, NULL, pCount, pProperties);
10020
10021    assert(physicalDevice);
10022
10023    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10024    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
10025}
10026
10027VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroupsKHX(
10028    VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHX *pPhysicalDeviceGroupProperties) {
10029    bool skip = false;
10030    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
10031
10032    if (instance_data) {
10033        // For this instance, flag when EnumeratePhysicalDeviceGroupsKHX goes to QUERY_COUNT and then QUERY_DETAILS.
10034        if (NULL == pPhysicalDeviceGroupProperties) {
10035            instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_COUNT;
10036        } else {
10037            if (UNCALLED == instance_data->vkEnumeratePhysicalDeviceGroupsState) {
10038                // Flag warning here. You can call this without having queried the count, but it may not be
10039                // robust on platforms with multiple physical devices.
10040                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
10041                                VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
10042                                "Call sequence has vkEnumeratePhysicalDeviceGroupsKHX() w/ non-NULL "
10043                                "pPhysicalDeviceGroupProperties. You should first "
10044                                "call vkEnumeratePhysicalDeviceGroupsKHX() w/ NULL pPhysicalDeviceGroupProperties to query "
10045                                "pPhysicalDeviceGroupCount.");
10046            } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
10047            else if (instance_data->physical_device_groups_count != *pPhysicalDeviceGroupCount) {
10048                // Having actual count match count from app is not a requirement, so this can be a warning
10049                skip |=
10050                    log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
10051                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
10052                            "Call to vkEnumeratePhysicalDeviceGroupsKHX() w/ pPhysicalDeviceGroupCount value %u, but actual count "
10053                            "supported by this instance is %u.",
10054                            *pPhysicalDeviceGroupCount, instance_data->physical_device_groups_count);
10055            }
10056            instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_DETAILS;
10057        }
10058        if (skip) {
10059            return VK_ERROR_VALIDATION_FAILED_EXT;
10060        }
        VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroupsKHX(instance, pPhysicalDeviceGroupCount,
                                                                                         pPhysicalDeviceGroupProperties);
10063        if (NULL == pPhysicalDeviceGroupProperties) {
10064            instance_data->physical_device_groups_count = *pPhysicalDeviceGroupCount;
10065        } else if (result == VK_SUCCESS) { // Save physical devices
10066            for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) {
10067                for (uint32_t j = 0; j < pPhysicalDeviceGroupProperties[i].physicalDeviceCount; j++) {
10068                    VkPhysicalDevice cur_phys_dev = pPhysicalDeviceGroupProperties[i].physicalDevices[j];
10069                    auto &phys_device_state = instance_data->physical_device_map[cur_phys_dev];
10070                    phys_device_state.phys_device = cur_phys_dev;
10071                    // Init actual features for each physical device
10072                    instance_data->dispatch_table.GetPhysicalDeviceFeatures(cur_phys_dev, &phys_device_state.features);
10073                }
10074            }
10075        }
10076        return result;
    } else {
        // A null instance_data indicates an invalid instance; without layer data there is no
        // report_data to route DEVLIMITS_INVALID_INSTANCE through, so simply fail the call.
    }
10083    return VK_ERROR_VALIDATION_FAILED_EXT;
10084}
10085
10086VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
10087                                                                 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
10088                                                                 const VkAllocationCallbacks *pAllocator,
10089                                                                 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
10090    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10091    VkResult result =
10092        dev_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
10093    if (VK_SUCCESS == result) {
10094        lock_guard_t lock(global_lock);
10095        // Shadow template createInfo for later updates
10096        safe_VkDescriptorUpdateTemplateCreateInfoKHR *local_create_info =
10097            new safe_VkDescriptorUpdateTemplateCreateInfoKHR(pCreateInfo);
10098        std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
10099        dev_data->desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state);
10100    }
10101    return result;
10102}
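
// The safe_VkDescriptorUpdateTemplateCreateInfoKHR copy above deep-copies the create info
// (including its pDescriptorUpdateEntries array), which is what lets later
// vkUpdateDescriptorSetWithTemplateKHR calls decode their raw pData blobs against the
// template layout after the application's original structs have gone out of scope.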
10103
10104VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
10105                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
10106                                                              const VkAllocationCallbacks *pAllocator) {
10107    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10108    unique_lock_t lock(global_lock);
10109    dev_data->desc_template_map.erase(descriptorUpdateTemplate);
10110    lock.unlock();
10111    dev_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
10112}
10113
// PostCallRecord* handles recording state updates following the call down the chain to UpdateDescriptorSetWithTemplateKHR()
10115static void PostCallRecordUpdateDescriptorSetWithTemplateKHR(layer_data *device_data, VkDescriptorSet descriptorSet,
10116                                                             VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
10117                                                             const void *pData) {
10118    auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
    if (template_map_entry == device_data->desc_template_map.end()) {
        assert(0);
        return;  // Don't dereference an end() iterator in release builds, where assert() is a no-op.
    }
10122
10123    cvdescriptorset::PerformUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_map_entry->second, pData);
10124}
10125
10126VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
10127                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
10128                                                              const void *pData) {
10129    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10130    device_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData);
10131
10132    PostCallRecordUpdateDescriptorSetWithTemplateKHR(device_data, descriptorSet, descriptorUpdateTemplate, pData);
10133}
10134
10135VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
10136                                                               VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
10137                                                               VkPipelineLayout layout, uint32_t set, const void *pData) {
10138    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
10139    dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData);
10140}
10141
10142static void PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_layer_data *instanceData,
10143                                                                     VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
10144                                                                     VkDisplayPlanePropertiesKHR *pProperties) {
10145    unique_lock_t lock(global_lock);
10146    auto physical_device_state = GetPhysicalDeviceState(instanceData, physicalDevice);
10147
10148    if (*pPropertyCount) {
10149        if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) {
10150            physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT;
10151        }
10152        physical_device_state->display_plane_property_count = *pPropertyCount;
10153    }
10154    if (pProperties) {
10155        if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_DETAILS) {
10156            physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_DETAILS;
10157        }
10158    }
10159}
10160
10161VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
10162                                                                          VkDisplayPlanePropertiesKHR *pProperties) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    VkResult result =
        instance_data->dispatch_table.GetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties);
10167
10168    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
10169        PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_data, physicalDevice, pPropertyCount, pProperties);
10170    }
10171
10172    return result;
10173}
10174
10175static bool ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_layer_data *instance_data,
10176                                                                    VkPhysicalDevice physicalDevice, uint32_t planeIndex,
10177                                                                    const char *api_name) {
10178    bool skip = false;
10179    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
10180    if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) {
10181        skip |= log_msg(
10182            instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
10183            HandleToUint64(physicalDevice), __LINE__, SWAPCHAIN_GET_SUPPORTED_DISPLAYS_WITHOUT_QUERY, "DL",
10184            "Potential problem with calling %s() without first querying vkGetPhysicalDeviceDisplayPlanePropertiesKHR.", api_name);
10185    } else {
10186        if (planeIndex >= physical_device_state->display_plane_property_count) {
10187            skip |= log_msg(
10188                instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
10189                HandleToUint64(physicalDevice), __LINE__, VALIDATION_ERROR_29c009c2, "DL",
                "%s(): planeIndex must be in the range [0, %u] that was returned by vkGetPhysicalDeviceDisplayPlanePropertiesKHR. "
10191                "Do you have the plane index hardcoded? %s",
10192                api_name, physical_device_state->display_plane_property_count - 1, validation_error_map[VALIDATION_ERROR_29c009c2]);
        }
    }
    return skip;
}

static bool PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
                                                               uint32_t planeIndex) {
    bool skip = false;
    lock_guard_t lock(global_lock);
    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
                                                                    "vkGetDisplayPlaneSupportedDisplaysKHR");
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                   uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    bool skip = PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_data, physicalDevice, planeIndex);
    if (!skip) {
        result =
            instance_data->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
    }
    return result;
}

static bool PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
                                                          uint32_t planeIndex) {
    bool skip = false;
    lock_guard_t lock(global_lock);
    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
                                                                    "vkGetDisplayPlaneCapabilitiesKHR");
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
                                                              uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    bool skip = PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_data, physicalDevice, planeIndex);

    if (!skip) {
        result = instance_data->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
    }

    return result;
}

// Record or clear the debug name for an object; later validation messages can look the name up
// in debugObjectNameMap to identify the object by its application-supplied name.
VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
    unique_lock_t lock(global_lock);
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (pNameInfo->pObjectName) {
        device_data->report_data->debugObjectNameMap->insert(
            std::make_pair(pNameInfo->object, std::string(pNameInfo->pObjectName)));
    } else {
        device_data->report_data->debugObjectNameMap->erase(pNameInfo->object);
    }
    lock.unlock();
    VkResult result = device_data->dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo);
    return result;
}
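
// Illustrative sketch (not part of the layer): how an application would attach a debug name
// that ends up in debugObjectNameMap above. The handles 'device' and 'buffer' are assumed
// valid, with VK_EXT_debug_marker enabled at device creation.
//
//     VkDebugMarkerObjectNameInfoEXT name_info = {};
//     name_info.sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT;
//     name_info.objectType = VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
//     name_info.object = (uint64_t)buffer;
//     name_info.pObjectName = "staging_buffer";
//     vkDebugMarkerSetObjectNameEXT(device, &name_info);
//
// Passing pObjectName == nullptr through the same entry point removes the name again.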

VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(VkDevice device, VkDebugMarkerObjectTagInfoEXT *pTagInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = device_data->dispatch_table.DebugMarkerSetObjectTagEXT(device, pTagInfo);
    return result;
}

VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    device_data->dispatch_table.CmdDebugMarkerBeginEXT(commandBuffer, pMarkerInfo);
}

VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    device_data->dispatch_table.CmdDebugMarkerEndEXT(commandBuffer);
}

VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerInsertEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    device_data->dispatch_table.CmdDebugMarkerInsertEXT(commandBuffer, pMarkerInfo);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName);
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName);

// Map of all APIs to be intercepted by this layer
static const std::unordered_map<std::string, void*> name_to_funcptr_map = {
    {"vkGetInstanceProcAddr", (void*)GetInstanceProcAddr},
    {"vk_layerGetPhysicalDeviceProcAddr", (void*)GetPhysicalDeviceProcAddr},
    {"vkGetDeviceProcAddr", (void*)GetDeviceProcAddr},
    {"vkCreateInstance", (void*)CreateInstance},
    {"vkCreateDevice", (void*)CreateDevice},
    {"vkEnumeratePhysicalDevices", (void*)EnumeratePhysicalDevices},
    {"vkGetPhysicalDeviceQueueFamilyProperties", (void*)GetPhysicalDeviceQueueFamilyProperties},
    {"vkDestroyInstance", (void*)DestroyInstance},
    {"vkEnumerateInstanceLayerProperties", (void*)EnumerateInstanceLayerProperties},
    {"vkEnumerateDeviceLayerProperties", (void*)EnumerateDeviceLayerProperties},
    {"vkEnumerateInstanceExtensionProperties", (void*)EnumerateInstanceExtensionProperties},
    {"vkEnumerateDeviceExtensionProperties", (void*)EnumerateDeviceExtensionProperties},
    {"vkCreateDescriptorUpdateTemplateKHR", (void*)CreateDescriptorUpdateTemplateKHR},
    {"vkDestroyDescriptorUpdateTemplateKHR", (void*)DestroyDescriptorUpdateTemplateKHR},
    {"vkUpdateDescriptorSetWithTemplateKHR", (void*)UpdateDescriptorSetWithTemplateKHR},
    {"vkCmdPushDescriptorSetWithTemplateKHR", (void*)CmdPushDescriptorSetWithTemplateKHR},
    {"vkCreateSwapchainKHR", (void*)CreateSwapchainKHR},
    {"vkDestroySwapchainKHR", (void*)DestroySwapchainKHR},
    {"vkGetSwapchainImagesKHR", (void*)GetSwapchainImagesKHR},
    {"vkAcquireNextImageKHR", (void*)AcquireNextImageKHR},
    {"vkQueuePresentKHR", (void*)QueuePresentKHR},
    {"vkQueueSubmit", (void*)QueueSubmit},
    {"vkWaitForFences", (void*)WaitForFences},
    {"vkGetFenceStatus", (void*)GetFenceStatus},
    {"vkQueueWaitIdle", (void*)QueueWaitIdle},
    {"vkDeviceWaitIdle", (void*)DeviceWaitIdle},
    {"vkGetDeviceQueue", (void*)GetDeviceQueue},
    {"vkDestroyDevice", (void*)DestroyDevice},
    {"vkDestroyFence", (void*)DestroyFence},
    {"vkResetFences", (void*)ResetFences},
    {"vkDestroySemaphore", (void*)DestroySemaphore},
    {"vkDestroyEvent", (void*)DestroyEvent},
    {"vkDestroyQueryPool", (void*)DestroyQueryPool},
    {"vkDestroyBuffer", (void*)DestroyBuffer},
    {"vkDestroyBufferView", (void*)DestroyBufferView},
    {"vkDestroyImage", (void*)DestroyImage},
    {"vkDestroyImageView", (void*)DestroyImageView},
    {"vkDestroyShaderModule", (void*)DestroyShaderModule},
    {"vkDestroyPipeline", (void*)DestroyPipeline},
    {"vkDestroyPipelineLayout", (void*)DestroyPipelineLayout},
    {"vkDestroySampler", (void*)DestroySampler},
    {"vkDestroyDescriptorSetLayout", (void*)DestroyDescriptorSetLayout},
    {"vkDestroyDescriptorPool", (void*)DestroyDescriptorPool},
    {"vkDestroyFramebuffer", (void*)DestroyFramebuffer},
    {"vkDestroyRenderPass", (void*)DestroyRenderPass},
    {"vkCreateBuffer", (void*)CreateBuffer},
    {"vkCreateBufferView", (void*)CreateBufferView},
    {"vkCreateImage", (void*)CreateImage},
    {"vkCreateImageView", (void*)CreateImageView},
    {"vkCreateFence", (void*)CreateFence},
    {"vkCreatePipelineCache", (void*)CreatePipelineCache},
    {"vkDestroyPipelineCache", (void*)DestroyPipelineCache},
    {"vkGetPipelineCacheData", (void*)GetPipelineCacheData},
    {"vkMergePipelineCaches", (void*)MergePipelineCaches},
    {"vkCreateGraphicsPipelines", (void*)CreateGraphicsPipelines},
    {"vkCreateComputePipelines", (void*)CreateComputePipelines},
    {"vkCreateSampler", (void*)CreateSampler},
    {"vkCreateDescriptorSetLayout", (void*)CreateDescriptorSetLayout},
    {"vkCreatePipelineLayout", (void*)CreatePipelineLayout},
    {"vkCreateDescriptorPool", (void*)CreateDescriptorPool},
    {"vkResetDescriptorPool", (void*)ResetDescriptorPool},
    {"vkAllocateDescriptorSets", (void*)AllocateDescriptorSets},
    {"vkFreeDescriptorSets", (void*)FreeDescriptorSets},
    {"vkUpdateDescriptorSets", (void*)UpdateDescriptorSets},
    {"vkCreateCommandPool", (void*)CreateCommandPool},
    {"vkDestroyCommandPool", (void*)DestroyCommandPool},
    {"vkResetCommandPool", (void*)ResetCommandPool},
    {"vkCreateQueryPool", (void*)CreateQueryPool},
    {"vkAllocateCommandBuffers", (void*)AllocateCommandBuffers},
    {"vkFreeCommandBuffers", (void*)FreeCommandBuffers},
    {"vkBeginCommandBuffer", (void*)BeginCommandBuffer},
    {"vkEndCommandBuffer", (void*)EndCommandBuffer},
    {"vkResetCommandBuffer", (void*)ResetCommandBuffer},
    {"vkCmdBindPipeline", (void*)CmdBindPipeline},
    {"vkCmdSetViewport", (void*)CmdSetViewport},
    {"vkCmdSetScissor", (void*)CmdSetScissor},
    {"vkCmdSetLineWidth", (void*)CmdSetLineWidth},
    {"vkCmdSetDepthBias", (void*)CmdSetDepthBias},
    {"vkCmdSetBlendConstants", (void*)CmdSetBlendConstants},
    {"vkCmdSetDepthBounds", (void*)CmdSetDepthBounds},
    {"vkCmdSetStencilCompareMask", (void*)CmdSetStencilCompareMask},
    {"vkCmdSetStencilWriteMask", (void*)CmdSetStencilWriteMask},
    {"vkCmdSetStencilReference", (void*)CmdSetStencilReference},
    {"vkCmdBindDescriptorSets", (void*)CmdBindDescriptorSets},
    {"vkCmdBindVertexBuffers", (void*)CmdBindVertexBuffers},
    {"vkCmdBindIndexBuffer", (void*)CmdBindIndexBuffer},
    {"vkCmdDraw", (void*)CmdDraw},
    {"vkCmdDrawIndexed", (void*)CmdDrawIndexed},
    {"vkCmdDrawIndirect", (void*)CmdDrawIndirect},
    {"vkCmdDrawIndexedIndirect", (void*)CmdDrawIndexedIndirect},
    {"vkCmdDispatch", (void*)CmdDispatch},
    {"vkCmdDispatchIndirect", (void*)CmdDispatchIndirect},
    {"vkCmdCopyBuffer", (void*)CmdCopyBuffer},
    {"vkCmdCopyImage", (void*)CmdCopyImage},
    {"vkCmdBlitImage", (void*)CmdBlitImage},
    {"vkCmdCopyBufferToImage", (void*)CmdCopyBufferToImage},
    {"vkCmdCopyImageToBuffer", (void*)CmdCopyImageToBuffer},
    {"vkCmdUpdateBuffer", (void*)CmdUpdateBuffer},
    {"vkCmdFillBuffer", (void*)CmdFillBuffer},
    {"vkCmdClearColorImage", (void*)CmdClearColorImage},
    {"vkCmdClearDepthStencilImage", (void*)CmdClearDepthStencilImage},
    {"vkCmdClearAttachments", (void*)CmdClearAttachments},
    {"vkCmdResolveImage", (void*)CmdResolveImage},
    {"vkGetImageSubresourceLayout", (void*)GetImageSubresourceLayout},
    {"vkCmdSetEvent", (void*)CmdSetEvent},
    {"vkCmdResetEvent", (void*)CmdResetEvent},
    {"vkCmdWaitEvents", (void*)CmdWaitEvents},
    {"vkCmdPipelineBarrier", (void*)CmdPipelineBarrier},
    {"vkCmdBeginQuery", (void*)CmdBeginQuery},
    {"vkCmdEndQuery", (void*)CmdEndQuery},
    {"vkCmdResetQueryPool", (void*)CmdResetQueryPool},
    {"vkCmdCopyQueryPoolResults", (void*)CmdCopyQueryPoolResults},
    {"vkCmdPushConstants", (void*)CmdPushConstants},
    {"vkCmdWriteTimestamp", (void*)CmdWriteTimestamp},
    {"vkCreateFramebuffer", (void*)CreateFramebuffer},
    {"vkCreateShaderModule", (void*)CreateShaderModule},
    {"vkCreateRenderPass", (void*)CreateRenderPass},
    {"vkCmdBeginRenderPass", (void*)CmdBeginRenderPass},
    {"vkCmdNextSubpass", (void*)CmdNextSubpass},
    {"vkCmdEndRenderPass", (void*)CmdEndRenderPass},
    {"vkCmdExecuteCommands", (void*)CmdExecuteCommands},
    {"vkCmdDebugMarkerBeginEXT", (void*)CmdDebugMarkerBeginEXT},
    {"vkCmdDebugMarkerEndEXT", (void*)CmdDebugMarkerEndEXT},
    {"vkCmdDebugMarkerInsertEXT", (void*)CmdDebugMarkerInsertEXT},
    {"vkDebugMarkerSetObjectNameEXT", (void*)DebugMarkerSetObjectNameEXT},
    {"vkDebugMarkerSetObjectTagEXT", (void*)DebugMarkerSetObjectTagEXT},
    {"vkSetEvent", (void*)SetEvent},
    {"vkMapMemory", (void*)MapMemory},
    {"vkUnmapMemory", (void*)UnmapMemory},
    {"vkFlushMappedMemoryRanges", (void*)FlushMappedMemoryRanges},
    {"vkInvalidateMappedMemoryRanges", (void*)InvalidateMappedMemoryRanges},
    {"vkAllocateMemory", (void*)AllocateMemory},
    {"vkFreeMemory", (void*)FreeMemory},
    {"vkBindBufferMemory", (void*)BindBufferMemory},
    {"vkGetBufferMemoryRequirements", (void*)GetBufferMemoryRequirements},
    {"vkGetImageMemoryRequirements", (void*)GetImageMemoryRequirements},
    {"vkGetQueryPoolResults", (void*)GetQueryPoolResults},
    {"vkBindImageMemory", (void*)BindImageMemory},
    {"vkQueueBindSparse", (void*)QueueBindSparse},
    {"vkCreateSemaphore", (void*)CreateSemaphore},
    {"vkCreateEvent", (void*)CreateEvent},
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    {"vkCreateAndroidSurfaceKHR", (void*)CreateAndroidSurfaceKHR},
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
    {"vkCreateMirSurfaceKHR", (void*)CreateMirSurfaceKHR},
    {"vkGetPhysicalDeviceMirPresentationSupportKHR", (void*)GetPhysicalDeviceMirPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
    {"vkCreateWaylandSurfaceKHR", (void*)CreateWaylandSurfaceKHR},
    {"vkGetPhysicalDeviceWaylandPresentationSupportKHR", (void*)GetPhysicalDeviceWaylandPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
    {"vkCreateWin32SurfaceKHR", (void*)CreateWin32SurfaceKHR},
    {"vkGetPhysicalDeviceWin32PresentationSupportKHR", (void*)GetPhysicalDeviceWin32PresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
    {"vkCreateXcbSurfaceKHR", (void*)CreateXcbSurfaceKHR},
    {"vkGetPhysicalDeviceXcbPresentationSupportKHR", (void*)GetPhysicalDeviceXcbPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
    {"vkCreateXlibSurfaceKHR", (void*)CreateXlibSurfaceKHR},
    {"vkGetPhysicalDeviceXlibPresentationSupportKHR", (void*)GetPhysicalDeviceXlibPresentationSupportKHR},
#endif
    {"vkCreateDisplayPlaneSurfaceKHR", (void*)CreateDisplayPlaneSurfaceKHR},
    {"vkDestroySurfaceKHR", (void*)DestroySurfaceKHR},
    {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", (void*)GetPhysicalDeviceSurfaceCapabilitiesKHR},
    {"vkGetPhysicalDeviceSurfaceCapabilities2KHR", (void*)GetPhysicalDeviceSurfaceCapabilities2KHR},
    {"vkGetPhysicalDeviceSurfaceCapabilities2EXT", (void*)GetPhysicalDeviceSurfaceCapabilities2EXT},
    {"vkGetPhysicalDeviceSurfaceSupportKHR", (void*)GetPhysicalDeviceSurfaceSupportKHR},
    {"vkGetPhysicalDeviceSurfacePresentModesKHR", (void*)GetPhysicalDeviceSurfacePresentModesKHR},
    {"vkGetPhysicalDeviceSurfaceFormatsKHR", (void*)GetPhysicalDeviceSurfaceFormatsKHR},
    {"vkGetPhysicalDeviceSurfaceFormats2KHR", (void*)GetPhysicalDeviceSurfaceFormats2KHR},
    {"vkGetPhysicalDeviceQueueFamilyProperties2KHR", (void*)GetPhysicalDeviceQueueFamilyProperties2KHR},
    {"vkEnumeratePhysicalDeviceGroupsKHX", (void*)EnumeratePhysicalDeviceGroupsKHX},
    {"vkCreateDebugReportCallbackEXT", (void*)CreateDebugReportCallbackEXT},
    {"vkDestroyDebugReportCallbackEXT", (void*)DestroyDebugReportCallbackEXT},
    {"vkDebugReportMessageEXT", (void*)DebugReportMessageEXT},
    {"vkGetPhysicalDeviceDisplayPlanePropertiesKHR", (void*)GetPhysicalDeviceDisplayPlanePropertiesKHR},
    {"vkGetDisplayPlaneSupportedDisplaysKHR", (void*)GetDisplayPlaneSupportedDisplaysKHR},
    {"vkGetDisplayPlaneCapabilitiesKHR", (void*)GetDisplayPlaneCapabilitiesKHR},
};

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
    assert(device);
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    // Is API to be intercepted by this layer?
    const auto &item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

    auto &table = device_data->dispatch_table;
    if (!table.GetDeviceProcAddr) return nullptr;
    return table.GetDeviceProcAddr(device, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    instance_layer_data *instance_data;
    // Is API to be intercepted by this layer?
    const auto &item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

    instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    auto &table = instance_data->dispatch_table;
    if (!table.GetInstanceProcAddr) return nullptr;
    return table.GetInstanceProcAddr(instance, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
    assert(instance);
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);

    auto &table = instance_data->dispatch_table;
    if (!table.GetPhysicalDeviceProcAddr) return nullptr;
    return table.GetPhysicalDeviceProcAddr(instance, funcName);
}

}  // namespace core_validation

// loader-layer interface v0: these exports are plain wrappers, since this library contains only one layer

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                                      VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
                                                                                  VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                                                VkLayerProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
                                                                                           const char *funcName) {
    return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
    assert(pVersionStruct != NULL);
    assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);

    // Fill in the function pointers if our version is at least capable of having the structure contain them.
    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
        pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
        pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
        pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
    }

    if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        core_validation::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
    } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
    }

    return VK_SUCCESS;
}
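
// Illustrative sketch (not part of this file): the call a loader makes into the export above
// during negotiation, assuming a loader built against the same layer interface headers:
//
//     VkNegotiateLayerInterface negotiate = {};
//     negotiate.sType = LAYER_NEGOTIATE_INTERFACE_STRUCT;
//     negotiate.loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
//     if (vkNegotiateLoaderLayerInterfaceVersion(&negotiate) == VK_SUCCESS) {
//         // On return, loaderLayerInterfaceVersion holds the agreed version, and for
//         // versions >= 2 the pfnGetInstanceProcAddr / pfnGetDeviceProcAddr /
//         // pfnGetPhysicalDeviceProcAddr pointers are filled in.
//     }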