core_validation.cpp revision 3025e72bc2b727622969d036966f50057392551a
/* Copyright (c) 2015-2017 The Khronos Group Inc.
 * Copyright (c) 2015-2017 Valve Corporation
 * Copyright (c) 2015-2017 LunarG, Inc.
 * Copyright (C) 2015-2017 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: Dustin Graves <dustin@lunarg.com>
 * Author: Jeremy Hayes <jeremy@lunarg.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Karl Schultz <karl@lunarg.com>
 * Author: Mark Young <marky@lunarg.com>
 * Author: Mike Schuchardt <mikes@lunarg.com>
 * Author: Mike Weiblen <mikew@lunarg.com>
 * Author: Tony Barbour <tony@LunarG.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <inttypes.h>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "core_validation.h"
#include "buffer_validation.h"
#include "shader_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)      \
    {                        \
        printf(__VA_ARGS__); \
        printf("\n");        \
    }
#endif
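// LOGCONSOLE is for messages that must reach the user before a debug-report callback can be
// installed. Illustrative use only (not a verbatim call site from this file):
//     LOGCONSOLE("Layer %s could not be activated.", "VK_LAYER_LUNARG_core_validation");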

// TODO: remove on NDK update (r15 will probably have proper STL impl)
#ifdef __ANDROID__
namespace std {

template <typename T>
std::string to_string(T var) {
    std::ostringstream ss;
    ss << var;
    return ss.str();
}
}
#endif

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

using mutex_t = std::mutex;
using lock_guard_t = std::lock_guard<mutex_t>;
using unique_lock_t = std::unique_lock<mutex_t>;

namespace core_validation {

using std::unordered_map;
using std::unordered_set;
using std::unique_ptr;
using std::vector;
using std::string;
using std::stringstream;
using std::max;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);

// A special value of (0xFFFFFFFF, 0xFFFFFFFF) indicates that the surface size will be determined
// by the extent of a swapchain targeting the surface.
static const uint32_t kSurfaceSizeFromSwapchain = 0xFFFFFFFFu;

struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
    uint32_t physical_devices_count = 0;
    CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
    uint32_t physical_device_groups_count = 0;
    CHECK_DISABLED disabled = {};

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;

    InstanceExtensions extensions;
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;

    DeviceExtensions extensions = {};
    unordered_set<VkQueue> queues;  // All queues under given device
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_STATE> queueMap;
    unordered_map<VkEvent, EVENT_STATE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_STATE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    unordered_map<VkDescriptorUpdateTemplateKHR, unique_ptr<TEMPLATE_STATE>> desc_template_map;
    unordered_map<VkSwapchainKHR, std::unique_ptr<SWAPCHAIN_NODE>> swapchainMap;

    VkDevice device = VK_NULL_HANDLE;
    VkPhysicalDevice physical_device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
    VkPhysicalDeviceProperties phys_dev_props = {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;

static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo>
void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
        }
    }
}
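// Illustrative ordering (an assumption about the caller, not enforced beyond the check above):
//     ppEnabledLayerNames = { "VK_LAYER_LUNARG_core_validation", "VK_LAYER_GOOGLE_unique_objects" }
// activates this layer before unique_objects, so the console warning is never emitted.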

// TODO : This can be much smarter, using separate locks for separate global data
static mutex_t global_lock;
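// Typical usage sketch: an entry point acquires global_lock before touching the shared maps,
//     unique_lock_t lock(global_lock);
//     auto *image_state = GetImageState(dev_data, image);
// and releases it (lock.unlock()) before calling down the dispatch table.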

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *GetImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_STATE *GetSamplerState(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
IMAGE_STATE *GetImageState(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer state ptr for specified buffer or else NULL
BUFFER_STATE *GetBufferState(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->swapchainMap.find(swapchain);
    if (swp_it == dev_data->swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return buffer view state ptr for specified bufferView or else NULL
BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
    auto bv_it = dev_data->bufferViewMap.find(buffer_view);
    if (bv_it == dev_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}
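// The Get*State/Get*Node accessors here and below share one pattern: look the handle up in the
// per-device (or per-instance) map and return null when it is unknown, so callers check the
// result, e.g. (illustrative):
//     auto view_state = GetImageViewState(dev_data, image_view);
//     if (!view_state) { /* handle is invalid or already destroyed */ }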

FENCE_NODE *GetFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_STATE *GetEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *GetQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_STATE *GetQueueState(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *GetSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

SURFACE_STATE *GetSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    auto it = instance_data->surface_map.find(surface);
    if (it == instance_data->surface_map.end()) {
        return nullptr;
    }
    return &it->second;
}

DeviceExtensions const *GetEnabledExtensions(layer_data const *dev_data) {
    return &dev_data->extensions;
}

// Return ptr to memory binding for given handle of specified type
static BINDABLE *GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
    switch (type) {
        case kVulkanObjectTypeImage:
            return GetImageState(dev_data, VkImage(handle));
        case kVulkanObjectTypeBuffer:
            return GetBufferState(dev_data, VkBuffer(handle));
        default:
            break;
    }
    return nullptr;
}
// prototype
GLOBAL_CB_NODE *GetCBNode(layer_data const *, const VkCommandBuffer);

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *dev_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    dev_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle, VulkanObjectType type,
                                  const char *functionName) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, HandleToUint64(mem), object_string[type], bound_object_handle);
        }
    }
    return false;
}
// For given image_state
//  If mem is special swapchain key, then verify that image_state valid member is true
//  Else verify that the image's bound memory range is valid
bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_state->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           HandleToUint64(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, HandleToUint64(image_state->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), kVulkanObjectTypeImage,
                                     functionName);
    }
    return false;
}
// For given buffer_state, verify that the range it's bound to is valid
bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer,
                                 functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_state to valid param value
//  Else set the image's bound memory range to valid param value
void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_state->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
    SetMemoryValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), valid);
}
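// The *MemoryValid helpers above implement a write-before-read heuristic: a write records
// Set*MemoryValid(..., true) and a read calls Validate*MemoryIsValid(), which only warns
// (VK_DEBUG_REPORT_WARNING_BIT_EXT) when the bound range has not yet been marked valid.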

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    sampler_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(sampler_state->sampler), kVulkanObjectTypeSampler});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        for (auto mem_binding : image_state->GetBoundMemory()) {
            DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
            if (pMemInfo) {
                pMemInfo->cb_bindings.insert(cb_node);
                // Now update CBInfo's Mem reference list
                cb_node->memObjs.insert(mem_binding);
            }
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({HandleToUint64(image_state->image), kVulkanObjectTypeImage});
        image_state->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(view_state->image_view), kVulkanObjectTypeImageView});
    auto image_state = GetImageState(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_state) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
    // First update CB binding in MemObj mini CB list
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
        if (pMemInfo) {
            pMemInfo->cb_bindings.insert(cb_node);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(mem_binding);
        }
    }
    // Now update cb binding for buffer
    cb_node->object_bindings.insert({HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer});
    buffer_state->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(view_state->buffer_view), kVulkanObjectTypeBufferView});
    auto buffer_state = GetBufferState(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
    }
}
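// The AddCommandBufferBinding* helpers above maintain a bidirectional graph: each object tracks
// the command buffers that reference it (cb_bindings), while the command buffer tracks the
// objects and memory it references (object_bindings, memObjs). Recording-time sketch (illustrative):
//     AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
// so that destroying either side can find and invalidate the other.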

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (cb_node) {
        if (cb_node->memObjs.size() > 0) {
            for (auto mem : cb_node->memObjs) {
                DEVICE_MEM_INFO *pInfo = GetMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->cb_bindings.erase(cb_node);
                }
            }
            cb_node->memObjs.clear();
        }
    }
}

// Clear a single object binding from given memory object (missing bindings are silently ignored)
static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
    if (mem_info) {
        mem_info->obj_bindings.erase({handle, type});
    }
    return false;
}

// ClearMemoryObjectBindings clears the binding of objects to memory
//  For the given object it pulls the memory bindings and makes sure that the bindings
//  no longer refer to the object being cleared. This occurs when objects are destroyed.
bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
    bool skip = false;
    BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
    if (mem_binding) {
        if (!mem_binding->sparse) {
            skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
        } else {  // Sparse, clear all bindings
            for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
                skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
            }
        }
    }
    return skip;
}

// For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
                                                      " used with no memory bound. Memory should be bound by calling "
                                                      "vkBind%sMemory(). %s",
                         api_name, type_name, handle, type_name, validation_error_map[error_code]);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
                                                      " used with no memory bound and previously bound memory was freed. "
                                                      "Memory must not be freed prior to this operation. %s",
                         api_name, type_name, handle, validation_error_map[error_code]);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
                                  UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), api_name, "Image",
                                          error_code);
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
                                   UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), api_name,
                                          "Buffer", error_code);
    }
    return result;
}

// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object.
// Corresponding valid usage checks are in ValidateSetMemBinding().
static void SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type, const char *apiName) {
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // For image objects, make sure default memory state is correctly set
            // TODO : What's the best/correct way to handle this?
            if (kVulkanObjectTypeImage == type) {
                auto const image_state = GetImageState(dev_data, VkImage(handle));
                if (image_state) {
                    VkImageCreateInfo ici = image_state->createInfo;
                    if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                        // TODO::  More memory state transition stuff.
                    }
                }
            }
            mem_binding->binding.mem = mem;
        }
    }
}

// Valid usage checks for a call to SetMemBinding().
// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
static bool ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
                                  const char *apiName) {
    bool skip = false;
    // It's an error to bind an object to NULL memory
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        if (mem_binding->sparse) {
            UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_1740082a;
            const char *handle_type = "IMAGE";
            if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
                error_code = VALIDATION_ERROR_1700080c;
                handle_type = "BUFFER";
            } else {
                assert(strcmp(apiName, "vkBindImageMemory()") == 0);
            }
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem), __LINE__, error_code, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which was created with sparse memory flags (VK_%s_CREATE_SPARSE_*_BIT). %s",
                            apiName, HandleToUint64(mem), handle, handle_type, validation_error_map[error_code]);
        }
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = GetMemObjInfo(dev_data, mem_binding->binding.mem);
            if (prev_binding) {
                UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_17400828;
                if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
                    error_code = VALIDATION_ERROR_1700080a;
                } else {
                    assert(strcmp(apiName, "vkBindImageMemory()") == 0);
                }
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(mem), __LINE__, error_code, "MEM",
                                "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                ") which has already been bound to mem object 0x%" PRIxLEAST64 ". %s",
                                apiName, HandleToUint64(mem), handle, HandleToUint64(prev_binding->mem),
                                validation_error_map[error_code]);
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
                                "Vulkan so this attempt to bind to new memory is not allowed.",
                                apiName, HandleToUint64(mem), handle);
            }
        }
    }
    return skip;
}
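// Sketch of the intended validate/record pairing for these helpers (assumed from the comment on
// SetMemBinding() above, not a verbatim call site):
//     skip |= ValidateSetMemBinding(dev_data, mem, handle, kVulkanObjectTypeBuffer, "vkBindBufferMemory()");
//     if (!skip) SetMemBinding(dev_data, mem, handle, kVulkanObjectTypeBuffer, "vkBindBufferMemory()");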

// For NULL mem case, clear any previous binding. Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return true if an error was logged, false otherwise
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
    bool skip = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        assert(mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, binding.mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            mem_binding->sparse_bindings.insert(binding);
        }
    }
    return skip;
}

// Check object status for selected flag state
static bool validate_status(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
                            const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    if (!(pNode->status & status_mask)) {
        char const *const message = validation_error_map[msg_code];
        return log_msg(dev_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(pNode->commandBuffer), __LINE__, msg_code, "DS", "command buffer object 0x%p: %s. %s.",
                       pNode->commandBuffer, fail_msg, message);
    }
    return false;
}

// Retrieve pipeline node ptr for given pipeline object
static PIPELINE_STATE *getPipelineState(layer_data const *dev_data, VkPipeline pipeline) {
    auto it = dev_data->pipelineMap.find(pipeline);
    if (it == dev_data->pipelineMap.end()) {
        return nullptr;
    }
    return it->second;
}

RENDER_PASS_STATE *GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass) {
    auto it = dev_data->renderPassMap.find(renderpass);
    if (it == dev_data->renderPassMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

FRAMEBUFFER_STATE *GetFramebufferState(const layer_data *dev_data, VkFramebuffer framebuffer) {
    auto it = dev_data->frameBufferMap.find(framebuffer);
    if (it == dev_data->frameBufferMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(layer_data const *dev_data,
                                                                                         VkDescriptorSetLayout dsLayout) {
    auto it = dev_data->descriptorSetLayoutMap.find(dsLayout);
    if (it == dev_data->descriptorSetLayoutMap.end()) {
        return nullptr;
    }
    return it->second;
}

static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
    auto it = dev_data->pipelineLayoutMap.find(pipeLayout);
    if (it == dev_data->pipelineLayoutMap.end()) {
        return nullptr;
    }
    return &it->second;
}

shader_module const *GetShaderModuleState(layer_data const *dev_data, VkShaderModule module) {
    auto it = dev_data->shaderModuleMap.find(module);
    if (it == dev_data->shaderModuleMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

// Return true if for a given PSO, the given state enum is dynamic, else return false
static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
        }
    }
    return false;
}
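// Example (illustrative): isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT) is what gates the
// dynamic viewport/scissor checks in ValidatePipelineDrawtimeState() below.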

// Validate state stored as flags at time of draw call
static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
                                      UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    bool result = false;
    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic line width state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pRasterizationState &&
        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic depth bias state not set for this command buffer", msg_code);
    }
    if (pPipe->blendConstantsEnabled) {
        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic blend constants state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic depth bounds state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil read mask state not set for this command buffer", msg_code);
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil write mask state not set for this command buffer", msg_code);
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil reference state not set for this command buffer", msg_code);
    }
    if (indexed) {
        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
    }

    return result;
}

// Verify attachment reference compatibility according to spec
//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED & the other array must match this
//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
//   to make sure that format and sample counts match.
//  If not, they are not compatible.
static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
                                             const VkAttachmentDescription *pSecondaryAttachments) {
    // Check potential NULL cases first to avoid nullptr issues later
    if (pPrimary == nullptr) {
        if (pSecondary == nullptr) {
            return true;
        }
        return false;
    } else if (pSecondary == nullptr) {
        return false;
    }
    if (index >= primaryCount) {  // Check secondary as if primary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment) return true;
    } else if (index >= secondaryCount) {  // Check primary as if secondary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment) return true;
    } else {  // Format and sample count must match
        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return true;
        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return false;
        }
        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
             pSecondaryAttachments[pSecondary[index].attachment].format) &&
            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
             pSecondaryAttachments[pSecondary[index].attachment].samples))
            return true;
    }
    // Format and sample counts didn't match
    return false;
}
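// Worked example (illustrative): with primaryCount == 2 and secondaryCount == 1, index 1 is
// evaluated as if the secondary reference were VK_ATTACHMENT_UNUSED, so the references remain
// compatible only if pPrimary[1].attachment is also VK_ATTACHMENT_UNUSED; at index 0 both
// attachment descriptions must agree on format and sample count.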
// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
// For given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
static bool verify_renderpass_compatibility(const layer_data *dev_data, const VkRenderPassCreateInfo *primaryRPCI,
                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
        stringstream errorStr;
        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
        errorMsg = errorStr.str();
        return false;
    }
    uint32_t spIndex = 0;
    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         primaryColorCount, primaryRPCI->pAttachments,
                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }

        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, 1,
                                              primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, secondaryRPCI->pAttachments)) {
            stringstream errorStr;
            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
            errorMsg = errorStr.str();
            return false;
        }

        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
        for (uint32_t i = 0; i < inputMax; ++i) {
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
    }
    return true;
}

// Return Set node ptr for specified set or else NULL
cvdescriptorset::DescriptorSet *GetSetNode(const layer_data *dev_data, VkDescriptorSet set) {
    auto set_it = dev_data->setMap.find(set);
    if (set_it == dev_data->setMap.end()) {
        return NULL;
    }
    return set_it->second;
}

// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}

static void list_bits(std::ostream &s, uint32_t bits) {
    for (int i = 0; i < 32 && bits; i++) {
        if (bits & (1 << i)) {
            s << i;
            bits &= ~(1 << i);
            if (bits) {
                s << ",";
            }
        }
    }
}
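// Example (illustrative): list_bits(s, 0x0A) appends "1,3" to s, the indices of the set bits.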
920
921// Validate draw-time state related to the PSO
922static bool ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
923                                          PIPELINE_STATE const *pPipeline) {
924    bool skip = false;
925
926    // Verify vertex binding
927    if (pPipeline->vertexBindingDescriptions.size() > 0) {
928        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
929            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
930            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
931                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
932                skip |=
933                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
934                            HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
935                            "The Pipeline State Object (0x%" PRIxLEAST64
936                            ") expects that this Command Buffer's vertex binding Index %u "
937                            "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
938                            "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
939                            HandleToUint64(state.pipeline_state->pipeline), vertex_binding, i, vertex_binding);
940            }
941        }
942    } else {
943        if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
944            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
945                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), __LINE__,
946                            DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
947                            "Vertex buffers are bound to command buffer (0x%p"
948                            ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
949                            pCB->commandBuffer, HandleToUint64(state.pipeline_state->pipeline));
950        }
951    }
952    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
953    // Skip check if rasterization is disabled or there is no viewport.
954    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
955         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
956        pPipeline->graphicsPipelineCI.pViewportState) {
957        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
958        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
959
960        if (dynViewport) {
961            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
962            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
963            if (missingViewportMask) {
964                std::stringstream ss;
965                ss << "Dynamic viewport(s) ";
966                list_bits(ss, missingViewportMask);
967                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
968                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
969                                __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
970            }
971        }
972
973        if (dynScissor) {
974            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
975            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
976            if (missingScissorMask) {
977                std::stringstream ss;
978                ss << "Dynamic scissor(s) ";
979                list_bits(ss, missingScissorMask);
980                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
981                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
982                                __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
983            }
984        }
985    }
986
987    // Verify that any MSAA request in PSO matches sample# in bound FB
988    // Skip the check if rasterization is disabled.
989    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
990        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
991        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
992        if (pCB->activeRenderPass) {
993            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
994            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
995            uint32_t i;
996            unsigned subpass_num_samples = 0;
997
998            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
999                auto attachment = subpass_desc->pColorAttachments[i].attachment;
1000                if (attachment != VK_ATTACHMENT_UNUSED)
1001                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
1002            }
1003
1004            if (subpass_desc->pDepthStencilAttachment &&
1005                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1006                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
1007                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
1008            }
1009
1010            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
1011                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1012                                HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
1013                                "Num samples mismatch! At draw-time in Pipeline (0x%" PRIxLEAST64
1014                                ") with %u samples while current RenderPass (0x%" PRIxLEAST64 ") w/ %u samples!",
1015                                HandleToUint64(pPipeline->pipeline), pso_num_samples,
1016                                HandleToUint64(pCB->activeRenderPass->renderPass), subpass_num_samples);
1017            }
1018        } else {
1019            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1020                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
1021                            "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
1022                            HandleToUint64(pPipeline->pipeline));
1023        }
1024    }
1025    // Verify that PSO creation renderPass is compatible with active renderPass
1026    if (pCB->activeRenderPass) {
1027        std::string err_string;
1028        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
1029            !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
1030                                             err_string)) {
1031            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
1032            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1033                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
1034                            "At Draw time the active render pass (0x%" PRIxLEAST64
1035                            ") is incompatible w/ gfx pipeline "
1036                            "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
1037                            HandleToUint64(pCB->activeRenderPass->renderPass), HandleToUint64(pPipeline->pipeline),
1038                            HandleToUint64(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
1039        }
1040
1041        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
1042            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1043                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
1044                            "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
1045                            pCB->activeSubpass);
1046        }
1047    }
1048    // TODO : Add more checks here
1049
1050    return skip;
1051}
1052
1053// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
1054// pipelineLayout[layoutIndex]
1055static bool verify_set_layout_compatibility(const cvdescriptorset::DescriptorSet *descriptor_set,
1056                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
1057                                            string &errorMsg) {
1058    auto num_sets = pipeline_layout->set_layouts.size();
1059    if (layoutIndex >= num_sets) {
1060        stringstream errorStr;
1061        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
1062                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
1063                 << layoutIndex;
1064        errorMsg = errorStr.str();
1065        return false;
1066    }
1067    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
1068    return descriptor_set->IsCompatible(layout_node.get(), &errorMsg);
1069}
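// Illustrative app-side sketch (hypothetical handles) of the mismatch caught above: binding a
// set at index 1 against a pipeline layout that only declares one set layout.
//
//     vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
//                             1 /*firstSet*/, 1, &set, 0, nullptr);  // layout only has set 0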
1070
1071// Validate overall state at the time of a draw call
1072static bool ValidateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const bool indexed,
1073                              const VkPipelineBindPoint bind_point, const char *function,
1074                              UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
1075    bool result = false;
1076    auto const &state = cb_node->lastBound[bind_point];
1077    PIPELINE_STATE *pPipe = state.pipeline_state;
1078    if (nullptr == pPipe) {
1079        result |= log_msg(
1080            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1081            HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
1082            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
1083        // Always return early: every check below dereferences pPipe and would crash without a bound pipeline
1084        return result;
1085    }
1086    // First check flag states
1087    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
1088        result = validate_draw_state_flags(dev_data, cb_node, pPipe, indexed, msg_code);
1089
1090    // Now complete other state checks
1091    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
1092        string errorString;
1093        auto pipeline_layout = pPipe->pipeline_layout;
1094
1095        for (const auto &set_binding_pair : pPipe->active_slots) {
1096            uint32_t setIndex = set_binding_pair.first;
1097            // If valid set is not bound throw an error
1098            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
1099                result |=
1100                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1101                            HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
1102                            "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.",
1103                            HandleToUint64(pPipe->pipeline), setIndex);
1104            } else if (!verify_set_layout_compatibility(state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
1105                                                        errorString)) {
1106                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
1107                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
1108                result |=
1109                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1110                            HandleToUint64(setHandle), __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
1111                            "VkDescriptorSet (0x%" PRIxLEAST64
1112                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
1113                            HandleToUint64(setHandle), setIndex, HandleToUint64(pipeline_layout.layout), errorString.c_str());
1114            } else {  // Valid set is bound and layout compatible, validate that it's updated
1115                // Pull the set node
1116                cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1117                // Validate the draw-time state for this descriptor set
1118                std::string err_str;
1119                if (!descriptor_set->ValidateDrawState(set_binding_pair.second, state.dynamicOffsets[setIndex], cb_node, function,
1120                                                       &err_str)) {
1121                    auto set = descriptor_set->GetSet();
1122                    result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1123                                      VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(set), __LINE__,
1124                                      DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
1125                                      "Descriptor set 0x%" PRIxLEAST64 " encountered the following validation error at %s time: %s",
1126                                      HandleToUint64(set), function, err_str.c_str());
1127                }
1128            }
1129        }
1130    }
1131
1132    // Check general pipeline state that needs to be validated at drawtime
1133    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) result |= ValidatePipelineDrawtimeState(dev_data, state, cb_node, pPipe);
1134
1135    return result;
1136}
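// Sketch (hypothetical handles) of the "set not bound" path above: the pipeline's shaders use
// set #0, but the app records a draw without ever binding a descriptor set to that slot.
//
//     vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);  // shaders use set 0
//     vkCmdDraw(cmd, 3, 1, 0, 0);  // flagged: no vkCmdBindDescriptorSets() call for set 0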
1137
1138static void UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
1139    auto const &state = cb_state->lastBound[bind_point];
1140    PIPELINE_STATE *pPipe = state.pipeline_state;
1141    if (!pPipe) return;  // Draw-time validation has already flagged a missing pipeline here
1142    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
1143        for (const auto &set_binding_pair : pPipe->active_slots) {
1144            uint32_t setIndex = set_binding_pair.first;
1145            // Pull the set node; skip slots with no set bound (already flagged at validation time)
1146            if ((setIndex >= state.boundDescriptorSets.size()) || !state.boundDescriptorSets[setIndex]) continue;
1147            cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1148            // Bind this set and its active descriptor resources to the command buffer
1149            descriptor_set->BindCommandBuffer(cb_state, set_binding_pair.second);
1150            // For given active slots record updated images & buffers
1151            descriptor_set->GetStorageUpdates(set_binding_pair.second, &cb_state->updateBuffers, &cb_state->updateImages);
1152        }
1153    }
1154    if (!pPipe->vertexBindingDescriptions.empty()) cb_state->vertex_buffer_used = true;
1155}
1156
1157// Validate HW line width capabilities prior to setting requested line width.
1158static bool verifyLineWidth(layer_data *dev_data, DRAW_STATE_ERROR dsError, VulkanObjectType object_type, const uint64_t &target,
1159                            float lineWidth) {
1160    bool skip = false;
1161
1162    // First check to see if the physical device supports wide lines.
1163    if ((VK_FALSE == dev_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
1164        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object_type], target, __LINE__,
1165                        dsError, "DS",
1166                        "Attempt to set lineWidth to %f but the physical device wideLines feature "
1167                        "is not enabled, so lineWidth must be 1.0f!",
1168                        lineWidth);
1169    } else {
1170        // Otherwise, make sure the width falls in the valid range.
1171        if ((dev_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
1172            (dev_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
1173            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object_type], target,
1174                            __LINE__, dsError, "DS",
1175                            "Attempt to set lineWidth to %f but the physical device limits line width "
1176                            "to the range [%f, %f]!",
1177                            lineWidth, dev_data->phys_dev_properties.properties.limits.lineWidthRange[0],
1178                            dev_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
1179        }
1180    }
1181
1182    return skip;
1183}
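// Example trigger (sketch; assumes wideLines was left disabled in
// VkDeviceCreateInfo::pEnabledFeatures at device creation):
//
//     vkCmdSetLineWidth(cmd, 2.0f);  // flagged: only 1.0f is valid without wideLines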
1184
1185static bool ValidatePipelineLocked(layer_data *dev_data, std::vector<PIPELINE_STATE *> const &pPipelines, int pipelineIndex) {
1186    bool skip = false;
1187
1188    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];
1189
1190    // If create derivative bit is set, check that we've specified a base
1191    // pipeline correctly, and that the base pipeline was created to allow
1192    // derivatives.
1193    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
1194        PIPELINE_STATE *pBasePipeline = nullptr;
1195        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
1196              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
1197            // This check is a superset of VALIDATION_ERROR_096005a8 and VALIDATION_ERROR_096005aa
1198            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1199                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
1200                            "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
1201        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
1202            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
1203                skip |=
1204                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1205                            HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_208005a0, "DS",
1206                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline. %s",
1207                            validation_error_map[VALIDATION_ERROR_208005a0]);
1208            } else {
1209                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
1210            }
1211        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
1212            pBasePipeline = getPipelineState(dev_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
1213        }
1214
1215        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
1216            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1217                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
1218                            "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
1219        }
1220    }
1221
1222    return skip;
1223}
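// Sketch of a derivative request that passes the checks above (hypothetical create-info
// array): exactly one of handle/index is set, the index points at an earlier element, and
// the base was created with the ALLOW_DERIVATIVES flag.
//
//     create_infos[0].flags              = VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT;
//     create_infos[1].flags              = VK_PIPELINE_CREATE_DERIVATIVE_BIT;
//     create_infos[1].basePipelineIndex  = 0;               // earlier in the same array
//     create_infos[1].basePipelineHandle = VK_NULL_HANDLE;  // index XOR handle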
1224
1225// UNLOCKED pipeline validation. DO NOT lookup objects in the layer_data->* maps in this function.
1226static bool ValidatePipelineUnlocked(layer_data *dev_data, std::vector<PIPELINE_STATE *> const &pPipelines, int pipelineIndex) {
1227    bool skip = false;
1228
1229    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];
1230
1231    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
1232    // produces nonsense errors that confuse users. Other layers should already
1233    // emit errors for renderpass being invalid.
1234    auto subpass_desc = (pPipeline->graphicsPipelineCI.subpass < pPipeline->render_pass_ci.subpassCount)
1235                            ? &pPipeline->render_pass_ci.pSubpasses[pPipeline->graphicsPipelineCI.subpass]
1236                            : nullptr;  // only index pSubpasses once the subpass index is known to be in range
1237    if (!subpass_desc) {
1238        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1239                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005ee, "DS",
1240                        "Invalid Pipeline CreateInfo State: Subpass index %u is out of range for this renderpass (0..%u). %s",
1241                        pPipeline->graphicsPipelineCI.subpass, pPipeline->render_pass_ci.subpassCount - 1,
1242                        validation_error_map[VALIDATION_ERROR_096005ee]);
1243    }
1244
1245    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
1246        const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
1247        if (subpass_desc && (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
1248            skip |= log_msg(
1249                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1250                HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005d4, "DS",
1251                "vkCreateGraphicsPipelines(): Render pass (0x%" PRIxLEAST64
1252                ") subpass %u has colorAttachmentCount of %u which doesn't match the pColorBlendState->attachmentCount of %u. %s",
1253                HandleToUint64(pPipeline->graphicsPipelineCI.renderPass), pPipeline->graphicsPipelineCI.subpass,
1254                subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount,
1255                validation_error_map[VALIDATION_ERROR_096005d4]);
1256        }
1257        if (!dev_data->enabled_features.independentBlend) {
1258            if (pPipeline->attachments.size() > 1) {
1259                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
1260                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
1261                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
1262                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
1263                    // only attachment state, so memcmp is best suited for the comparison
1264                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
1265                               sizeof(pAttachments[0]))) {
1266                        skip |=
1267                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1268                                    HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_0f4004ba, "DS",
1269                                    "Invalid Pipeline CreateInfo: If independent blend feature not "
1270                                    "enabled, all elements of pAttachments must be identical. %s",
1271                                    validation_error_map[VALIDATION_ERROR_0f4004ba]);
1272                        break;
1273                    }
1274                }
1275            }
1276        }
1277        if (!dev_data->enabled_features.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
1278            skip |=
1279                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1280                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_0f4004bc, "DS",
1281                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE. %s",
1282                        validation_error_map[VALIDATION_ERROR_0f4004bc]);
1283        }
1284    }
1285
1286    if (validate_and_capture_pipeline_shader_state(dev_data, pPipeline)) {
1287        skip = true;
1288    }
1289    // Each shader's stage must be unique
1290    if (pPipeline->duplicate_shaders) {
1291        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
1292            if (pPipeline->duplicate_shaders & stage) {
1293                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1294                                HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
1295                                "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
1296                                string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
1297            }
1298        }
1299    }
1300    // VS is required
1301    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
1302        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1303                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005ae, "DS",
1304                        "Invalid Pipeline CreateInfo State: Vertex Shader required. %s",
1305                        validation_error_map[VALIDATION_ERROR_096005ae]);
1306    }
1307    // Either both or neither TC/TE shaders should be defined
1308    bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
1309    bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
1310    if (has_control && !has_eval) {
1311        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1312                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b2, "DS",
1313                        "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
1314                        validation_error_map[VALIDATION_ERROR_096005b2]);
1315    }
1316    if (!has_control && has_eval) {
1317        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1318                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b4, "DS",
1319                        "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
1320                        validation_error_map[VALIDATION_ERROR_096005b4]);
1321    }
1322    // Compute shaders should be specified independent of Gfx shaders
1323    if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
1324        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1325                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b0, "DS",
1326                        "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline. %s",
1327                        validation_error_map[VALIDATION_ERROR_096005b0]);
1328    }
1329    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
1330    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
1331    if (has_control && has_eval &&
1332        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
1333         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
1334        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1335                        HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005c0, "DS",
1336                        "Invalid Pipeline CreateInfo State: "
1337                        "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
1338                        "topology for tessellation pipelines. %s",
1339                        validation_error_map[VALIDATION_ERROR_096005c0]);
1340    }
1341    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
1342        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
1343        if (!has_control || !has_eval) {
1344            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1345                            HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005c2, "DS",
1346                            "Invalid Pipeline CreateInfo State: "
1347                            "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
1348                            "topology is only valid for tessellation pipelines. %s",
1349                            validation_error_map[VALIDATION_ERROR_096005c2]);
1350        }
1351    }
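// Minimal input-assembly state satisfying the two tessellation checks above (sketch; a TC
// and a TE stage must also both be present in pStages):
//
//     VkPipelineInputAssemblyStateCreateInfo ia = {};
//     ia.sType    = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
//     ia.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;  // required for tessellation pipelines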
1352
1353    // If a rasterization state is provided...
1354    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
1355        // Make sure that the line width conforms to the HW.
1356        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
1357            skip |=
1358                verifyLineWidth(dev_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, kVulkanObjectTypePipeline,
1359                                HandleToUint64(pPipeline->pipeline), pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
1360        }
1361
1362        if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
1363            (!dev_data->enabled_features.depthClamp)) {
1364            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1365                            HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_1020061c, "DS",
1366                            "vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable "
1367                            "member of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE. %s",
1368                            validation_error_map[VALIDATION_ERROR_1020061c]);
1369        }
1370
1371        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
1372            (pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) &&
1373            (!dev_data->enabled_features.depthBiasClamp)) {
1374            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1375                            HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
1376                            "vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp "
1377                            "member of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
1378                            "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled");
1379        }
1380
1381        // If rasterization is enabled...
1382        if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
1383            if (pPipeline->graphicsPipelineCI.pMultisampleState && (!dev_data->enabled_features.alphaToOne) &&
1384                (pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE)) {
1385                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1386                                HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_10000622, "DS",
1387                                "vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable "
1388                                "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE. %s",
1389                                validation_error_map[VALIDATION_ERROR_10000622]);
1390            }
1391
1392            // If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
1393            if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
1394                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1395                if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
1396                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1397                                    HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005e0, "DS",
1398                                    "Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is "
1399                                    "enabled and subpass uses a depth/stencil attachment. %s",
1400                                    validation_error_map[VALIDATION_ERROR_096005e0]);
1401
1402                } else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
1403                           (!dev_data->enabled_features.depthBounds)) {
1404                    skip |= log_msg(
1405                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1406                        HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
1407                        "vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the depthBoundsTestEnable "
1408                        "member of the VkPipelineDepthStencilStateCreateInfo structure must be set to VK_FALSE.");
1409                }
1410            }
1411
1412            // If subpass uses color attachments, pColorBlendState must be valid pointer
1413            if (subpass_desc) {
1414                uint32_t color_attachment_count = 0;
1415                for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
1416                    if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1417                        ++color_attachment_count;
1418                    }
1419                }
1420                if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
1421                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1422                                    HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005e2, "DS",
1423                                    "Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is "
1424                                    "enabled and subpass uses color attachments. %s",
1425                                    validation_error_map[VALIDATION_ERROR_096005e2]);
1426                }
1427            }
1428        }
1429    }
1430
1431    auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
1432    if (vi != NULL) {
1433        for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
1434            VkFormat format = vi->pVertexAttributeDescriptions[j].format;
1435            // Internal call to get format info.  Still goes through layers, could potentially go directly to ICD.
1436            VkFormatProperties properties;
1437            dev_data->instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(dev_data->physical_device, format, &properties);
1438            if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
1439                skip |= log_msg(
1440                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1441                    __LINE__, VALIDATION_ERROR_14a004de, "IMAGE",
1442                    "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
1443                    "(%s) is not a supported vertex buffer format. %s",
1444                    pipelineIndex, j, string_VkFormat(format), validation_error_map[VALIDATION_ERROR_14a004de]);
1445            }
1446        }
1447    }
1448
1449    return skip;
1450}
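// App-side sketch of checking vertex-format support up front, mirroring the query above
// (phys_dev is a hypothetical VkPhysicalDevice):
//
//     VkFormatProperties props;
//     vkGetPhysicalDeviceFormatProperties(phys_dev, VK_FORMAT_R32G32B32_SFLOAT, &props);
//     bool usable = (props.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) != 0;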
1451
1452// Free the Pipeline nodes
1453static void deletePipelines(layer_data *dev_data) {
1454    if (dev_data->pipelineMap.empty()) return;
1455    for (auto &pipe_map_pair : dev_data->pipelineMap) {
1456        delete pipe_map_pair.second;
1457    }
1458    dev_data->pipelineMap.clear();
1459}
1460
1461// Block of code at start here specifically for managing/tracking DSs
1462
1463// Return Pool node ptr for specified pool or else NULL
1464DESCRIPTOR_POOL_STATE *GetDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
1465    auto pool_it = dev_data->descriptorPoolMap.find(pool);
1466    if (pool_it == dev_data->descriptorPoolMap.end()) {
1467        return NULL;
1468    }
1469    return pool_it->second;
1470}
1471
1472// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
1473// func_str is the name of the calling function
1474// Return false if no errors occur
1475// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
1476static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, const std::string &func_str) {
1477    if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
1478    bool skip = false;
1479    auto set_node = dev_data->setMap.find(set);
1480    if (set_node == dev_data->setMap.end()) {
1481        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1482                        HandleToUint64(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
1483                        "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
1484                        HandleToUint64(set));
1485    } else {
1486        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
1487        if (set_node->second->in_use.load()) {
1488            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1489                            HandleToUint64(set), __LINE__, VALIDATION_ERROR_2860026a, "DS",
1490                            "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
1491                            func_str.c_str(), HandleToUint64(set), validation_error_map[VALIDATION_ERROR_2860026a]);
1492        }
1493    }
1494    return skip;
1495}
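// Typical misuse flagged above (sketch; hypothetical handles): freeing a set while the
// submission that references it is still executing.
//
//     vkQueueSubmit(queue, 1, &submit_info, fence);  // command buffer uses `set`
//     vkFreeDescriptorSets(device, pool, 1, &set);   // flagged until the work completes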
1496
1497// Remove set from setMap and delete the set
1498static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
1499    dev_data->setMap.erase(descriptor_set->GetSet());
1500    delete descriptor_set;
1501}
1502// Free all DS Pools including their Sets & related sub-structs
1503// NOTE : Calls to this function should be wrapped in mutex
1504static void deletePools(layer_data *dev_data) {
1505    for (auto ii = dev_data->descriptorPoolMap.begin(); ii != dev_data->descriptorPoolMap.end();) {
1506        // Remove this pools' sets from setMap and delete them
1507        for (auto ds : ii->second->sets) {
1508            freeDescriptorSet(dev_data, ds);
1509        }
1510        ii->second->sets.clear();
1511        delete ii->second;
1512        ii = dev_data->descriptorPoolMap.erase(ii);
1513    }
1514}
1515
1516static void clearDescriptorPool(layer_data *dev_data, const VkDevice device, const VkDescriptorPool pool,
1517                                VkDescriptorPoolResetFlags flags) {
1518    DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, pool);
1519    // TODO: validate flags
1520    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
1521    for (auto ds : pPool->sets) {
1522        freeDescriptorSet(dev_data, ds);
1523    }
1524    pPool->sets.clear();
1525    // Reset available count for each type and available sets for this pool
1526    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
1527        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
1528    }
1529    pPool->availableSets = pPool->maxSets;
1530}
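// This mirrors vkResetDescriptorPool semantics (sketch): all sets from the pool become
// invalid at once, and the pool's capacity returns to its creation-time maximums.
//
//     vkResetDescriptorPool(device, pool, 0 /*flags are reserved*/);
//     // every VkDescriptorSet previously allocated from `pool` is now invalid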
1531
1532// For given CB object, fetch associated CB Node from map
1533GLOBAL_CB_NODE *GetCBNode(layer_data const *dev_data, const VkCommandBuffer cb) {
1534    auto it = dev_data->commandBufferMap.find(cb);
1535    if (it == dev_data->commandBufferMap.end()) {
1536        return NULL;
1537    }
1538    return it->second;
1539}
1540
1541// If a renderpass is active, verify that the given command type is appropriate for current subpass state
1542bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
1543    if (!pCB->activeRenderPass) return false;
1544    bool skip = false;
1545    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
1546        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
1547        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1548                        HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
1549                        "Commands cannot be called in a subpass using secondary command buffers.");
1550    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
1551        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1552                        HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
1553                        "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
1554    }
1555    return skip;
1556}
1557
1558bool ValidateCmdQueueFlags(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *caller_name,
1559                           VkQueueFlags required_flags, UNIQUE_VALIDATION_ERROR_CODE error_code) {
1560    auto pool = GetCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
1561    if (pool) {
1562        VkQueueFlags queue_flags = dev_data->phys_dev_properties.queue_family_properties[pool->queueFamilyIndex].queueFlags;
1563        if (!(required_flags & queue_flags)) {
1564            string required_flags_string;
1565            for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
1566                if (flag & required_flags) {
1567                    if (required_flags_string.size()) {
1568                        required_flags_string += " or ";
1569                    }
1570                    required_flags_string += string_VkQueueFlagBits(flag);
1571                }
1572            }
1573            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1574                           HandleToUint64(cb_node->commandBuffer), __LINE__, error_code, "DS",
1575                           "Cannot call %s on a command buffer allocated from a pool without %s capabilities. %s.", caller_name,
1576                           required_flags_string.c_str(), validation_error_map[error_code]);
1577        }
1578    }
1579    return false;
1580}
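// Example violation (sketch): recording a draw into a command buffer whose pool was created
// for a transfer-only queue family.
//
//     // pool created with the queueFamilyIndex of a VK_QUEUE_TRANSFER_BIT-only family
//     vkCmdDraw(cmd, 3, 1, 0, 0);  // flagged: vkCmdDraw needs VK_QUEUE_GRAPHICS_BIT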
1581
1582static char const *GetCauseStr(VK_OBJECT obj) {
1583    if (obj.type == kVulkanObjectTypeDescriptorSet)
1584        return "destroyed or updated";
1585    if (obj.type == kVulkanObjectTypeCommandBuffer)
1586        return "destroyed or rerecorded";
1587    return "destroyed";
1588}
1589
1590static bool ReportInvalidCommandBuffer(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const char *call_source) {
1591    bool skip = false;
1592    for (auto obj : cb_state->broken_bindings) {
1593        const char *type_str = object_string[obj.type];
1594        const char *cause_str = GetCauseStr(obj);
1595        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1596                        HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
1597                        "You are adding %s to command buffer 0x%p that is invalid because bound %s 0x%" PRIxLEAST64 " was %s.",
1598                        call_source, cb_state->commandBuffer, type_str, obj.handle, cause_str);
1599    }
1600    return skip;
1601}
1602
1603// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
1604// there's an issue with the Cmd ordering
1605bool ValidateCmd(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
1606    switch (cb_state->state) {
1607        case CB_RECORDING:
1608            return ValidateCmdSubpassState(dev_data, cb_state, cmd);
1609
1610        case CB_INVALID_COMPLETE:
1611        case CB_INVALID_INCOMPLETE:
1612            return ReportInvalidCommandBuffer(dev_data, cb_state, caller_name);
1613
1614        default:
1615            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1616                           HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
1617                           "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
1618    }
1619}
1620
1621// For given object struct return a ptr of BASE_NODE type for its wrapping struct
1622BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
1623    BASE_NODE *base_ptr = nullptr;
1624    switch (object_struct.type) {
1625        case kVulkanObjectTypeDescriptorSet: {
1626            base_ptr = GetSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
1627            break;
1628        }
1629        case kVulkanObjectTypeSampler: {
1630            base_ptr = GetSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
1631            break;
1632        }
1633        case kVulkanObjectTypeQueryPool: {
1634            base_ptr = GetQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
1635            break;
1636        }
1637        case kVulkanObjectTypePipeline: {
1638            base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
1639            break;
1640        }
1641        case kVulkanObjectTypeBuffer: {
1642            base_ptr = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
1643            break;
1644        }
1645        case kVulkanObjectTypeBufferView: {
1646            base_ptr = GetBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
1647            break;
1648        }
1649        case kVulkanObjectTypeImage: {
1650            base_ptr = GetImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
1651            break;
1652        }
1653        case kVulkanObjectTypeImageView: {
1654            base_ptr = GetImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
1655            break;
1656        }
1657        case kVulkanObjectTypeEvent: {
1658            base_ptr = GetEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
1659            break;
1660        }
1661        case kVulkanObjectTypeDescriptorPool: {
1662            base_ptr = GetDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
1663            break;
1664        }
1665        case kVulkanObjectTypeCommandPool: {
1666            base_ptr = GetCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
1667            break;
1668        }
1669        case kVulkanObjectTypeFramebuffer: {
1670            base_ptr = GetFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
1671            break;
1672        }
1673        case kVulkanObjectTypeRenderPass: {
1674            base_ptr = GetRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
1675            break;
1676        }
1677        case kVulkanObjectTypeDeviceMemory: {
1678            base_ptr = GetMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
1679            break;
1680        }
1681        default:
1682            // TODO : Any other objects to be handled here?
1683            assert(0);
1684            break;
1685    }
1686    return base_ptr;
1687}
1688
1689// Tie the VK_OBJECT to the cmd buffer which includes:
1690//  Add object_binding to cmd buffer
1691//  Add cb_binding to object
1692static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
1693    cb_bindings->insert(cb_node);
1694    cb_node->object_bindings.insert(obj);
1695}
1696// For a given object, if cb_node is in that objects cb_bindings, remove cb_node
1697static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
1698    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
1699    if (base_obj) base_obj->cb_bindings.erase(cb_node);
1700}
1701// Reset the command buffer state
1702//  Maintain the createInfo and set state to CB_NEW, but clear all other state
1703static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
1704    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
1705    if (pCB) {
1706        pCB->in_use.store(0);
1707        // Reset CB state (note that createInfo is not cleared)
1708        pCB->commandBuffer = cb;
1709        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1710        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
1711        pCB->hasDrawCmd = false;
1712        pCB->state = CB_NEW;
1713        pCB->submitCount = 0;
1714        pCB->status = 0;
1715        pCB->viewportMask = 0;
1716        pCB->scissorMask = 0;
1717
1718        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
1719            pCB->lastBound[i].reset();
1720        }
1721
1722        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
1723        pCB->activeRenderPass = nullptr;
1724        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
1725        pCB->activeSubpass = 0;
1726        pCB->broken_bindings.clear();
1727        pCB->waitedEvents.clear();
1728        pCB->events.clear();
1729        pCB->writeEventsBeforeWait.clear();
1730        pCB->waitedEventsBeforeQueryReset.clear();
1731        pCB->queryToStateMap.clear();
1732        pCB->activeQueries.clear();
1733        pCB->startedQueries.clear();
1734        pCB->imageLayoutMap.clear();
1735        pCB->eventToStageMap.clear();
1736        pCB->drawData.clear();
1737        pCB->currentDrawData.buffers.clear();
1738        pCB->vertex_buffer_used = false;
1739        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
1740        // If secondary, invalidate any primary command buffer that may call us.
1741        if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
1742            invalidateCommandBuffers(dev_data,
1743                                     pCB->linkedCommandBuffers,
1744                                     {HandleToUint64(cb), kVulkanObjectTypeCommandBuffer});
1745        }
1746
1747        // Remove reverse command buffer links.
1748        for (auto pSubCB : pCB->linkedCommandBuffers) {
1749            pSubCB->linkedCommandBuffers.erase(pCB);
1750        }
1751        pCB->linkedCommandBuffers.clear();
1752        pCB->updateImages.clear();
1753        pCB->updateBuffers.clear();
1754        clear_cmd_buf_and_mem_references(dev_data, pCB);
1755        pCB->validate_functions.clear();
1756        pCB->eventUpdates.clear();
1757        pCB->queryUpdates.clear();
1758
1759        // Remove object bindings
1760        for (auto obj : pCB->object_bindings) {
1761            removeCommandBufferBinding(dev_data, &obj, pCB);
1762        }
1763        pCB->object_bindings.clear();
1764        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
1765        for (auto framebuffer : pCB->framebuffers) {
1766            auto fb_state = GetFramebufferState(dev_data, framebuffer);
1767            if (fb_state) fb_state->cb_bindings.erase(pCB);
1768        }
1769        pCB->framebuffers.clear();
1770        pCB->activeFramebuffer = VK_NULL_HANDLE;
1771    }
1772}
1773
1774// Set PSO-related status bits for CB, including dynamic state set via PSO
1775static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe) {
1776    // Account for any dynamic state not set via this PSO
1777    if (!pPipe->graphicsPipelineCI.pDynamicState ||
1778        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) {  // All state is static
1779        pCB->status |= CBSTATUS_ALL_STATE_SET;
1780    } else {
1781        // First consider all state on
1782        // Then unset any state that's noted as dynamic in PSO
1783        // Finally OR that into CB statemask
1784        CBStatusFlags psoDynStateMask = CBSTATUS_ALL_STATE_SET;
1785        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
1786            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
1787                case VK_DYNAMIC_STATE_LINE_WIDTH:
1788                    psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
1789                    break;
1790                case VK_DYNAMIC_STATE_DEPTH_BIAS:
1791                    psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
1792                    break;
1793                case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
1794                    psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
1795                    break;
1796                case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
1797                    psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
1798                    break;
1799                case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
1800                    psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
1801                    break;
1802                case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
1803                    psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
1804                    break;
1805                case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
1806                    psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
1807                    break;
1808                default:
1809                    // TODO : Flag error here
1810                    break;
1811            }
1812        }
1813        pCB->status |= psoDynStateMask;
1814    }
1815}
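// Consequence of the masking above (sketch): any state the PSO declares dynamic must be set
// on the command buffer before drawing, e.g. with VK_DYNAMIC_STATE_LINE_WIDTH in the PSO:
//
//     vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pso_with_dynamic_line_width);
//     vkCmdSetLineWidth(cmd, 1.0f);  // otherwise CBSTATUS_LINE_WIDTH_SET remains unset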
1816
1817// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
1818// render pass.
1819bool insideRenderPass(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const char *apiName,
1820                      UNIQUE_VALIDATION_ERROR_CODE msgCode) {
1821    bool inside = false;
1822    if (pCB->activeRenderPass) {
1823        inside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1824                         HandleToUint64(pCB->commandBuffer), __LINE__, msgCode, "DS",
1825                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 "). %s", apiName,
1826                         HandleToUint64(pCB->activeRenderPass->renderPass), validation_error_map[msgCode]);
1827    }
1828    return inside;
1829}
1830
1831// Flags validation error if the associated call is made outside a render pass. The apiName
1832// routine should ONLY be called inside a render pass.
1833bool outsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
1834    bool outside = false;
1835    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
1836        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
1837         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
1838        outside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1839                          HandleToUint64(pCB->commandBuffer), __LINE__, msgCode, "DS",
1840                          "%s: This call must be issued inside an active render pass. %s", apiName, validation_error_map[msgCode]);
1841    }
1842    return outside;
1843}
1844
1845static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
1846    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
1847}
1848
1849// For the given ValidationCheck enum, set all relevant instance disabled flags to true
1850void SetDisabledFlags(instance_layer_data *instance_data, VkValidationFlagsEXT *val_flags_struct) {
1851    for (uint32_t i = 0; i < val_flags_struct->disabledValidationCheckCount; ++i) {
1852        switch (val_flags_struct->pDisabledValidationChecks[i]) {
1853            case VK_VALIDATION_CHECK_SHADERS_EXT:
1854                instance_data->disabled.shader_validation = true;
1855                break;
1856            case VK_VALIDATION_CHECK_ALL_EXT:
1857                // Set all disabled flags to true
1858                instance_data->disabled.SetAll(true);
1859                break;
1860            default:
1861                break;
1862        }
1863    }
1864}
1865
1866VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
1867                                              VkInstance *pInstance) {
1868    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
1869
1870    assert(chain_info->u.pLayerInfo);
1871    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
1872    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
1873    if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
1874
1875    // Advance the link info for the next element on the chain
1876    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
1877
1878    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
1879    if (result != VK_SUCCESS) return result;
1880
1881    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
1882    instance_data->instance = *pInstance;
1883    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
1884    instance_data->report_data = debug_report_create_instance(
1885        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
1886    instance_data->extensions.InitFromInstanceCreateInfo(pCreateInfo);
1887    init_core_validation(instance_data, pAllocator);
1888
1889    ValidateLayerOrdering(*pCreateInfo);
1890    // Parse any pNext chains
1891    if (pCreateInfo->pNext) {
1892        GENERIC_HEADER *struct_header = (GENERIC_HEADER *)pCreateInfo->pNext;
1893        while (struct_header) {
1894            // Check for VkValidationFlagsExt
1895            if (VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT == struct_header->sType) {
1896                SetDisabledFlags(instance_data, (VkValidationFlagsEXT *)struct_header);
1897            }
1898            struct_header = (GENERIC_HEADER *)struct_header->pNext;
1899        }
1900    }
1901
1902    return result;
1903}
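// App-side sketch of the pNext chain parsed above: disabling shader checks through
// VK_EXT_validation_flags (instance_ci is a hypothetical VkInstanceCreateInfo):
//
//     VkValidationCheckEXT disabled_check = VK_VALIDATION_CHECK_SHADERS_EXT;
//     VkValidationFlagsEXT val_flags = {};
//     val_flags.sType                        = VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT;
//     val_flags.disabledValidationCheckCount = 1;
//     val_flags.pDisabledValidationChecks    = &disabled_check;
//     instance_ci.pNext                      = &val_flags;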
1904
1905// Hook DestroyInstance to remove tableInstanceMap entry
1906VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
1907    // TODOSC : Shouldn't need any customization here
1908    dispatch_key key = get_dispatch_key(instance);
1909    // TBD: Need any locking this early, in case this function is called at the
1910    // same time by more than one thread?
1911    instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
1912    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
1913
1914    lock_guard_t lock(global_lock);
1915    // Clean up logging callback, if any
1916    while (instance_data->logging_callback.size() > 0) {
1917        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
1918        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
1919        instance_data->logging_callback.pop_back();
1920    }
1921
1922    layer_debug_report_destroy_instance(instance_data->report_data);
1923    FreeLayerDataPtr(key, instance_layer_data_map);
1924}
1925
1926static bool ValidatePhysicalDeviceQueueFamily(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
1927                                              uint32_t requested_queue_family, int32_t err_code, const char *cmd_name,
1928                                              const char *queue_family_var_name, const char *vu_note = nullptr) {
1929    bool skip = false;
1930
1931    if (!vu_note) vu_note = validation_error_map[err_code];
1932
1933    const char *conditional_ext_cmd =
1934        instance_data->extensions.vk_khr_get_physical_device_properties_2 ? " or vkGetPhysicalDeviceQueueFamilyProperties2KHR" : "";
1935
1936    std::string count_note = (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
1937                                 ? "the pQueueFamilyPropertyCount was never obtained"
1938                                 : "i.e. is not less than " + std::to_string(pd_state->queue_family_count);
1939
1940    if (requested_queue_family >= pd_state->queue_family_count) {
1941        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
1942                        HandleToUint64(pd_state->phys_device), __LINE__, err_code, "DL",
1943                        "%s: %s (= %" PRIu32
1944                        ") is not less than any previously obtained pQueueFamilyPropertyCount from "
1945                        "vkGetPhysicalDeviceQueueFamilyProperties%s (%s). %s",
1946                        cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, count_note.c_str(), vu_note);
1947    }
1948    return skip;
1949}
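// Query pattern this check assumes the app performed first (sketch):
//
//     uint32_t count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(phys_dev, &count, nullptr);
//     std::vector<VkQueueFamilyProperties> props(count);
//     vkGetPhysicalDeviceQueueFamilyProperties(phys_dev, &count, props.data());
//     // any queueFamilyIndex passed to the API must now be < count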
1950
1951// Verify VkDeviceQueueCreateInfos
1952static bool ValidateDeviceQueueCreateInfos(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
1953                                           uint32_t info_count, const VkDeviceQueueCreateInfo *infos) {
1954    bool skip = false;
1955
1956    for (uint32_t i = 0; i < info_count; ++i) {
1957        const auto requested_queue_family = infos[i].queueFamilyIndex;
1958
1959        // Verify that requested queue family is known to be valid at this point in time
1960        std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
1961        skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, requested_queue_family, VALIDATION_ERROR_06c002fa,
1962                                                  "vkCreateDevice", queue_family_var_name.c_str());
1963
1964        // Verify that the requested queue count for this queue family is known to be valid at this point in time
1965        if (requested_queue_family < pd_state->queue_family_count) {
1966            const auto requested_queue_count = infos[i].queueCount;
1967            const auto queue_family_props_count = pd_state->queue_family_properties.size();
1968            const bool queue_family_has_props = requested_queue_family < queue_family_props_count;
1969            const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
1970                                                  ? " or vkGetPhysicalDeviceQueueFamilyProperties2KHR"
1971                                                  : "";
1972            std::string count_note =
1973                !queue_family_has_props
1974                    ? "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained"
1975                    : "i.e. is not less than or equal to " +
1976                          std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount);
1977
1978            if (!queue_family_has_props ||
1979                requested_queue_count > pd_state->queue_family_properties[requested_queue_family].queueCount) {
1980                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1981                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(pd_state->phys_device), __LINE__,
1982                                VALIDATION_ERROR_06c002fc, "DL",
1983                                "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
1984                                ") is not less than or equal to "
1985                                "the available queue count for this "
1986                                "pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueFamilyIndex (=%" PRIu32
1987                                ") obtained previously from "
1988                                "vkGetPhysicalDeviceQueueFamilyProperties%s (%s). %s",
1989                                i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str(),
1990                                validation_error_map[VALIDATION_ERROR_06c002fc]);
1991            }
1992        }
1993    }
1994
1995    return skip;
1996}
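//
// Illustrative application-side pattern that the checks above enforce (comment
// only, not compiled; 'gpu' and 'props' are hypothetical names):
//
//     uint32_t count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
//     std::vector<VkQueueFamilyProperties> props(count);
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, props.data());
//
// Only after this query is it valid to pick a queueFamilyIndex < count with a
// queueCount <= props[queueFamilyIndex].queueCount in VkDeviceQueueCreateInfo.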
1997
1998// Verify that features have been queried and that they are available
1999static bool ValidateRequestedFeatures(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
2000                                      const VkPhysicalDeviceFeatures *requested_features) {
2001    bool skip = false;
2002
2003    const VkBool32 *actual = reinterpret_cast<const VkBool32 *>(&pd_state->features);
2004    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
2005    // TODO : This is a nice, compact way to loop through the struct, but a bad way to report issues:
2006    //  the report should name the offending struct member. That requires iterating over members by
2007    //  name, which should be done with codegen to keep the list in sync with the header.
2008    uint32_t errors = 0;
2009    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
2010    for (uint32_t i = 0; i < total_bools; i++) {
2011        if (requested[i] > actual[i]) {
2012            // TODO: Add index to struct member name helper to be able to include a feature name
2013            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2014                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
2015                            "While calling vkCreateDevice(), you requested feature #%u in the VkPhysicalDeviceFeatures "
2016                            "struct, which is not available on this device.",
2017                            i);
2018            errors++;
2019        }
2020    }
2021    if (errors && (UNCALLED == pd_state->vkGetPhysicalDeviceFeaturesState)) {
2022        // If user didn't request features, notify them that they should
2023        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
2024        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2025                        0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
2026                        "You requested features that are unavailable on this device. You should first query feature "
2027                        "availability by calling vkGetPhysicalDeviceFeatures().");
2028    }
2029    return skip;
2030}
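//
// Illustrative pattern for requesting features safely (comment only; variable
// names are hypothetical):
//
//     VkPhysicalDeviceFeatures supported = {};
//     vkGetPhysicalDeviceFeatures(gpu, &supported);
//     VkPhysicalDeviceFeatures enabled = {};
//     enabled.geometryShader = supported.geometryShader;  // request only what exists
//     // ...then point VkDeviceCreateInfo::pEnabledFeatures at 'enabled'.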
2031
2032VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
2033                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
2034    bool skip = false;
2035    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
2036
2037    unique_lock_t lock(global_lock);
2038    auto pd_state = GetPhysicalDeviceState(instance_data, gpu);
2039
2040    // TODO: object_tracker should perhaps do this instead
2041    //       and it does not seem to currently work anyway -- the loader just crashes before this point
2042    if (!pd_state) {
2043        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2044                        0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
2045                        "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
2046    }
2047
2048    // Check that any requested features are available
2049    if (pCreateInfo->pEnabledFeatures) {
2050        skip |= ValidateRequestedFeatures(instance_data, pd_state, pCreateInfo->pEnabledFeatures);
2051    }
2052    skip |=
2053        ValidateDeviceQueueCreateInfos(instance_data, pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);
2054
2055    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2056
2057    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
2058
2059    assert(chain_info->u.pLayerInfo);
2060    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
2061    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
2062    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_data->instance, "vkCreateDevice");
2063    if (fpCreateDevice == NULL) {
2064        return VK_ERROR_INITIALIZATION_FAILED;
2065    }
2066
2067    // Advance the link info for the next element on the chain
2068    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
2069
2070    lock.unlock();
2071
2072    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
2073    if (result != VK_SUCCESS) {
2074        return result;
2075    }
2076
2077    lock.lock();
2078    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
2079
2080    device_data->instance_data = instance_data;
2081    // Setup device dispatch table
2082    layer_init_device_dispatch_table(*pDevice, &device_data->dispatch_table, fpGetDeviceProcAddr);
2083    device_data->device = *pDevice;
2084    // Save PhysicalDevice handle
2085    device_data->physical_device = gpu;
2086
2087    device_data->report_data = layer_debug_report_create_device(instance_data->report_data, *pDevice);
2088    device_data->extensions.InitFromDeviceCreateInfo(&instance_data->extensions, pCreateInfo);
2089
2090    // Get physical device limits for this device
2091    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(device_data->phys_dev_properties.properties));
2092    uint32_t count;
2093    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
2094    device_data->phys_dev_properties.queue_family_properties.resize(count);
2095    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
2096        gpu, &count, &device_data->phys_dev_properties.queue_family_properties[0]);
2097    // TODO: device limits should make sure these are compatible
2098    if (pCreateInfo->pEnabledFeatures) {
2099        device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
2100    } else {
2101        memset(&device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
2102    }
2103    // Store physical device properties and physical device mem limits into device layer_data structs
2104    instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &device_data->phys_dev_mem_props);
2105    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &device_data->phys_dev_props);
2106    lock.unlock();
2107
2108    ValidateLayerOrdering(*pCreateInfo);
2109
2110    return result;
2111}
2112
2114VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
2115    // TODOSC : Shouldn't need any customization here
2116    dispatch_key key = get_dispatch_key(device);
2117    layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);
2118    // Free all the memory
2119    unique_lock_t lock(global_lock);
2120    deletePipelines(dev_data);
2121    dev_data->renderPassMap.clear();
2122    for (auto ii = dev_data->commandBufferMap.begin(); ii != dev_data->commandBufferMap.end(); ++ii) {
2123        delete (*ii).second;
2124    }
2125    dev_data->commandBufferMap.clear();
2126    // This will also delete all sets in the pool & remove them from setMap
2127    deletePools(dev_data);
2128    // All sets should be removed
2129    assert(dev_data->setMap.empty());
2130    dev_data->descriptorSetLayoutMap.clear();
2131    dev_data->imageViewMap.clear();
2132    dev_data->imageMap.clear();
2133    dev_data->imageSubresourceMap.clear();
2134    dev_data->imageLayoutMap.clear();
2135    dev_data->bufferViewMap.clear();
2136    dev_data->bufferMap.clear();
2137    // Queues persist until device is destroyed
2138    dev_data->queueMap.clear();
2139    // Report any memory leaks
2140    layer_debug_report_destroy_device(device);
2141    lock.unlock();
2142
2143#if DISPATCH_MAP_DEBUG
2144    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
2145#endif
2146
2147    dev_data->dispatch_table.DestroyDevice(device, pAllocator);
2148    FreeLayerDataPtr(key, layer_data_map);
2149}
2150
2151static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
2152
2153// For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id
2154//   and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id
2155static bool ValidateStageMaskGsTsEnables(layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
2156                                         UNIQUE_VALIDATION_ERROR_CODE geo_error_id, UNIQUE_VALIDATION_ERROR_CODE tess_error_id) {
2157    bool skip = false;
2158    if (!dev_data->enabled_features.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
2159        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
2160                        geo_error_id, "DL",
2161                        "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when "
2162                        "device does not have geometryShader feature enabled. %s",
2163                        caller, validation_error_map[geo_error_id]);
2164    }
2165    if (!dev_data->enabled_features.tessellationShader &&
2166        (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
2167        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
2168                        tess_error_id, "DL",
2169                        "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT "
2170                        "and/or VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device "
2171                        "does not have tessellationShader feature enabled. %s",
2172                        caller, validation_error_map[tess_error_id]);
2173    }
2174    return skip;
2175}
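//
// Illustrative fix for the errors above (comment only; enable the feature at
// device creation before using the corresponding stage bits):
//
//     VkPhysicalDeviceFeatures enabled = {};
//     enabled.geometryShader = VK_TRUE;      // allows VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
//     enabled.tessellationShader = VK_TRUE;  // allows the tessellation stage bits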
2176
2177// Loop through bound objects and increment their in_use counts.
2178static void IncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2179    for (auto obj : cb_node->object_bindings) {
2180        auto base_obj = GetStateStructPtrFromObject(dev_data, obj);
2181        if (base_obj) {
2182            base_obj->in_use.fetch_add(1);
2183        }
2184    }
2185}
2186// Track which resources are in-flight by atomically incrementing their "in_use" count
2187static void incrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2188    cb_node->submitCount++;
2189    cb_node->in_use.fetch_add(1);
2190
2191    // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
2192    IncrementBoundObjects(dev_data, cb_node);
2193    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2194    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2195    //  should then be flagged prior to calling this function
2196    for (auto drawDataElement : cb_node->drawData) {
2197        for (auto buffer : drawDataElement.buffers) {
2198            auto buffer_state = GetBufferState(dev_data, buffer);
2199            if (buffer_state) {
2200                buffer_state->in_use.fetch_add(1);
2201            }
2202        }
2203    }
2204    for (auto event : cb_node->writeEventsBeforeWait) {
2205        auto event_state = GetEventNode(dev_data, event);
2206        if (event_state) event_state->write_in_use++;
2207    }
2208}
2209
2210// Note: This function assumes that the global lock is held by the calling thread.
2211// For the given queue, verify the queue state up to the given seq number.
2212// Currently the only check is that, if there are events to be waited on prior to
2213//  a query reset, all such events have been signaled.
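// Implemented as a worklist over queues: each queue carries a target sequence
//  number to validate up to; a wait on a semaphore signaled by another queue
//  extends that queue's target and re-adds it to the worklist, so the traversal
//  follows the cross-queue dependency graph backwards.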
2214static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *initial_queue, uint64_t initial_seq) {
2215    bool skip = false;
2216
2217    // sequence number we want to validate up to, per queue
2218    std::unordered_map<QUEUE_STATE *, uint64_t> target_seqs { { initial_queue, initial_seq } };
2219    // sequence number we've completed validation for, per queue
2220    std::unordered_map<QUEUE_STATE *, uint64_t> done_seqs;
2221    std::vector<QUEUE_STATE *> worklist { initial_queue };
2222
2223    while (worklist.size()) {
2224        auto queue = worklist.back();
2225        worklist.pop_back();
2226
2227        auto target_seq = target_seqs[queue];
2228        auto seq = std::max(done_seqs[queue], queue->seq);
2229        auto sub_it = queue->submissions.begin() + int(seq - queue->seq);  // seq >= queue->seq
2230
2231        for (; seq < target_seq; ++sub_it, ++seq) {
2232            for (auto &wait : sub_it->waitSemaphores) {
2233                auto other_queue = GetQueueState(dev_data, wait.queue);
2234
2235                if (other_queue == queue)
2236                    continue;   // semaphores /always/ point backwards, so a wait on the same queue adds nothing.
2237
2238                auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
2239                auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);
2240
2241                // if this wait is for another queue, and covers new sequence
2242                // numbers beyond what we've already validated, mark the new
2243                // target seq and (possibly-re)add the queue to the worklist.
2244                if (other_done_seq < other_target_seq) {
2245                    target_seqs[other_queue] = other_target_seq;
2246                    worklist.push_back(other_queue);
2247                }
2248            }
2249
2250            for (auto cb : sub_it->cbs) {
2251                auto cb_node = GetCBNode(dev_data, cb);
2252                if (cb_node) {
2253                    for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
2254                        for (auto event : queryEventsPair.second) {
2255                            if (dev_data->eventMap[event].needsSignaled) {
2256                                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2257                                                VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
2258                                                "Cannot get query results on queryPool 0x%" PRIx64
2259                                                " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
2260                                                HandleToUint64(queryEventsPair.first.pool), queryEventsPair.first.index,
2261                                                HandleToUint64(event));
2262                            }
2263                        }
2264                    }
2265                }
2266            }
2267        }
2268
2269        // finally mark the point we've now validated this queue to.
2270        done_seqs[queue] = seq;
2271    }
2272
2273    return skip;
2274}
2275
2276// When the given fence is retired, verify outstanding queue operations through the point of the fence
2277static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
2278    auto fence_state = GetFenceNode(dev_data, fence);
2279    if (VK_NULL_HANDLE != fence_state->signaler.first) {
2280        return VerifyQueueStateToSeq(dev_data, GetQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
2281    }
2282    return false;
2283}
2284
2285// Decrement in-use count for objects bound to command buffer
2286static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2287    BASE_NODE *base_obj = nullptr;
2288    for (auto obj : cb_node->object_bindings) {
2289        base_obj = GetStateStructPtrFromObject(dev_data, obj);
2290        if (base_obj) {
2291            base_obj->in_use.fetch_sub(1);
2292        }
2293    }
2294}
2295
2296static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
2297    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
2298
2299    // Roll this queue forward, one submission at a time.
2300    while (pQueue->seq < seq) {
2301        auto &submission = pQueue->submissions.front();
2302
2303        for (auto &wait : submission.waitSemaphores) {
2304            auto pSemaphore = GetSemaphoreNode(dev_data, wait.semaphore);
2305            if (pSemaphore) {
2306                pSemaphore->in_use.fetch_sub(1);
2307            }
2308            auto &lastSeq = otherQueueSeqs[wait.queue];
2309            lastSeq = std::max(lastSeq, wait.seq);
2310        }
2311
2312        for (auto &semaphore : submission.signalSemaphores) {
2313            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2314            if (pSemaphore) {
2315                pSemaphore->in_use.fetch_sub(1);
2316            }
2317        }
2318
2319        for (auto cb : submission.cbs) {
2320            auto cb_node = GetCBNode(dev_data, cb);
2321            if (!cb_node) {
2322                continue;
2323            }
2324            // First perform decrement on general case bound objects
2325            DecrementBoundResources(dev_data, cb_node);
2326            for (auto drawDataElement : cb_node->drawData) {
2327                for (auto buffer : drawDataElement.buffers) {
2328                    auto buffer_state = GetBufferState(dev_data, buffer);
2329                    if (buffer_state) {
2330                        buffer_state->in_use.fetch_sub(1);
2331                    }
2332                }
2333            }
2334            for (auto event : cb_node->writeEventsBeforeWait) {
2335                auto eventNode = dev_data->eventMap.find(event);
2336                if (eventNode != dev_data->eventMap.end()) {
2337                    eventNode->second.write_in_use--;
2338                }
2339            }
2340            for (auto queryStatePair : cb_node->queryToStateMap) {
2341                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
2342            }
2343            for (auto eventStagePair : cb_node->eventToStageMap) {
2344                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
2345            }
2346
2347            cb_node->in_use.fetch_sub(1);
2348        }
2349
2350        auto pFence = GetFenceNode(dev_data, submission.fence);
2351        if (pFence) {
2352            pFence->state = FENCE_RETIRED;
2353        }
2354
2355        pQueue->submissions.pop_front();
2356        pQueue->seq++;
2357    }
2358
2359    // Roll other queues forward to the highest seq we saw a wait for
2360    for (auto qs : otherQueueSeqs) {
2361        RetireWorkOnQueue(dev_data, GetQueueState(dev_data, qs.first), qs.second);
2362    }
2363}
2364
2365// Submit a fence to a queue, delimiting previous fences and previous untracked
2366// work with it: once the fence signals, everything enqueued before it has completed.
2367static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
2368    pFence->state = FENCE_INFLIGHT;
2369    pFence->signaler.first = pQueue->queue;
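    // The fence is considered signaled once the queue retires the sequence number
    // just past all currently-queued submissions plus the submitCount submissions
    // about to be enqueued by the caller.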
2370    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
2371}
2372
2373static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2374    bool skip = false;
2375    if ((pCB->in_use.load() || current_submit_count > 1) &&
2376        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2377        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2378                        __LINE__, VALIDATION_ERROR_31a0008e, "DS",
2379                        "Command Buffer 0x%p is already in use and is not marked for simultaneous use. %s", pCB->commandBuffer,
2380                        validation_error_map[VALIDATION_ERROR_31a0008e]);
2381    }
2382    return skip;
2383}
2384
2385static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source,
2386                                       int current_submit_count, UNIQUE_VALIDATION_ERROR_CODE vu_id) {
2387    bool skip = false;
2388    if (dev_data->instance_data->disabled.command_buffer_state) return skip;
2389    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
2390    if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
2391        (cb_state->submitCount + current_submit_count > 1)) {
2392        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2393                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
2394                        "Command buffer 0x%p was begun with VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
2395                        "set, but has been submitted 0x%" PRIxLEAST64 " times.",
2396                        cb_state->commandBuffer, cb_state->submitCount + current_submit_count);
2397    }
2398
2399    // Validate that cmd buffers have been updated
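    // Lifecycle as tracked here: CB_NEW (allocated, never recorded) -> CB_RECORDING
    // (begun but not ended) -> recorded; the CB_INVALID_* states mean something the
    // buffer depends on was destroyed or otherwise invalidated after recording.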
2400    switch (cb_state->state) {
2401        case CB_INVALID_INCOMPLETE:
2402        case CB_INVALID_COMPLETE:
2403            skip |= ReportInvalidCommandBuffer(dev_data, cb_state, call_source);
2404            break;
2405
2406        case CB_NEW:
2407            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2408                            HandleToUint64(cb_state->commandBuffer), __LINE__, vu_id, "DS",
2409                            "Command buffer 0x%p used in the call to %s is unrecorded and contains no commands. %s",
2410                            cb_state->commandBuffer, call_source, validation_error_map[vu_id]);
2411            break;
2412
2413        case CB_RECORDING:
2414            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2415                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
2416                            "You must call vkEndCommandBuffer() on command buffer 0x%p before this call to %s!",
2417                            cb_state->commandBuffer, call_source);
2418            break;
2419
2420        default: /* recorded */
2421            break;
2422    }
2423    return skip;
2424}
2425
2426static bool validateResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2427    bool skip = false;
2428
2429    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2430    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2431    //  should then be flagged prior to calling this function
2432    for (auto drawDataElement : cb_node->drawData) {
2433        for (auto buffer : drawDataElement.buffers) {
2434            auto buffer_state = GetBufferState(dev_data, buffer);
2435            if (!buffer_state) {
2436                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
2437                                HandleToUint64(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
2438                                "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", HandleToUint64(buffer));
2439            }
2440        }
2441    }
2442    return skip;
2443}
2444
2445// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
2446bool ValidImageBufferQueue(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue, uint32_t count,
2447                           const uint32_t *indices) {
2448    bool found = false;
2449    bool skip = false;
2450    auto queue_state = GetQueueState(dev_data, queue);
2451    if (queue_state) {
2452        for (uint32_t i = 0; i < count; i++) {
2453            if (indices[i] == queue_state->queueFamilyIndex) {
2454                found = true;
2455                break;
2456            }
2457        }
2458
2459        if (!found) {
2460            skip = log_msg(
2461                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object->type], object->handle, __LINE__,
2462                DRAWSTATE_INVALID_QUEUE_FAMILY, "DS", "vkQueueSubmit: Command buffer 0x%" PRIxLEAST64 " contains %s 0x%" PRIxLEAST64
2463                                                      " whose VK_SHARING_MODE_CONCURRENT queue family list does not include queue family %d.",
2464                HandleToUint64(cb_node->commandBuffer), object_string[object->type], object->handle, queue_state->queueFamilyIndex);
2465        }
2466    }
2467    return skip;
2468}
2469
2470// Validate that queueFamilyIndices of primary command buffers match this queue
2471// Secondary command buffers were previously validated in vkCmdExecuteCommands().
2472static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
2473    bool skip = false;
2474    auto pPool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
2475    auto queue_state = GetQueueState(dev_data, queue);
2476
2477    if (pPool && queue_state) {
2478        if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
2479            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2480                            HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_31a00094, "DS",
2481                            "vkQueueSubmit: Primary command buffer 0x%p created in queue family %d is being submitted on queue "
2482                            "0x%p from queue family %d. %s",
2483                            pCB->commandBuffer, pPool->queueFamilyIndex, queue, queue_state->queueFamilyIndex,
2484                            validation_error_map[VALIDATION_ERROR_31a00094]);
2485        }
2486
2487        // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
2488        for (auto object : pCB->object_bindings) {
2489            if (object.type == kVulkanObjectTypeImage) {
2490                auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(object.handle));
2491                if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2492                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, image_state->createInfo.queueFamilyIndexCount,
2493                                                  image_state->createInfo.pQueueFamilyIndices);
2494                }
2495            } else if (object.type == kVulkanObjectTypeBuffer) {
2496                auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object.handle));
2497                if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2498                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, buffer_state->createInfo.queueFamilyIndexCount,
2499                                                  buffer_state->createInfo.pQueueFamilyIndices);
2500                }
2501            }
2502        }
2503    }
2504
2505    return skip;
2506}
2507
2508static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2509    // Track in-use for resources off of primary and any secondary CBs
2510    bool skip = false;
2511
2512    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
2513    // on device
2514    skip |= validateCommandBufferSimultaneousUse(dev_data, pCB, current_submit_count);
2515
2516    skip |= validateResources(dev_data, pCB);
2517
2518    for (auto pSubCB : pCB->linkedCommandBuffers) {
2519        skip |= validateResources(dev_data, pSubCB);
2520        // TODO: replace with invalidateCommandBuffers() at recording.
2521        if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
2522            !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2523            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2524                            0, __LINE__, VALIDATION_ERROR_31a00092, "DS",
2525                            "Command buffer 0x%p was submitted with secondary buffer 0x%p but that buffer has subsequently been "
2526                            "bound to primary cmd buffer 0x%p and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set. %s",
2527                            pCB->commandBuffer, pSubCB->commandBuffer, pSubCB->primaryCommandBuffer,
2528                            validation_error_map[VALIDATION_ERROR_31a00092]);
2529        }
2530    }
2531
2532    skip |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()", current_submit_count, VALIDATION_ERROR_31a00090);
2533
2534    return skip;
2535}
2536
2537static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
2538    bool skip = false;
2539
2540    if (pFence) {
2541        if (pFence->state == FENCE_INFLIGHT) {
2542            // TODO: opportunities for VALIDATION_ERROR_31a00080, VALIDATION_ERROR_316008b4, VALIDATION_ERROR_16400a0e
2543            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2544                            HandleToUint64(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
2545                            "Fence 0x%" PRIx64 " is already in use by another submission.", HandleToUint64(pFence->fence));
2546        }
2547
2548        else if (pFence->state == FENCE_RETIRED) {
2549            // TODO: opportunities for VALIDATION_ERROR_31a0007e, VALIDATION_ERROR_316008b2, VALIDATION_ERROR_16400a0e
2550            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2551                            HandleToUint64(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
2552                        "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted.",
2553                            HandleToUint64(pFence->fence));
2554        }
2555    }
2556
2557    return skip;
2558}
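//
// Typical reuse pattern that avoids both errors above (illustrative comment only):
//
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
//     vkResetFences(device, 1, &fence);  // required before the fence is submitted again
//     vkQueueSubmit(queue, 1, &submit_info, fence);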
2559
2560static void PostCallRecordQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2561                                      VkFence fence) {
2562    auto pQueue = GetQueueState(dev_data, queue);
2563    auto pFence = GetFenceNode(dev_data, fence);
2564
2565    // Mark the fence in-use.
2566    if (pFence) {
2567        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
2568    }
2569
2570    // Now process each individual submit
2571    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2572        std::vector<VkCommandBuffer> cbs;
2573        const VkSubmitInfo *submit = &pSubmits[submit_idx];
2574        vector<SEMAPHORE_WAIT> semaphore_waits;
2575        vector<VkSemaphore> semaphore_signals;
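        // Record semaphore state transitions: a wait consumes the pending signal
        // (remembering which queue/seq produced it, for cross-queue retirement later),
        // and a signal stamps the semaphore with this queue's upcoming sequence number.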
2576        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2577            VkSemaphore semaphore = submit->pWaitSemaphores[i];
2578            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2579            if (pSemaphore) {
2580                if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
2581                    semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
2582                    pSemaphore->in_use.fetch_add(1);
2583                }
2584                pSemaphore->signaler.first = VK_NULL_HANDLE;
2585                pSemaphore->signaled = false;
2586            }
2587        }
2588        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2589            VkSemaphore semaphore = submit->pSignalSemaphores[i];
2590            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2591            if (pSemaphore) {
2592                pSemaphore->signaler.first = queue;
2593                pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
2594                pSemaphore->signaled = true;
2595                pSemaphore->in_use.fetch_add(1);
2596                semaphore_signals.push_back(semaphore);
2597            }
2598        }
2599        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2600            auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2601            if (cb_node) {
2602                cbs.push_back(submit->pCommandBuffers[i]);
2603                for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
2604                    cbs.push_back(secondaryCmdBuffer->commandBuffer);
2605                }
2606                UpdateCmdBufImageLayouts(dev_data, cb_node);
2607                incrementResources(dev_data, cb_node);
2608                for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
2609                    incrementResources(dev_data, secondaryCmdBuffer);
2610                }
2611            }
2612        }
2613        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
2614                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
2615    }
2616
2617    if (pFence && !submitCount) {
2618        // If no submissions, but just dropping a fence on the end of the queue,
2619        // record an empty submission with just the fence, so we can determine
2620        // its completion.
2621        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(),
2622                                         fence);
2623    }
2624}
2625
2626static bool PreCallValidateQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2627                                       VkFence fence) {
2628    auto pFence = GetFenceNode(dev_data, fence);
2629    bool skip = ValidateFenceForSubmit(dev_data, pFence);
2630    if (skip) {
2631        return true;
2632    }
2633
2634    unordered_set<VkSemaphore> signaled_semaphores;
2635    unordered_set<VkSemaphore> unsignaled_semaphores;
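    // These sets simulate semaphore state across the whole batch: a wait must find
    // the semaphore signaled (earlier in this batch or previously), and a signal
    // must not find it already signaled, else forward progress cannot be guaranteed.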
2636    vector<VkCommandBuffer> current_cmds;
2637    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> localImageLayoutMap;
2638    // Now verify each individual submit
2639    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2640        const VkSubmitInfo *submit = &pSubmits[submit_idx];
2641        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2642            skip |= ValidateStageMaskGsTsEnables(dev_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()",
2643                                                 VALIDATION_ERROR_13c00098, VALIDATION_ERROR_13c0009a);
2644            VkSemaphore semaphore = submit->pWaitSemaphores[i];
2645            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2646            if (pSemaphore) {
2647                if (unsignaled_semaphores.count(semaphore) ||
2648                    (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
2649                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2650                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
2651                                    "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
2652                                    HandleToUint64(semaphore));
2653                } else {
2654                    signaled_semaphores.erase(semaphore);
2655                    unsignaled_semaphores.insert(semaphore);
2656                }
2657            }
2658        }
2659        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2660            VkSemaphore semaphore = submit->pSignalSemaphores[i];
2661            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2662            if (pSemaphore) {
2663                if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
2664                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2665                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
2666                                    "Queue 0x%p is signaling semaphore 0x%" PRIx64
2667                                    " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
2668                                    queue, HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
2669                } else {
2670                    unsignaled_semaphores.erase(semaphore);
2671                    signaled_semaphores.insert(semaphore);
2672                }
2673            }
2674        }
2675        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2676            auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2677            if (cb_node) {
2678                skip |= ValidateCmdBufImageLayouts(dev_data, cb_node, dev_data->imageLayoutMap, localImageLayoutMap);
2679                current_cmds.push_back(submit->pCommandBuffers[i]);
2680                skip |= validatePrimaryCommandBufferState(
2681                    dev_data, cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]));
2682                skip |= validateQueueFamilyIndices(dev_data, cb_node, queue);
2683
2684                // Potential early exit here as bad object state may crash in delayed function calls
2685                if (skip) {
2686                    return true;
2687                }
2688
2689                // Call submit-time functions to validate/update state
2690                for (auto &function : cb_node->validate_functions) {
2691                    skip |= function();
2692                }
2693                for (auto &function : cb_node->eventUpdates) {
2694                    skip |= function(queue);
2695                }
2696                for (auto &function : cb_node->queryUpdates) {
2697                    skip |= function(queue);
2698                }
2699            }
2700        }
2701    }
2702    return skip;
2703}
2704
2705VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
2706    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
2707    unique_lock_t lock(global_lock);
2708
2709    bool skip = PreCallValidateQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
2710    lock.unlock();
2711
2712    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2713
2714    VkResult result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
2715
2716    lock.lock();
2717    PostCallRecordQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
2718    lock.unlock();
2719    return result;
2720}
2721
2722static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
2723    bool skip = false;
2724    if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
2725        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2726                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_16c004f8, "MEM",
2727                        "Number of currently valid memory objects is not less than the maximum allowed (%u). %s",
2728                        dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount,
2729                        validation_error_map[VALIDATION_ERROR_16c004f8]);
2730    }
2731    return skip;
2732}
2733
2734static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
2735    add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
2736    return;
2737}
2738
2739VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
2740                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
2741    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
2742    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2743    unique_lock_t lock(global_lock);
2744    bool skip = PreCallValidateAllocateMemory(dev_data);
2745    if (!skip) {
2746        lock.unlock();
2747        result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
2748        lock.lock();
2749        if (VK_SUCCESS == result) {
2750            PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
2751        }
2752    }
2753    return result;
2754}
2755
2756// For given obj node, if it is in use, flag a validation error and return callback result, else return false
2757bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
2758                            UNIQUE_VALIDATION_ERROR_CODE error_code) {
2759    if (dev_data->instance_data->disabled.object_in_use) return false;
2760    bool skip = false;
2761    if (obj_node->in_use.load()) {
2762        skip |=
2763            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle,
2764                    __LINE__, error_code, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer. %s",
2765                    object_string[obj_struct.type], obj_struct.handle, validation_error_map[error_code]);
2766    }
2767    return skip;
2768}
2769
2770static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
2771    *mem_info = GetMemObjInfo(dev_data, mem);
2772    *obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory};
2773    if (dev_data->instance_data->disabled.free_memory) return false;
2774    bool skip = false;
2775    if (*mem_info) {
2776        skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, VALIDATION_ERROR_2880054a);
2777    }
2778    return skip;
2779}
2780
2781static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
2782    // Clear mem binding for any bound objects
2783    for (auto obj : mem_info->obj_bindings) {
2784        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle, __LINE__,
2785                MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
2786                obj.handle, HandleToUint64(mem_info->mem));
2787        switch (obj.type) {
2788            case kVulkanObjectTypeImage: {
2789                auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
2790                assert(image_state);  // Any destroyed images should already be removed from bindings
2791                image_state->binding.mem = MEMORY_UNBOUND;
2792                break;
2793            }
2794            case kVulkanObjectTypeBuffer: {
2795                auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
2796                assert(buffer_state);  // Any destroyed buffers should already be removed from bindings
2797                buffer_state->binding.mem = MEMORY_UNBOUND;
2798                break;
2799            }
2800            default:
2801                // Should only have buffer or image objects bound to memory
2802                assert(0);
2803        }
2804    }
2805    // Any bound cmd buffers are now invalid
2806    invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
2807    dev_data->memObjMap.erase(mem);
2808}
2809
2810VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
2811    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2812    DEVICE_MEM_INFO *mem_info = nullptr;
2813    VK_OBJECT obj_struct;
2814    unique_lock_t lock(global_lock);
2815    bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
2816    if (!skip) {
2817        lock.unlock();
2818        dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
2819        lock.lock();
2820        if (mem != VK_NULL_HANDLE) {
2821            PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
2822        }
2823    }
2824}
2825
2826// Validate a vkMapMemory request. The memory must not already be mapped,
2827//  and the size of the map range must be:
2828//  1. Not zero
2829//  2. Within the size of the memory allocation
2830static bool ValidateMapMemRange(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
2831    bool skip = false;
2832
2833    if (size == 0) {
2834        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2835                       HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
2836                       "vkMapMemory: Attempting to map memory range of size zero");
2837    }
2838
2839    auto mem_element = dev_data->memObjMap.find(mem);
2840    if (mem_element != dev_data->memObjMap.end()) {
2841        auto mem_info = mem_element->second.get();
2842        // It is an application error to call VkMapMemory on an object that is already mapped
2843        if (mem_info->mem_range.size != 0) {
2844            skip =
2845                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2846                        HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
2847                        "vkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, HandleToUint64(mem));
2848        }
2849
2850        // Validate that offset + size is within object's allocationSize
2851        if (size == VK_WHOLE_SIZE) {
2852            if (offset >= mem_info->alloc_info.allocationSize) {
2853                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2854                               HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
2855                               "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
2856                               " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
2857                               offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
2858            }
2859        } else {
2860            if ((offset + size) > mem_info->alloc_info.allocationSize) {
2861                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2862                               HandleToUint64(mem), __LINE__, VALIDATION_ERROR_31200552, "MEM",
2863                               "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ". %s",
2864                               offset, size + offset, mem_info->alloc_info.allocationSize,
2865                               validation_error_map[VALIDATION_ERROR_31200552]);
2866            }
2867        }
2868    }
2869    return skip;
2870}
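//
// Example of the range constraint (illustrative comment only): for a 256-byte
// allocation,
//
//     vkMapMemory(device, mem, 0, VK_WHOLE_SIZE, 0, &ptr);  // OK: maps [0, 256)
//     vkMapMemory(device, mem, 192, 128, 0, &ptr);          // error: 192 + 128 > 256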
2871
2872static void storeMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
2873    auto mem_info = GetMemObjInfo(dev_data, mem);
2874    if (mem_info) {
2875        mem_info->mem_range.offset = offset;
2876        mem_info->mem_range.size = size;
2877    }
2878}
2879
2880static bool deleteMemRanges(layer_data *dev_data, VkDeviceMemory mem) {
2881    bool skip = false;
2882    auto mem_info = GetMemObjInfo(dev_data, mem);
2883    if (mem_info) {
2884        if (!mem_info->mem_range.size) {
2885            // Valid Usage: memory must currently be mapped
2886            skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
2887                           HandleToUint64(mem), __LINE__, VALIDATION_ERROR_33600562, "MEM",
2888                           "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64 ". %s", HandleToUint64(mem),
2889                           validation_error_map[VALIDATION_ERROR_33600562]);
2890        }
2891        mem_info->mem_range.size = 0;
2892        if (mem_info->shadow_copy) {
2893            free(mem_info->shadow_copy_base);
2894            mem_info->shadow_copy_base = 0;
2895            mem_info->shadow_copy = 0;
2896        }
2897    }
2898    return skip;
2899}
2900
2901// Guard value for pad data
2902static char NoncoherentMemoryFillValue = 0xb;
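// The guard bands around non-coherent mappings are seeded with this byte so that
// out-of-bounds writes by the application can be detected later by checking that
// the pad bytes still hold the fill value.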
2903
2904static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
2905                                     void **ppData) {
2906    auto mem_info = GetMemObjInfo(dev_data, mem);
2907    if (mem_info) {
2908        mem_info->p_driver_data = *ppData;
2909        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
2910        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
2911            mem_info->shadow_copy = 0;
2912        } else {
2913            if (size == VK_WHOLE_SIZE) {
2914                size = mem_info->alloc_info.allocationSize - offset;
2915            }
2916            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
2917            assert(SafeModulo(mem_info->shadow_pad_size,
2918                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
2919            // Ensure start of mapped region reflects hardware alignment constraints
2920            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
2921
2922            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
2923            uint64_t start_offset = offset % map_alignment;
2924            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
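            // Resulting layout of the shadow allocation (illustrative):
            //   [alignment slack][shadow_pad_size guard][user data: size bytes][shadow_pad_size guard]
            // The *ppData handed back to the app points at the start of the user-data region.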
2925            mem_info->shadow_copy_base =
2926                malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
2927
2928            mem_info->shadow_copy =
2929                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
2930                                         ~(map_alignment - 1)) +
2931                start_offset;
2932            assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
2933                                  map_alignment) == 0);
2934
2935            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
2936            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
2937        }
2938    }
2939}
2940
2941// Verify that state for fence being waited on is appropriate. That is,
2942//  a fence being waited on should have been submitted on a queue or during
2943//  acquire next image, otherwise the wait can never complete.
2944static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
2945    bool skip = false;
2946
2947    auto pFence = GetFenceNode(dev_data, fence);
2948    if (pFence) {
2949        if (pFence->state == FENCE_UNSIGNALED) {
2950            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2951                            HandleToUint64(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
2952                            "%s called for fence 0x%" PRIxLEAST64
2953                            " which has not been submitted on a queue or during "
2954                            "acquire next image.",
2955                            apiCall, HandleToUint64(fence));
2956        }
2957    }
2958    return skip;
2959}
2960
2961static void RetireFence(layer_data *dev_data, VkFence fence) {
2962    auto pFence = GetFenceNode(dev_data, fence);
2963    if (pFence->signaler.first != VK_NULL_HANDLE) {
2964        // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
2965        RetireWorkOnQueue(dev_data, GetQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
2966    } else {
2967        // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
2968        // the fence as retired.
2969        pFence->state = FENCE_RETIRED;
2970    }
2971}
2972
2973static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
2974    if (dev_data->instance_data->disabled.wait_for_fences) return false;
2975    bool skip = false;
2976    for (uint32_t i = 0; i < fence_count; i++) {
2977        skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
2978        skip |= VerifyQueueStateToFence(dev_data, fences[i]);
2979    }
2980    return skip;
2981}
2982
2983static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
2984    // When we know that all fences are complete we can clean/remove their CBs
2985    if ((VK_TRUE == wait_all) || (1 == fence_count)) {
2986        for (uint32_t i = 0; i < fence_count; i++) {
2987            RetireFence(dev_data, fences[i]);
2988        }
2989    }
2990    // NOTE : Alternate case not handled here is when some fences have completed. In
2991    //  this case for app to guarantee which fences completed it will have to call
2992    //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
2993}
2994
2995VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
2996                                             uint64_t timeout) {
2997    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2998    // Verify fence status of submitted fences
2999    unique_lock_t lock(global_lock);
3000    bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
3001    lock.unlock();
3002    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3003
3004    VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
3005
3006    if (result == VK_SUCCESS) {
3007        lock.lock();
3008        PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
3009        lock.unlock();
3010    }
3011    return result;
3012}
3013
3014static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
3015    if (dev_data->instance_data->disabled.get_fence_state) return false;
3016    return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
3017}
3018
3019static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }
3020
3021VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
3022    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3023    unique_lock_t lock(global_lock);
3024    bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
3025    lock.unlock();
3026    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3027
3028    VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
3029    if (result == VK_SUCCESS) {
3030        lock.lock();
3031        PostCallRecordGetFenceStatus(dev_data, fence);
3032        lock.unlock();
3033    }
3034    return result;
3035}
3036
3037static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
3038    // Add queue to tracking set only if it is new
3039    auto result = dev_data->queues.emplace(queue);
3040    if (result.second) {
3041        QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
3042        queue_state->queue = queue;
3043        queue_state->queueFamilyIndex = q_family_index;
3044        queue_state->seq = 0;
3045    }
3046}
3047
3048VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
3049    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3050    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
3051    lock_guard_t lock(global_lock);
3052
3053    PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
3054}
3055
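// Each QUEUE_STATE carries a monotonically increasing sequence number 'seq' for work already
// retired on that queue, plus a 'submissions' list of work still outstanding. Thus
// 'seq + submissions.size()' is the sequence number the queue reaches once everything currently
// submitted has completed -- the target that QueueWaitIdle and DeviceWaitIdle verify and retire below.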
3056static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
3057    *queue_state = GetQueueState(dev_data, queue);
3058    if (dev_data->instance_data->disabled.queue_wait_idle) return false;
3059    return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
3060}
3061
3062static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
3063    RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
3064}
3065
3066VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
3067    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
3068    QUEUE_STATE *queue_state = nullptr;
3069    unique_lock_t lock(global_lock);
3070    bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
3071    lock.unlock();
3072    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3073    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
3074    if (VK_SUCCESS == result) {
3075        lock.lock();
3076        PostCallRecordQueueWaitIdle(dev_data, queue_state);
3077        lock.unlock();
3078    }
3079    return result;
3080}
3081
3082static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
3083    if (dev_data->instance_data->disabled.device_wait_idle) return false;
3084    bool skip = false;
3085    for (auto &queue : dev_data->queueMap) {
3086        skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
3087    }
3088    return skip;
3089}
3090
3091static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
3092    for (auto &queue : dev_data->queueMap) {
3093        RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
3094    }
3095}
3096
3097VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
3098    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3099    unique_lock_t lock(global_lock);
3100    bool skip = PreCallValidateDeviceWaitIdle(dev_data);
3101    lock.unlock();
3102    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3103    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
3104    if (VK_SUCCESS == result) {
3105        lock.lock();
3106        PostCallRecordDeviceWaitIdle(dev_data);
3107        lock.unlock();
3108    }
3109    return result;
3110}
3111
3112static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
3113    *fence_node = GetFenceNode(dev_data, fence);
3114    *obj_struct = {HandleToUint64(fence), kVulkanObjectTypeFence};
3115    if (dev_data->instance_data->disabled.destroy_fence) return false;
3116    bool skip = false;
3117    if (*fence_node) {
3118        if ((*fence_node)->state == FENCE_INFLIGHT) {
3119            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
3120                            HandleToUint64(fence), __LINE__, VALIDATION_ERROR_24e008c0, "DS", "Fence 0x%" PRIx64 " is in use. %s",
3121                            HandleToUint64(fence), validation_error_map[VALIDATION_ERROR_24e008c0]);
3122        }
3123    }
3124    return skip;
3125}
3126
3127static void PostCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }
3128
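// The vkDestroy* wrappers below share one shape: look up the object's tracking state, flag an
// error if the object is still in use (unless that check is disabled), and only when validation
// passes call down the chain with the lock dropped, then re-lock and erase our tracking entry.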
3129VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
3130    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3131    // Common data objects used pre & post call
3132    FENCE_NODE *fence_node = nullptr;
3133    VK_OBJECT obj_struct;
3134    unique_lock_t lock(global_lock);
3135    bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);
3136
3137    if (!skip) {
3138        lock.unlock();
3139        dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
3140        lock.lock();
3141        PostCallRecordDestroyFence(dev_data, fence);
3142    }
3143}
3144
3145static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
3146                                            VK_OBJECT *obj_struct) {
3147    *sema_node = GetSemaphoreNode(dev_data, semaphore);
3148    *obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
3149    if (dev_data->instance_data->disabled.destroy_semaphore) return false;
3150    bool skip = false;
3151    if (*sema_node) {
3152        skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, VALIDATION_ERROR_268008e2);
3153    }
3154    return skip;
3155}
3156
3157static void PostCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }
3158
3159VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
3160    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3161    SEMAPHORE_NODE *sema_node;
3162    VK_OBJECT obj_struct;
3163    unique_lock_t lock(global_lock);
3164    bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
3165    if (!skip) {
3166        lock.unlock();
3167        dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
3168        lock.lock();
3169        PostCallRecordDestroySemaphore(dev_data, semaphore);
3170    }
3171}
3172
3173static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
3174    *event_state = GetEventNode(dev_data, event);
3175    *obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent};
3176    if (dev_data->instance_data->disabled.destroy_event) return false;
3177    bool skip = false;
3178    if (*event_state) {
3179        skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, VALIDATION_ERROR_24c008f2);
3180    }
3181    return skip;
3182}
3183
3184static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
3185    invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
3186    dev_data->eventMap.erase(event);
3187}
3188
3189VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
3190    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3191    EVENT_STATE *event_state = nullptr;
3192    VK_OBJECT obj_struct;
3193    unique_lock_t lock(global_lock);
3194    bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
3195    if (!skip) {
3196        lock.unlock();
3197        dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
3198        lock.lock();
3199        if (event != VK_NULL_HANDLE) {
3200            PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
3201        }
3202    }
3203}
3204
3205static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
3206                                            VK_OBJECT *obj_struct) {
3207    *qp_state = GetQueryPoolNode(dev_data, query_pool);
3208    *obj_struct = {HandleToUint64(query_pool), kVulkanObjectTypeQueryPool};
3209    if (dev_data->instance_data->disabled.destroy_query_pool) return false;
3210    bool skip = false;
3211    if (*qp_state) {
3212        skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, VALIDATION_ERROR_26200632);
3213    }
3214    return skip;
3215}
3216
3217static void PostCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
3218                                           VK_OBJECT obj_struct) {
3219    invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
3220    dev_data->queryPoolMap.erase(query_pool);
3221}
3222
3223VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
3224    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3225    QUERY_POOL_NODE *qp_state = nullptr;
3226    VK_OBJECT obj_struct;
3227    unique_lock_t lock(global_lock);
3228    bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
3229    if (!skip) {
3230        lock.unlock();
3231        dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
3232        lock.lock();
3233        if (queryPool != VK_NULL_HANDLE) {
3234            PostCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
3235        }
3236    }
3237}

3238static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
3239                                               uint32_t query_count, VkQueryResultFlags flags,
3240                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
3241    // TODO: clean this up, it's insanely wasteful.
3242    for (auto cmd_buffer : dev_data->commandBufferMap) {
3243        if (cmd_buffer.second->in_use.load()) {
3244            for (auto query_state_pair : cmd_buffer.second->queryToStateMap) {
3245                (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer.first);
3247            }
3248        }
3249    }
3250    if (dev_data->instance_data->disabled.get_query_pool_results) return false;
3251    bool skip = false;
3252    for (uint32_t i = 0; i < query_count; ++i) {
3253        QueryObject query = {query_pool, first_query + i};
3254        auto qif_pair = queries_in_flight->find(query);
3255        auto query_state_pair = dev_data->queryToStateMap.find(query);
3256        if (query_state_pair != dev_data->queryToStateMap.end()) {
3257            // Available and in flight
3258            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
3259                for (auto cmd_buffer : qif_pair->second) {
3260                    auto cb = GetCBNode(dev_data, cmd_buffer);
3261                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
3262                    if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
3263                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3264                                        VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
3265                                        "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
3266                                        HandleToUint64(query_pool), first_query + i);
3267                    }
3268                }
3269                // Unavailable and in flight
3270            } else if (qif_pair != queries_in_flight->end() && !query_state_pair->second) {
3271                // TODO : Can there be the same query in use by multiple command buffers in flight?
3272                bool make_available = false;
3273                for (auto cmd_buffer : qif_pair->second) {
3274                    auto cb = GetCBNode(dev_data, cmd_buffer);
3275                    make_available |= cb->queryToStateMap[query];
3276                }
3277                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
3278                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3279                                    VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
3280                                    "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
3281                                    HandleToUint64(query_pool), first_query + i);
3282                }
3283                // Unavailable and not in flight
3284            } else if (!query_state_pair->second) {
3285                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3286                                __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
3287                                "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
3288                                HandleToUint64(query_pool), first_query + i);
3289            }
3290            // Uninitialized: no state entry means no data has ever been collected for this query
3291        } else {
3292            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3293                            __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
3294                            "Cannot get query results on queryPool 0x%" PRIx64
3295                            " with index %d as data has not been collected for this index.",
3296                            HandleToUint64(query_pool), first_query + i);
3297        }
3300    }
3301    return skip;
3302}
3303
3304static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
3305                                              uint32_t query_count,
3306                                              unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
3307    for (uint32_t i = 0; i < query_count; ++i) {
3308        QueryObject query = {query_pool, first_query + i};
3309        auto qif_pair = queries_in_flight->find(query);
3310        auto query_state_pair = dev_data->queryToStateMap.find(query);
3311        if (query_state_pair != dev_data->queryToStateMap.end()) {
3312            // Available and in flight
3313            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
3315                for (auto cmd_buffer : qif_pair->second) {
3316                    auto cb = GetCBNode(dev_data, cmd_buffer);
3317                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
3318                    if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
3319                        for (auto event : query_event_pair->second) {
3320                            dev_data->eventMap[event].needsSignaled = true;
3321                        }
3322                    }
3323                }
3324            }
3325        }
3326    }
3327}
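// Note on the record step above: for each available, in-flight query whose owning command buffer
// waited on events before resetting it, those events are flagged needsSignaled again. This mirrors
// PreCallValidateGetQueryPoolResults, which only accepts an in-flight query when such an event
// wait was recorded.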
3328
3329VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
3330                                                   size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
3331    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3332    unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
3333    unique_lock_t lock(global_lock);
3334    bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
3335    lock.unlock();
3336    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3337    VkResult result =
3338        dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
3339    lock.lock();
3340    PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
3341    lock.unlock();
3342    return result;
3343}
3344
3345// Return true if the given ranges intersect, else false
3346// Prereq : For both ranges, range->end - range->start > 0. Violations should already have produced
3347//  an error, so that is not re-checked here.
3348// When one range is linear and the other is non-linear, both are padded out to bufferImageGranularity
3349//  before comparison. In that padded case an intersection is reported as a potential-aliasing warning
3350//  and *skip may be set by the log callback, so callers on the validation path should merge in skip.
3351// Passing skip_checks=true suppresses that warning entirely, for call sites outside the validation path.
3352static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip,
3353                            bool skip_checks) {
3354    *skip = false;
3355    auto r1_start = range1->start;
3356    auto r1_end = range1->end;
3357    auto r2_start = range2->start;
3358    auto r2_end = range2->end;
3359    VkDeviceSize pad_align = 1;
3360    if (range1->linear != range2->linear) {
3361        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
3362    }
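    // Round both ranges down to pad_align-sized buckets before comparing; the ranges only count as
    // disjoint if their padded extents do not share a bucket. Worked example (values assumed for
    // illustration only): with bufferImageGranularity = 0x400, r1_end = 0x5ff falls in bucket 0x400
    // and r2_start = 0x800 in bucket 0x800; 0x400 < 0x800, so the ranges do not intersect.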
3363    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
3364    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;
3365
3366    if (!skip_checks && (range1->linear != range2->linear)) {
3367        // In linear vs. non-linear case, warn of aliasing
3368        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
3369        const char *r1_type_str = range1->image ? "image" : "buffer";
3370        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
3371        const char *r2_type_str = range2->image ? "image" : "buffer";
3372        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
3373        *skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, 0,
3374                         MEMTRACK_INVALID_ALIASING, "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
3375                                                           " which may indicate a bug. For further info refer to the "
3376                                                           "Buffer-Image Granularity section of the Vulkan specification. "
3377                                                           "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/"
3378                                                           "xhtml/vkspec.html#resources-bufferimagegranularity)",
3379                         r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
3380    }
3381    // Ranges intersect
3382    return true;
3383}
3384// Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
3385bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
3386    // Create a local MEMORY_RANGE struct to wrap offset/size
3387    MEMORY_RANGE range_wrap;
3388    // Sync 'linear' with range1 to avoid padding and the potential-aliasing warning path
3389    range_wrap.linear = range1->linear;
3390    range_wrap.start = offset;
3391    range_wrap.end = end;
3392    bool tmp_bool;
3393    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool, true);
3394}
3395// For given mem_info, mark as valid all bound ranges that intersect the [offset, end] range
3396// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
3397static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
3398    bool tmp_bool = false;
3399    MEMORY_RANGE map_range = {};
3400    map_range.linear = true;
3401    map_range.start = offset;
3402    map_range.end = end;
3403    for (auto &handle_range_pair : mem_info->bound_ranges) {
3404        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool, false)) {
3405            // TODO : WARN here if tmp_bool true?
3406            handle_range_pair.second.valid = true;
3407        }
3408    }
3409}
3410
3411static bool ValidateInsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
3412                                      VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image,
3413                                      bool is_linear, const char *api_name) {
3414    bool skip = false;
3415
3416    MEMORY_RANGE range;
3417    range.image = is_image;
3418    range.handle = handle;
3419    range.linear = is_linear;
3420    range.valid = mem_info->global_valid;
3421    range.memory = mem_info->mem;
3422    range.start = memoryOffset;
3423    range.size = memRequirements.size;
3424    range.end = memoryOffset + memRequirements.size - 1;
3425    range.aliases.clear();
3426
3427    // Check for aliasing problems.
3428    for (auto &obj_range_pair : mem_info->bound_ranges) {
3429        auto check_range = &obj_range_pair.second;
3430        bool intersection_error = false;
3431        if (rangesIntersect(dev_data, &range, check_range, &intersection_error, false)) {
3432            skip |= intersection_error;
3433            range.aliases.insert(check_range);
3434        }
3435    }
3436
3437    if (memoryOffset >= mem_info->alloc_info.allocationSize) {
3438        UNIQUE_VALIDATION_ERROR_CODE error_code = is_image ? VALIDATION_ERROR_1740082c : VALIDATION_ERROR_1700080e;
3439        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3440                        HandleToUint64(mem_info->mem), __LINE__, error_code, "MEM",
3441                        "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
3442                        "), memoryOffset=0x%" PRIxLEAST64 " must be less than the memory allocation size 0x%" PRIxLEAST64 ". %s",
3443                        api_name, HandleToUint64(mem_info->mem), handle, memoryOffset, mem_info->alloc_info.allocationSize,
3444                        validation_error_map[error_code]);
3445    }
3446
3447    return skip;
3448}
3449
3450// Object with given handle is being bound to memory w/ given mem_info struct.
3451//  Track the newly bound memory range with given memoryOffset
3452//  Also scan any previous ranges and track aliased ranges against the new range. Error reporting for
3453//  incorrectly overlapping linear and non-linear ranges is handled by ValidateInsertMemoryRange()
3454//  above; this function only records state, so it returns nothing.
3455// is_image indicates an image object, otherwise handle is for a buffer
3456// is_linear indicates a buffer or linear image
3457static void InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
3458                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
3459    MEMORY_RANGE range;
3460
3461    range.image = is_image;
3462    range.handle = handle;
3463    range.linear = is_linear;
3464    range.valid = mem_info->global_valid;
3465    range.memory = mem_info->mem;
3466    range.start = memoryOffset;
3467    range.size = memRequirements.size;
3468    range.end = memoryOffset + memRequirements.size - 1;
3469    range.aliases.clear();
3470    // Update Memory aliasing
3471    // Save aliased ranges so we can copy into final map entry below. Can't do it in loop b/c we don't yet have final ptr. If we
3472    // inserted into map before loop to get the final ptr, then we may enter loop when not needed & we check range against itself
3473    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
3474    for (auto &obj_range_pair : mem_info->bound_ranges) {
3475        auto check_range = &obj_range_pair.second;
3476        bool intersection_error = false;
3477        if (rangesIntersect(dev_data, &range, check_range, &intersection_error, true)) {
3478            range.aliases.insert(check_range);
3479            tmp_alias_ranges.insert(check_range);
3480        }
3481    }
3482    mem_info->bound_ranges[handle] = std::move(range);
3483    for (auto tmp_range : tmp_alias_ranges) {
3484        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
3485    }
3486    if (is_image)
3487        mem_info->bound_images.insert(handle);
3488    else
3489        mem_info->bound_buffers.insert(handle);
3490}
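// Illustration of the resulting bookkeeping (handles and offsets assumed for the example): binding
// a non-linear image at [0, 0x1fff] and then a linear buffer at [0x1000, 0x2fff] to the same
// VkDeviceMemory leaves two entries in mem_info->bound_ranges, each listing the other in its
// 'aliases' set, which lets RemoveMemoryRange() below unlink either side when one object is destroyed.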
3491
3492static bool ValidateInsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
3493                                           VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
3494                                           const char *api_name) {
3495    return ValidateInsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear, api_name);
3496}
3497static void InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
3498                                   VkMemoryRequirements mem_reqs, bool is_linear) {
3499    InsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear);
3500}
3501
3502static bool ValidateInsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
3503                                            VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) {
3504    return ValidateInsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true, api_name);
3505}
3506static void InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
3507                                    VkMemoryRequirements mem_reqs) {
3508    InsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true);
3509}
3510
3511// Remove MEMORY_RANGE struct for given handle from bound_ranges of mem_info
3512//  is_image indicates if handle is for image or buffer
3513//  This function will also remove the handle from the appropriate bound_images/bound_buffers set
3514//  of mem_info and clean up any aliases for the range being removed.
3515static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
3516    auto erase_range = &mem_info->bound_ranges[handle];
3517    for (auto alias_range : erase_range->aliases) {
3518        alias_range->aliases.erase(erase_range);
3519    }
3520    erase_range->aliases.clear();
3521    mem_info->bound_ranges.erase(handle);
3522    if (is_image) {
3523        mem_info->bound_images.erase(handle);
3524    } else {
3525        mem_info->bound_buffers.erase(handle);
3526    }
3527}
3528
3529void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
3530
3531void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
3532
3533VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
3534    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3535    BUFFER_STATE *buffer_state = nullptr;
3536    VK_OBJECT obj_struct;
3537    unique_lock_t lock(global_lock);
3538    bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
3539    if (!skip) {
3540        lock.unlock();
3541        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
3542        lock.lock();
3543        if (buffer != VK_NULL_HANDLE) {
3544            PostCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
3545        }
3546    }
3547}
3548
3549VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
3550    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3551    // Common data objects used pre & post call
3552    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
3553    VK_OBJECT obj_struct;
3554    unique_lock_t lock(global_lock);
3555    // Validate state before calling down chain, update common data if we'll be calling down chain
3556    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
3557    if (!skip) {
3558        lock.unlock();
3559        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
3560        lock.lock();
3561        if (bufferView != VK_NULL_HANDLE) {
3562            PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
3563        }
3564    }
3565}
3566
3567VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
3568    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3569    IMAGE_STATE *image_state = nullptr;
3570    VK_OBJECT obj_struct;
3571    unique_lock_t lock(global_lock);
3572    bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
3573    if (!skip) {
3574        lock.unlock();
3575        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
3576        lock.lock();
3577        if (image != VK_NULL_HANDLE) {
3578            PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
3579        }
3580    }
3581}
3582
3583static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
3584                                const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
3585    bool skip = false;
3586    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
3587        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3588                       HandleToUint64(mem_info->mem), __LINE__, msgCode, "MT",
3589                       "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
3590                       "type (0x%X) of this memory object 0x%" PRIx64 ". %s",
3591                       funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, HandleToUint64(mem_info->mem),
3592                       validation_error_map[msgCode]);
3593    }
3594    return skip;
3595}
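// The bit test above checks that the memory object's type index is one of the types the resource
// accepts. Worked example (mask value assumed for illustration only): if memoryTypeBits == 0b0101,
// the resource accepts memory types 0 and 2, so memory allocated with memoryTypeIndex == 1 yields
// (1 << 1) & 0b0101 == 0 and the error fires.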
3596
3597static bool PreCallValidateBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
3598                                            VkDeviceSize memoryOffset) {
3599    bool skip = false;
3600    if (buffer_state) {
3601        unique_lock_t lock(global_lock);
3602        // Track objects tied to memory
3603        uint64_t buffer_handle = HandleToUint64(buffer);
3604        skip = ValidateSetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, "vkBindBufferMemory()");
3605        if (!buffer_state->memory_requirements_checked) {
3606            // There's not an explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
3607            // BindBufferMemory, but it's implied because the memory being bound must conform to the VkMemoryRequirements
3608            // returned by vkGetBufferMemoryRequirements()
3609            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3610                            buffer_handle, __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
3611                            "vkBindBufferMemory(): Binding memory to buffer 0x%" PRIxLEAST64
3612                            " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
3613                            buffer_handle);
3614            // Make the call for them so we can verify the state
3615            lock.unlock();
3616            dev_data->dispatch_table.GetBufferMemoryRequirements(dev_data->device, buffer, &buffer_state->requirements);
3617            lock.lock();
3618        }
3619
3620        // Validate bound memory range information
3621        auto mem_info = GetMemObjInfo(dev_data, mem);
3622        if (mem_info) {
3623            skip |= ValidateInsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements,
3624                                                    "vkBindBufferMemory()");
3625            skip |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, "vkBindBufferMemory()",
3626                                        VALIDATION_ERROR_17000816);
3627        }
3628
3629        // Validate memory requirements alignment
3630        if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
3631            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3632                            buffer_handle, __LINE__, VALIDATION_ERROR_17000818, "DS",
3633                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64
3634                            " but must be an integer multiple of the "
3635                            "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
3636                            ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
3637                            memoryOffset, buffer_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_17000818]);
3638        }
3639
3640        // Validate memory requirements size (guard the dereference -- mem_info is null if 'mem' is unknown)
3641        if (mem_info && (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset))) {
3642            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3643                            buffer_handle, __LINE__, VALIDATION_ERROR_1700081a, "DS",
3644                            "vkBindBufferMemory(): memory size minus memoryOffset is 0x%" PRIxLEAST64
3645                            " but must be at least as large as "
3646                            "VkMemoryRequirements::size value 0x%" PRIxLEAST64
3647                            ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
3648                            mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size,
3649                            validation_error_map[VALIDATION_ERROR_1700081a]);
3650        }
3651
3652        // Validate device limits alignments
3653        static const VkBufferUsageFlagBits usage_list[3] = {
3654            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
3655            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
3656        static const char *memory_type[3] = {"texel", "uniform", "storage"};
3657        static const char *offset_name[3] = {"minTexelBufferOffsetAlignment", "minUniformBufferOffsetAlignment",
3658                                             "minStorageBufferOffsetAlignment"};
3659
3660        // TODO:  vk_validation_stats.py cannot abide braces immediately preceding or following a validation error enum
3661        // clang-format off
3662        static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = { VALIDATION_ERROR_17000810, VALIDATION_ERROR_17000812,
3663            VALIDATION_ERROR_17000814 };
3664        // clang-format on
3665
3666        // Keep this array in sync with the usage_list/memory_type/offset_name arrays above
3667        const VkDeviceSize offset_requirement[3] = {
3668            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
3669            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
3670            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment};
3671        VkBufferUsageFlags usage = buffer_state->createInfo.usage;
3672
3673        for (int i = 0; i < 3; i++) {
3674            if (usage & usage_list[i]) {
3675                if (SafeModulo(memoryOffset, offset_requirement[i]) != 0) {
3676                    skip |= log_msg(
3677                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, buffer_handle,
3678                        __LINE__, msgCode[i], "DS", "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64
3679                                                    " but must be a multiple of "
3680                                                    "device limit %s 0x%" PRIxLEAST64 ". %s",
3681                        memory_type[i], memoryOffset, offset_name[i], offset_requirement[i], validation_error_map[msgCode[i]]);
3682                }
3683            }
3684        }
3685    }
3686    return skip;
3687}
3688
3689static void PostCallRecordBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
3690                                           VkDeviceSize memoryOffset) {
3691    if (buffer_state) {
3692        unique_lock_t lock(global_lock);
3693        // Track bound memory range information
3694        auto mem_info = GetMemObjInfo(dev_data, mem);
3695        if (mem_info) {
3696            InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
3697        }
3698
3699        // Track objects tied to memory
3700        uint64_t buffer_handle = HandleToUint64(buffer);
3701        SetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, "vkBindBufferMemory()");
3702
3703        buffer_state->binding.mem = mem;
3704        buffer_state->binding.offset = memoryOffset;
3705        buffer_state->binding.size = buffer_state->requirements.size;
3706    }
3707}
3708
3709VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
3710    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3711    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3712    auto buffer_state = GetBufferState(dev_data, buffer);
3713    bool skip = PreCallValidateBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset);
3714    if (!skip) {
3715        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
3716        if (result == VK_SUCCESS) {
3717            PostCallRecordBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset);
3718        }
3719    }
3720    return result;
3721}
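// For reference, the app-side sequence the checks above correspond to (a sketch using core Vulkan
// 1.0 entry points; 'mem' and 'offset' are placeholders):
//
//     VkMemoryRequirements reqs;
//     vkGetBufferMemoryRequirements(device, buffer, &reqs);  // skipping this draws the warning above
//     // ... allocate 'mem' from a type whose bit is set in reqs.memoryTypeBits ...
//     vkBindBufferMemory(device, buffer, mem, offset);       // offset % reqs.alignment == 0, and
//                                                            // reqs.size <= allocationSize - offset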
3722
3723VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
3724                                                       VkMemoryRequirements *pMemoryRequirements) {
3725    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3726    dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
3727    auto buffer_state = GetBufferState(dev_data, buffer);
3728    if (buffer_state) {
3729        buffer_state->requirements = *pMemoryRequirements;
3730        buffer_state->memory_requirements_checked = true;
3731    }
3732}
3733
3734VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
3735    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3736    dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
3737    auto image_state = GetImageState(dev_data, image);
3738    if (image_state) {
3739        image_state->requirements = *pMemoryRequirements;
3740        image_state->memory_requirements_checked = true;
3741    }
3742}
3743
3744VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
3745    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3746    // Common data objects used pre & post call
3747    IMAGE_VIEW_STATE *image_view_state = nullptr;
3748    VK_OBJECT obj_struct;
3749    unique_lock_t lock(global_lock);
3750    bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
3751    if (!skip) {
3752        lock.unlock();
3753        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
3754        lock.lock();
3755        if (imageView != VK_NULL_HANDLE) {
3756            PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
3757        }
3758    }
3759}
3760
3761VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
3762                                               const VkAllocationCallbacks *pAllocator) {
3763    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3764
3765    unique_lock_t lock(global_lock);
3766    dev_data->shaderModuleMap.erase(shaderModule);
3767    lock.unlock();
3768
3769    dev_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
3770}
3771
3772static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
3773                                           VK_OBJECT *obj_struct) {
3774    *pipeline_state = getPipelineState(dev_data, pipeline);
3775    *obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline};
3776    if (dev_data->instance_data->disabled.destroy_pipeline) return false;
3777    bool skip = false;
3778    if (*pipeline_state) {
3779        skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, VALIDATION_ERROR_25c005fa);
3780    }
3781    return skip;
3782}
3783
3784static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
3785                                          VK_OBJECT obj_struct) {
3786    // Any bound cmd buffers are now invalid
3787    invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
3788    delete getPipelineState(dev_data, pipeline);
3789    dev_data->pipelineMap.erase(pipeline);
3790}
3791
3792VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
3793    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3794    PIPELINE_STATE *pipeline_state = nullptr;
3795    VK_OBJECT obj_struct;
3796    unique_lock_t lock(global_lock);
3797    bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
3798    if (!skip) {
3799        lock.unlock();
3800        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
3801        lock.lock();
3802        if (pipeline != VK_NULL_HANDLE) {
3803            PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
3804        }
3805    }
3806}
3807
3808VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
3809                                                 const VkAllocationCallbacks *pAllocator) {
3810    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3811    unique_lock_t lock(global_lock);
3812    dev_data->pipelineLayoutMap.erase(pipelineLayout);
3813    lock.unlock();
3814
3815    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
3816}
3817
3818static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
3819                                          VK_OBJECT *obj_struct) {
3820    *sampler_state = GetSamplerState(dev_data, sampler);
3821    *obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler};
3822    if (dev_data->instance_data->disabled.destroy_sampler) return false;
3823    bool skip = false;
3824    if (*sampler_state) {
3825        skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, VALIDATION_ERROR_26600874);
3826    }
3827    return skip;
3828}
3829
3830static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
3831                                         VK_OBJECT obj_struct) {
3832    // Any bound cmd buffers are now invalid
3833    if (sampler_state) invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
3834    dev_data->samplerMap.erase(sampler);
3835}
3836
3837VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
3838    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3839    SAMPLER_STATE *sampler_state = nullptr;
3840    VK_OBJECT obj_struct;
3841    unique_lock_t lock(global_lock);
3842    bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
3843    if (!skip) {
3844        lock.unlock();
3845        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
3846        lock.lock();
3847        if (sampler != VK_NULL_HANDLE) {
3848            PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
3849        }
3850    }
3851}
3852
3853static void PostCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
3854    dev_data->descriptorSetLayoutMap.erase(ds_layout);
3855}
3856
3857VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
3858                                                      const VkAllocationCallbacks *pAllocator) {
3859    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3860    dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
3861    unique_lock_t lock(global_lock);
3862    PostCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
3863}
3864
3865static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
3866                                                 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
3867    *desc_pool_state = GetDescriptorPoolState(dev_data, pool);
3868    *obj_struct = {HandleToUint64(pool), kVulkanObjectTypeDescriptorPool};
3869    if (dev_data->instance_data->disabled.destroy_descriptor_pool) return false;
3870    bool skip = false;
3871    if (*desc_pool_state) {
3872        skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, VALIDATION_ERROR_2440025e);
3873    }
3874    return skip;
3875}
3876
3877static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
3878                                                DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
3879    // Any bound cmd buffers are now invalid
3880    invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
3881    // Free sets that were in this pool
3882    for (auto ds : desc_pool_state->sets) {
3883        freeDescriptorSet(dev_data, ds);
3884    }
3885    dev_data->descriptorPoolMap.erase(descriptorPool);
3886    delete desc_pool_state;
3887}
3888
3889VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
3890                                                 const VkAllocationCallbacks *pAllocator) {
3891    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3892    DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
3893    VK_OBJECT obj_struct;
3894    unique_lock_t lock(global_lock);
3895    bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
3896    if (!skip) {
3897        lock.unlock();
3898        dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
3899        lock.lock();
3900        if (descriptorPool != VK_NULL_HANDLE) {
3901            PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
3902        }
3903    }
3904}

3905// Verify that the command buffer in the given cb_node is not in use (in flight), and return the skip result
3906// This function is only valid at a point when the command buffer is being reset or freed
3909static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
3910                                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
3911    bool skip = false;
3912    if (cb_node->in_use.load()) {
3913        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3914                        HandleToUint64(cb_node->commandBuffer), __LINE__, error_code, "DS",
3915                        "Attempt to %s command buffer (0x%p) which is in use. %s", action, cb_node->commandBuffer,
3916                        validation_error_map[error_code]);
3917    }
3918    return skip;
3919}
3920
3921// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
3922static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
3923                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
3924    bool skip = false;
3925    for (auto cmd_buffer : pPool->commandBuffers) {
3926        skip |= checkCommandBufferInFlight(dev_data, GetCBNode(dev_data, cmd_buffer), action, error_code);
3927    }
3928    return skip;
3929}
3930
3931VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
3932                                              const VkCommandBuffer *pCommandBuffers) {
3933    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3934    bool skip = false;
3935    unique_lock_t lock(global_lock);
3936
3937    for (uint32_t i = 0; i < commandBufferCount; i++) {
3938        auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
3939        // Verify that the command buffer is not in flight before freeing it
3940        if (cb_node) {
3941            skip |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_2840005e);
3942        }
3943    }
3944
3945    if (skip) return;
3946
3947    auto pPool = GetCommandPoolNode(dev_data, commandPool);
3948    for (uint32_t i = 0; i < commandBufferCount; i++) {
3949        auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
3950        // Delete CB information structure, and remove from commandBufferMap
3951        if (cb_node) {
3952            // reset prior to delete for data clean-up
3953            // TODO: fix this, it's insane.
3954            resetCB(dev_data, cb_node->commandBuffer);
3955            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
3956            delete cb_node;
3957        }
3958
3959        // Remove commandBuffer reference from commandPoolMap
3960        pPool->commandBuffers.remove(pCommandBuffers[i]);
3961    }
3962    lock.unlock();
3963
3964    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
3965}
3966
3967VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
3968                                                 const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
3969    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3970
3971    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
3972
3973    if (VK_SUCCESS == result) {
3974        lock_guard_t lock(global_lock);
3975        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
3976        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
3977    }
3978    return result;
3979}
3980
3981VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
3982                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
3983    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3984    bool skip = false;
3985    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
3986        if (!dev_data->enabled_features.pipelineStatisticsQuery) {
3987            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3988                            __LINE__, VALIDATION_ERROR_11c0062e, "DS",
3989                            "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device "
3990                            "with VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE. %s",
3991                            validation_error_map[VALIDATION_ERROR_11c0062e]);
3992        }
3993    }
3994
3995    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3996    if (!skip) {
3997        result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
3998    }
3999    if (result == VK_SUCCESS) {
4000        lock_guard_t lock(global_lock);
4001        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
4002        qp_node->createInfo = *pCreateInfo;
4003    }
4004    return result;
4005}
4006
4007static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE **cp_state) {
4008    *cp_state = GetCommandPoolNode(dev_data, pool);
4009    if (dev_data->instance_data->disabled.destroy_command_pool) return false;
4010    bool skip = false;
4011    if (*cp_state) {
4012        // Verify that command buffers in pool are complete (not in-flight)
4013        skip |= checkCommandBuffersInFlight(dev_data, *cp_state, "destroy command pool with", VALIDATION_ERROR_24000052);
4014    }
4015    return skip;
4016}
4017
4018static void PostCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE *cp_state) {
4019    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
4020    for (auto cb : cp_state->commandBuffers) {
4021        auto cb_node = GetCBNode(dev_data, cb);
4022        clear_cmd_buf_and_mem_references(dev_data, cb_node);
4023        // Remove references to this cb_node prior to delete
4024        // TODO : Need better solution here, resetCB?
4025        for (auto obj : cb_node->object_bindings) {
4026            removeCommandBufferBinding(dev_data, &obj, cb_node);
4027        }
4028        for (auto framebuffer : cb_node->framebuffers) {
4029            auto fb_state = GetFramebufferState(dev_data, framebuffer);
4030            if (fb_state) fb_state->cb_bindings.erase(cb_node);
4031        }
4032        dev_data->commandBufferMap.erase(cb);  // Remove this command buffer
4033        delete cb_node;                        // delete CB info structure
4034    }
4035    dev_data->commandPoolMap.erase(pool);
4036}
4037
4038// Destroy commandPool along with all of the commandBuffers allocated from that pool
4039VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
4040    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4041    COMMAND_POOL_NODE *cp_state = nullptr;
4042    unique_lock_t lock(global_lock);
4043    bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool, &cp_state);
4044    if (!skip) {
4045        lock.unlock();
4046        dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
4047        lock.lock();
4048        if (commandPool != VK_NULL_HANDLE) {
4049            PostCallRecordDestroyCommandPool(dev_data, commandPool, cp_state);
4050        }
4051    }
4052}
4053
4054VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
4055    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4056    bool skip = false;
4057
4058    unique_lock_t lock(global_lock);
4059    auto pPool = GetCommandPoolNode(dev_data, commandPool);
4060    skip |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_32800050);
4061    lock.unlock();
4062
4063    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4064
4065    VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
4066
4067    // Reset all of the CBs allocated from this pool
4068    if (VK_SUCCESS == result) {
4069        lock.lock();
4070        for (auto cmdBuffer : pPool->commandBuffers) {
4071            resetCB(dev_data, cmdBuffer);
4072        }
4073        lock.unlock();
4074    }
4075    return result;
4076}
4077
4078VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
4079    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4080    bool skip = false;
4081    unique_lock_t lock(global_lock);
4082    for (uint32_t i = 0; i < fenceCount; ++i) {
4083        auto pFence = GetFenceNode(dev_data, pFences[i]);
4084        if (pFence && pFence->state == FENCE_INFLIGHT) {
4085            skip |=
4086                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4087                        HandleToUint64(pFences[i]), __LINE__, VALIDATION_ERROR_32e008c6, "DS", "Fence 0x%" PRIx64 " is in use. %s",
4088                        HandleToUint64(pFences[i]), validation_error_map[VALIDATION_ERROR_32e008c6]);
4089        }
4090    }
4091    lock.unlock();
4092
4093    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4094
4095    VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
4096
4097    if (result == VK_SUCCESS) {
4098        lock.lock();
4099        for (uint32_t i = 0; i < fenceCount; ++i) {
4100            auto pFence = GetFenceNode(dev_data, pFences[i]);
4101            if (pFence) {
4102                pFence->state = FENCE_UNSIGNALED;
4103            }
4104        }
4105        lock.unlock();
4106    }
4107
4108    return result;
4109}
4110
4111// For given cb_nodes, invalidate them and track object causing invalidation
4112void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
4113    for (auto cb_node : cb_nodes) {
4114        if (cb_node->state == CB_RECORDING) {
4115            log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4116                    HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4117                    "Invalidating a command buffer that's currently being recorded: 0x%p.", cb_node->commandBuffer);
4118            cb_node->state = CB_INVALID_INCOMPLETE;
4119        } else {
4121            cb_node->state = CB_INVALID_COMPLETE;
4122        }
4123        cb_node->broken_bindings.push_back(obj);
4124
4125        // if secondary, then propagate the invalidation to the primaries that will call us.
4126        if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
4127            invalidateCommandBuffers(dev_data, cb_node->linkedCommandBuffers, obj);
4128        }
4129    }
4130}
4131
4132static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
4133                                              FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
4134    *framebuffer_state = GetFramebufferState(dev_data, framebuffer);
4135    *obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer};
4136    if (dev_data->instance_data->disabled.destroy_framebuffer) return false;
4137    bool skip = false;
4138    if (*framebuffer_state) {
4139        skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, VALIDATION_ERROR_250006f8);
4140    }
4141    return skip;
4142}
4143
4144static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
4145                                             VK_OBJECT obj_struct) {
4146    invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
4147    dev_data->frameBufferMap.erase(framebuffer);
4148}
4149
4150VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
4151    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4152    FRAMEBUFFER_STATE *framebuffer_state = nullptr;
4153    VK_OBJECT obj_struct;
4154    unique_lock_t lock(global_lock);
4155    bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
4156    if (!skip) {
4157        lock.unlock();
4158        dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
4159        lock.lock();
4160        if (framebuffer != VK_NULL_HANDLE) {
4161            PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
4162        }
4163    }
4164}
4165
4166static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
4167                                             VK_OBJECT *obj_struct) {
4168    *rp_state = GetRenderPassState(dev_data, render_pass);
4169    *obj_struct = {HandleToUint64(render_pass), kVulkanObjectTypeRenderPass};
4170    if (dev_data->instance_data->disabled.destroy_renderpass) return false;
4171    bool skip = false;
4172    if (*rp_state) {
4173        skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, VALIDATION_ERROR_264006d2);
4174    }
4175    return skip;
4176}
4177
4178static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
4179                                            VK_OBJECT obj_struct) {
4180    invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
4181    dev_data->renderPassMap.erase(render_pass);
4182}
4183
4184VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
4185    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4186    RENDER_PASS_STATE *rp_state = nullptr;
4187    VK_OBJECT obj_struct;
4188    unique_lock_t lock(global_lock);
4189    bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
4190    if (!skip) {
4191        lock.unlock();
4192        dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
4193        lock.lock();
4194        if (renderPass != VK_NULL_HANDLE) {
4195            PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
4196        }
4197    }
4198}
4199
4200VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
4201                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
4202    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4203    unique_lock_t lock(global_lock);
4204    bool skip = PreCallValidateCreateBuffer(dev_data, pCreateInfo);
4205    lock.unlock();
4206
4207    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4208    VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
4209
4210    if (VK_SUCCESS == result) {
4211        lock.lock();
4212        PostCallRecordCreateBuffer(dev_data, pCreateInfo, pBuffer);
4213        lock.unlock();
4214    }
4215    return result;
4216}
4217
4218VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
4219                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
4220    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4221    unique_lock_t lock(global_lock);
4222    bool skip = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
4223    lock.unlock();
4224    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4225    VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
4226    if (VK_SUCCESS == result) {
4227        lock.lock();
4228        PostCallRecordCreateBufferView(dev_data, pCreateInfo, pView);
4229        lock.unlock();
4230    }
4231    return result;
4232}
4233
4234// Access helper functions for external modules. NOTE: the two Get*Properties helpers below heap-allocate their results and never free them, so each call leaks; a caller-owned out parameter would avoid this (see the sketch after them).
4235const VkFormatProperties *GetFormatProperties(core_validation::layer_data *device_data, VkFormat format) {
4236    VkFormatProperties *format_properties = new VkFormatProperties;
4237    instance_layer_data *instance_data =
4238        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
4239    instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, format_properties);
4240    return format_properties;
4241}
4242
4243const VkImageFormatProperties *GetImageFormatProperties(core_validation::layer_data *device_data, VkFormat format,
4244                                                        VkImageType image_type, VkImageTiling tiling, VkImageUsageFlags usage,
4245                                                        VkImageCreateFlags flags) {
4246    VkImageFormatProperties *image_format_properties = new VkImageFormatProperties;
4247    instance_layer_data *instance_data =
4248        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
4249    instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties(device_data->physical_device, format, image_type, tiling,
4250                                                                         usage, flags, image_format_properties);
4251    return image_format_properties;
4252}
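
// Editor's note: the two helpers above leak because ownership of the new'd struct is never
// taken by any caller. A hedged sketch of a leak-free alternative using a caller-owned out
// parameter (hypothetical name, same dispatch logic as above):
static void ExampleGetFormatPropertiesNoLeak(core_validation::layer_data *device_data, VkFormat format,
                                             VkFormatProperties *out_properties) {
    instance_layer_data *instance_data =
        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
    // Fill the caller's struct directly; nothing is allocated, so nothing can leak
    instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, out_properties);
}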
4253
4254const debug_report_data *GetReportData(const core_validation::layer_data *device_data) { return device_data->report_data; }
4255
4256const VkPhysicalDeviceProperties *GetPhysicalDeviceProperties(core_validation::layer_data *device_data) {
4257    return &device_data->phys_dev_props;
4258}
4259
4260const CHECK_DISABLED *GetDisables(core_validation::layer_data *device_data) { return &device_data->instance_data->disabled; }
4261
4262std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *device_data) {
4263    return &device_data->imageMap;
4264}
4265
4266std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(core_validation::layer_data *device_data) {
4267    return &device_data->imageSubresourceMap;
4268}
4269
4270std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *device_data) {
4271    return &device_data->imageLayoutMap;
4272}
4273
4274std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *GetImageLayoutMap(layer_data const *device_data) {
4275    return &device_data->imageLayoutMap;
4276}
4277
4278std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *GetBufferMap(layer_data *device_data) {
4279    return &device_data->bufferMap;
4280}
4281
4282std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *GetBufferViewMap(layer_data *device_data) {
4283    return &device_data->bufferViewMap;
4284}
4285
4286std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *GetImageViewMap(layer_data *device_data) {
4287    return &device_data->imageViewMap;
4288}
4289
4290const PHYS_DEV_PROPERTIES_NODE *GetPhysDevProperties(const layer_data *device_data) {
4291    return &device_data->phys_dev_properties;
4292}
4293
4294const VkPhysicalDeviceFeatures *GetEnabledFeatures(const layer_data *device_data) {
4295    return &device_data->enabled_features;
4296}
4297
4298const DeviceExtensions *GetDeviceExtensions(const layer_data *device_data) { return &device_data->extensions; }
4299
4300VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
4301                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
4302    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4303    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4304    bool skip = PreCallValidateCreateImage(dev_data, pCreateInfo, pAllocator, pImage);
4305    if (!skip) {
4306        result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
4307    }
4308    if (VK_SUCCESS == result) {
4309        lock_guard_t lock(global_lock);
4310        PostCallRecordCreateImage(dev_data, pCreateInfo, pImage);
4311    }
4312    return result;
4313}
4314
4315VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
4316                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
4317    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4318    unique_lock_t lock(global_lock);
4319    bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
4320    lock.unlock();
4321    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4322    VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
4323    if (VK_SUCCESS == result) {
4324        lock.lock();
4325        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
4326        lock.unlock();
4327    }
4328
4329    return result;
4330}
4331
4332VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
4333                                           const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
4334    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4335    VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
4336    if (VK_SUCCESS == result) {
4337        lock_guard_t lock(global_lock);
4338        auto &fence_node = dev_data->fenceMap[*pFence];
4339        fence_node.fence = *pFence;
4340        fence_node.createInfo = *pCreateInfo;
4341        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
4342    }
4343    return result;
4344}
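
// Editor's note: an application-side sketch of the FENCE_RETIRED path above -- creating a
// fence pre-signaled so that the first vkWaitForFences on it returns immediately. The
// function name is illustrative.
static VkResult ExampleCreateSignaledFence(VkDevice device, VkFence *out_fence) {
    VkFenceCreateInfo fci = {};
    fci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fci.flags = VK_FENCE_CREATE_SIGNALED_BIT;  // tracked as FENCE_RETIRED by the layer above
    return vkCreateFence(device, &fci, nullptr, out_fence);
}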
4345
4346// TODO handle pipeline caches
4347VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
4348                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
4349    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4350    VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
4351    return result;
4352}
4353
4354VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
4355                                                const VkAllocationCallbacks *pAllocator) {
4356    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4357    dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
4358}
4359
4360VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize,
4361                                                    void *pData) {
4362    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4363    VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
4364    return result;
4365}
4366
4367VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
4368                                                   const VkPipelineCache *pSrcCaches) {
4369    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4370    VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
4371    return result;
4372}
4373
4374// utility function to set collective state for pipeline
4375void set_pipeline_state(PIPELINE_STATE *pPipe) {
4376    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
4377    if (pPipe->graphicsPipelineCI.pColorBlendState) {
4378        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
4379            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
4380                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4381                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4382                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4383                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4384                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4385                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4386                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4387                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
4388                    pPipe->blendConstantsEnabled = true;
4389                }
4390            }
4391        }
4392    }
4393}
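
// Editor's note: blendConstantsEnabled, set above, appears intended for later draw-time
// validation -- when a pipeline uses blend-constant factors and declares them dynamic, the
// application must supply the constants before drawing. An illustrative application-side sketch:
static void ExampleSetBlendConstants(VkCommandBuffer cb) {
    const float blend_constants[4] = {1.0f, 1.0f, 1.0f, 1.0f};  // RGBA values used by the VK_BLEND_FACTOR_CONSTANT_* factors
    vkCmdSetBlendConstants(cb, blend_constants);
}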
4394
4395bool validate_dual_src_blend_feature(layer_data *device_data, PIPELINE_STATE *pipe_state) {
4396    bool skip = false;
4397    if (pipe_state->graphicsPipelineCI.pColorBlendState) {
4398        for (size_t i = 0; i < pipe_state->attachments.size(); ++i) {
4399            if (!device_data->enabled_features.dualSrcBlend) {  // TODO: also check srcColorBlendFactor/dstColorBlendFactor for SRC1 factors (see the sketch after this function)
4400                if ((pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
4401                    (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
4402                    (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
4403                    (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA) ||
4404                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
4405                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
4406                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
4407                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
4408                    skip |=
4409                        log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
4410                                HandleToUint64(pipe_state->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
4411                                "CmdBindPipeline: vkPipeline (0x%" PRIxLEAST64 ") attachment[" PRINTF_SIZE_T_SPECIFIER
4412                                "] uses a dual-source blend factor but the dualSrcBlend device feature is not enabled.",
4413                                HandleToUint64(pipe_state->pipeline), i);
4414                }
4415            }
4416        }
4417    }
4418    return skip;
4419}
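
// Editor's note: a hedged sketch of a predicate that also covers the color blend factors
// noted in the TODO above. IsDualSrcFactor and AttachmentUsesDualSrcBlend are hypothetical
// helpers, not existing layer API.
static bool IsDualSrcFactor(VkBlendFactor factor) {
    return (factor == VK_BLEND_FACTOR_SRC1_COLOR) || (factor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
           (factor == VK_BLEND_FACTOR_SRC1_ALPHA) || (factor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA);
}
static bool AttachmentUsesDualSrcBlend(const VkPipelineColorBlendAttachmentState &att) {
    return IsDualSrcFactor(att.srcColorBlendFactor) || IsDualSrcFactor(att.dstColorBlendFactor) ||
           IsDualSrcFactor(att.srcAlphaBlendFactor) || IsDualSrcFactor(att.dstAlphaBlendFactor);
}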
4420
4421VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
4422                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
4423                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
4424    // TODO What to do with pipelineCache?
4425    // The order of operations here is a little convoluted but gets the job done
4426    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
4427    //  2. Create state is then validated (which uses flags setup during shadowing)
4428    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
4429    bool skip = false;
4430    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
4431    vector<PIPELINE_STATE *> pipe_state(count);
4432    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4433
4434    uint32_t i = 0;
4435    unique_lock_t lock(global_lock);
4436
4437    for (i = 0; i < count; i++) {
4438        pipe_state[i] = new PIPELINE_STATE;
4439        pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i]);
4440        pipe_state[i]->render_pass_ci.initialize(GetRenderPassState(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
4441        pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
4442    }
4443
4444    for (i = 0; i < count; i++) {
4445        skip |= ValidatePipelineLocked(dev_data, pipe_state, i);
4446    }
4447
4448    lock.unlock();
4449
4450    for (i = 0; i < count; i++) {
4451        skip |= ValidatePipelineUnlocked(dev_data, pipe_state, i);
4452    }
4453
4454    if (skip) {
4455        for (i = 0; i < count; i++) {
4456            delete pipe_state[i];
4457            pPipelines[i] = VK_NULL_HANDLE;
4458        }
4459        return VK_ERROR_VALIDATION_FAILED_EXT;
4460    }
4461
4462    auto result =
4463        dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
4464    lock.lock();
4465    for (i = 0; i < count; i++) {
4466        if (pPipelines[i] == VK_NULL_HANDLE) {
4467            delete pipe_state[i];
4468        } else {
4469            pipe_state[i]->pipeline = pPipelines[i];
4470            dev_data->pipelineMap[pipe_state[i]->pipeline] = pipe_state[i];
4471        }
4472    }
4473
4474    return result;
4475}
4476
4477VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
4478                                                      const VkComputePipelineCreateInfo *pCreateInfos,
4479                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
4480    bool skip = false;
4481
4482    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
4483    vector<PIPELINE_STATE *> pPipeState(count);
4484    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4485
4486    uint32_t i = 0;
4487    unique_lock_t lock(global_lock);
4488    for (i = 0; i < count; i++) {
4489        // TODO: Verify compute stage bits
4490
4491        // Create and initialize internal tracking data structure
4492        pPipeState[i] = new PIPELINE_STATE;
4493        pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
4494        pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
4495
4496        // TODO: Add Compute Pipeline Verification
4497        skip |= validate_compute_pipeline(dev_data, pPipeState[i]);
4498    }
4499
4500    if (skip) {
4501        for (i = 0; i < count; i++) {
4502            // Clean up any locally allocated data structures
4503            delete pPipeState[i];
4504            pPipelines[i] = VK_NULL_HANDLE;
4505        }
4506        return VK_ERROR_VALIDATION_FAILED_EXT;
4507    }
4508
4509    lock.unlock();
4510    auto result =
4511        dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
4512    lock.lock();
4513    for (i = 0; i < count; i++) {
4514        if (pPipelines[i] == VK_NULL_HANDLE) {
4515            delete pPipeState[i];
4516        } else {
4517            pPipeState[i]->pipeline = pPipelines[i];
4518            dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
4519        }
4520    }
4521
4522    return result;
4523}
4524
4525VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
4526                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
4527    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4528    VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
4529    if (VK_SUCCESS == result) {
4530        lock_guard_t lock(global_lock);
4531        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
4532    }
4533    return result;
4534}
4535
4536static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
4537    if (dev_data->instance_data->disabled.create_descriptor_set_layout) return false;
4538    return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info);
4539}
4540
4541static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
4542                                                    VkDescriptorSetLayout set_layout) {
4543    dev_data->descriptorSetLayoutMap[set_layout] = std::make_shared<cvdescriptorset::DescriptorSetLayout>(create_info, set_layout);
4544}
4545
4546VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
4547                                                         const VkAllocationCallbacks *pAllocator,
4548                                                         VkDescriptorSetLayout *pSetLayout) {
4549    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4550    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4551    unique_lock_t lock(global_lock);
4552    bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
4553    if (!skip) {
4554        lock.unlock();
4555        result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
4556        if (VK_SUCCESS == result) {
4557            lock.lock();
4558            PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
4559        }
4560    }
4561    return result;
4562}
4563
4564// Used by CreatePipelineLayout and CmdPushConstants.
4565// Note that the index argument is optional and only used by CreatePipelineLayout.
4566static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
4567                                      const char *caller_name, uint32_t index = 0) {
4568    if (dev_data->instance_data->disabled.push_constant_range) return false;
4569    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
4570    bool skip = false;
4571    // Check that offset + size don't exceed the max.
4572    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
4573    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
4574        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
4575        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
4576            if (offset >= maxPushConstantsSize) {
4577                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4578                                __LINE__, VALIDATION_ERROR_11a0024c, "DS",
4579                                "%s call has push constants index %u with offset %u that "
4580                                "exceeds this device's maxPushConstantsSize of %u. %s",
4581                                caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_11a0024c]);
4582            }
4583            if (size > maxPushConstantsSize - offset) {
4584                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4585                                __LINE__, VALIDATION_ERROR_11a00254, "DS",
4586                                "%s call has push constants index %u with offset %u and size %u that "
4587                                "exceeds this device's maxPushConstantsSize of %u. %s",
4588                                caller_name, index, offset, size, maxPushConstantsSize,
4589                                validation_error_map[VALIDATION_ERROR_11a00254]);
4590            }
4591        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
4592            if (offset >= maxPushConstantsSize) {
4593                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4594                                __LINE__, VALIDATION_ERROR_1bc002e4, "DS",
4595                                "%s call has push constants index %u with offset %u that "
4596                                "exceeds this device's maxPushConstantsSize of %u. %s",
4597                                caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_1bc002e4]);
4598            }
4599            if (size > maxPushConstantsSize - offset) {
4600                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4601                                __LINE__, VALIDATION_ERROR_1bc002e6, "DS",
4602                                "%s call has push constants index %u with offset %u and size %u that "
4603                                "exceeds this device's maxPushConstantsSize of %u. %s",
4604                                caller_name, index, offset, size, maxPushConstantsSize,
4605                                validation_error_map[VALIDATION_ERROR_1bc002e6]);
4606            }
4607        } else {
4608            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4609                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
4610        }
4611    }
4612    // size needs to be non-zero and a multiple of 4.
4613    if ((size == 0) || ((size & 0x3) != 0)) {
4614        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
4615            if (size == 0) {
4616                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4617                                __LINE__, VALIDATION_ERROR_11a00250, "DS",
4618                                "%s call has push constants index %u with "
4619                                "size %u. Size must be greater than zero. %s",
4620                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_11a00250]);
4621            }
4622            if (size & 0x3) {
4623                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4624                                __LINE__, VALIDATION_ERROR_11a00252, "DS",
4625                                "%s call has push constants index %u with "
4626                                "size %u. Size must be a multiple of 4. %s",
4627                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_11a00252]);
4628            }
4629        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
4630            if (size == 0) {
4631                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4632                                __LINE__, VALIDATION_ERROR_1bc2c21b, "DS",
4633                                "%s call has push constants index %u with "
4634                                "size %u. Size must be greater than zero. %s",
4635                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_1bc2c21b]);
4636            }
4637            if (size & 0x3) {
4638                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4639                                __LINE__, VALIDATION_ERROR_1bc002e2, "DS",
4640                                "%s call has push constants index %u with "
4641                                "size %u. Size must be a multiple of 4. %s",
4642                                caller_name, index, size, validation_error_map[VALIDATION_ERROR_1bc002e2]);
4643            }
4644        } else {
4645            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4646                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
4647        }
4648    }
4649    // offset needs to be a multiple of 4.
4650    if ((offset & 0x3) != 0) {
4651        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
4652            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4653                            __LINE__, VALIDATION_ERROR_11a0024e, "DS",
4654                            "%s call has push constants index %u with "
4655                            "offset %u. Offset must be a multiple of 4. %s",
4656                            caller_name, index, offset, validation_error_map[VALIDATION_ERROR_11a0024e]);
4657        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
4658            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4659                            __LINE__, VALIDATION_ERROR_1bc002e0, "DS",
4660                            "%s call has push constants with "
4661                            "offset %u. Offset must be a multiple of 4. %s",
4662                            caller_name, offset, validation_error_map[VALIDATION_ERROR_1bc002e0]);
4663        } else {
4664            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4665                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
4666        }
4667    }
4668    return skip;
4669}
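
// Editor's note: a worked example of the overflow-safe ordering used above. With uint32_t
// offset = 0xFFFFFFF0, size = 0x20, and limit = 128, the naive test (offset + size > limit)
// wraps around to 0x10 and incorrectly passes, while testing offset first and then
// (size > limit - offset) stays in range. The same idiom in isolation (hypothetical name):
static bool ExampleRangeExceedsLimit(uint32_t offset, uint32_t size, uint32_t limit) {
    // Equivalent in intent to (offset + size > limit) but immune to unsigned wrap-around
    return (offset >= limit) || (size > limit - offset);
}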
4670
4671VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
4672                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
4673    bool skip = false;
4674    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4675    // TODO : Add checks for VALIDATION_ERRORS 865-870
4676    // Push Constant Range checks
4677    uint32_t i, j;
4678    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
4679        skip |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
4680                                          pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
4681        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
4682            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4683                            __LINE__, VALIDATION_ERROR_11a2dc03, "DS", "vkCreatePipelineLayout() call has no stageFlags set. %s",
4684                            validation_error_map[VALIDATION_ERROR_11a2dc03]);
4685        }
4686    }
4687    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4688
4689    // As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
4690    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
4691        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
4692            if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
4693                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
4694                                __LINE__, VALIDATION_ERROR_0fe00248, "DS",
4695                                "vkCreatePipelineLayout(): Duplicate stage flags found in ranges %u and %u. %s", i, j,
4696                                validation_error_map[VALIDATION_ERROR_0fe00248]);
4697            }
4698        }
4699    }
4700
4701    VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
4702    if (VK_SUCCESS == result) {
4703        lock_guard_t lock(global_lock);
4704        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
4705        plNode.layout = *pPipelineLayout;
4706        plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
4707        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
4708            plNode.set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
4709        }
4710        plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
4711        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
4712            plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
4713        }
4714    }
4715    return result;
4716}
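
// Editor's note: an application-side sketch that satisfies the push constant checks above --
// each range has non-zero stageFlags that no other range repeats, a 4-byte-aligned offset,
// and a non-zero size that is a multiple of 4. Illustrative only.
static void ExampleFillPushConstantRanges(VkPushConstantRange ranges[2]) {
    ranges[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    ranges[0].offset = 0;  // multiple of 4
    ranges[0].size = 16;   // non-zero, multiple of 4
    ranges[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;  // no stage flag shared with ranges[0]
    ranges[1].offset = 16;
    ranges[1].size = 8;
}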
4717
4718VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
4719                                                    const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
4720    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4721    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
4722    if (VK_SUCCESS == result) {
4723        DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
4724        if (NULL == pNewNode) {  // NOTE: unreachable with a throwing operator new; retained as defensive logging only
4725            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4726                        HandleToUint64(*pDescriptorPool), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
4727                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
4728                return VK_ERROR_VALIDATION_FAILED_EXT;
4729        } else {
4730            lock_guard_t lock(global_lock);
4731            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
4732        }
4733    } else {
4734        // TODO: Is any cleanup needed if pool creation fails?
4735    }
4736    return result;
4737}
4738
4739VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
4740                                                   VkDescriptorPoolResetFlags flags) {
4741    // TODO : Add checks for VALIDATION_ERROR_32a00272
4742    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4743    VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
4744    if (VK_SUCCESS == result) {
4745        lock_guard_t lock(global_lock);
4746        clearDescriptorPool(dev_data, device, descriptorPool, flags);
4747    }
4748    return result;
4749}
4750// Ensure the pool contains enough descriptors and descriptor sets to satisfy
4751// an allocation request. Fills common_data with the total number of descriptors of each type required,
4752// as well as DescriptorSetLayout ptrs used for later update.
4753static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
4754                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
4755    // Always update common data
4756    cvdescriptorset::UpdateAllocateDescriptorSetsData(dev_data, pAllocateInfo, common_data);
4757    if (dev_data->instance_data->disabled.allocate_descriptor_sets) return false;
4758    // All state checks for AllocateDescriptorSets are done in a single function
4759    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data, pAllocateInfo, common_data);
4760}
4761// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
4762static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
4763                                                 VkDescriptorSet *pDescriptorSets,
4764                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
4765    // All the updates are contained in a single cvdescriptorset function
4766    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
4767                                                   &dev_data->setMap, dev_data);
4768}
4769
4770// TODO: PostCallRecord routine is dependent on data generated in PreCallValidate -- needs to be moved out
4771VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
4772                                                      VkDescriptorSet *pDescriptorSets) {
4773    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4774    unique_lock_t lock(global_lock);
4775    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
4776    bool skip = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
4777    lock.unlock();
4778
4779    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4780
4781    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
4782
4783    if (VK_SUCCESS == result) {
4784        lock.lock();
4785        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
4786        lock.unlock();
4787    }
4788    return result;
4789}
4790// Verify state before freeing DescriptorSets
4791static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
4792                                              const VkDescriptorSet *descriptor_sets) {
4793    if (dev_data->instance_data->disabled.free_descriptor_sets) return false;
4794    bool skip = false;
4795    // First make sure sets being destroyed are not currently in-use
4796    for (uint32_t i = 0; i < count; ++i) {
4797        if (descriptor_sets[i] != VK_NULL_HANDLE) {
4798            skip |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
4799        }
4800    }
4801
4802    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
4803    if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
4804        // Can't Free from a NON_FREE pool
4805        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4806                        HandleToUint64(pool), __LINE__, VALIDATION_ERROR_28600270, "DS",
4807                        "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
4808                        "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
4809                        validation_error_map[VALIDATION_ERROR_28600270]);
4810    }
4811    return skip;
4812}
4813// Sets have been removed from the pool so update underlying state
4814static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
4815                                             const VkDescriptorSet *descriptor_sets) {
4816    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
4817    // Update available descriptor sets in pool
4818    pool_state->availableSets += count;
4819
4820    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
4821    for (uint32_t i = 0; i < count; ++i) {
4822        if (descriptor_sets[i] != VK_NULL_HANDLE) {
4823            auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
4824            uint32_t type_index = 0, descriptor_count = 0;
4825            for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
4826                type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
4827                descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
4828                pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
4829            }
4830            freeDescriptorSet(dev_data, descriptor_set);
4831            pool_state->sets.erase(descriptor_set);
4832        }
4833    }
4834}
4835
4836VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
4837                                                  const VkDescriptorSet *pDescriptorSets) {
4838    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4839    // Make sure that no sets being destroyed are in-flight
4840    unique_lock_t lock(global_lock);
4841    bool skip = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
4842    lock.unlock();
4843
4844    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4845    VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
4846    if (VK_SUCCESS == result) {
4847        lock.lock();
4848        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
4849        lock.unlock();
4850    }
4851    return result;
4852}
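
// Editor's note: vkFreeDescriptorSets is only valid on pools created with the FREE bit,
// per the check above. An application-side sketch of creating such a pool (illustrative
// name and sizes):
static VkResult ExampleCreateFreeablePool(VkDevice device, VkDescriptorPool *out_pool) {
    VkDescriptorPoolSize pool_size = {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 4};
    VkDescriptorPoolCreateInfo dpci = {};
    dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    dpci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;  // permits vkFreeDescriptorSets on this pool
    dpci.maxSets = 4;
    dpci.poolSizeCount = 1;
    dpci.pPoolSizes = &pool_size;
    return vkCreateDescriptorPool(device, &dpci, nullptr, out_pool);
}
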
4853// TODO : This is a Proof-of-concept for core validation architecture
4854//  Really we'll want to break out these functions to separate files but
4855//  keeping it all together here to prove out design
4856// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
4857static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
4858                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
4859                                                const VkCopyDescriptorSet *pDescriptorCopies) {
4860    if (dev_data->instance_data->disabled.update_descriptor_sets) return false;
4861    // First, perform the map look-ups.
4862    // NOTE: UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets at once,
4863    //  so a single up-front map look-up is not possible; the functions below look each set up individually.
4864
4865    // Now make call(s) that validate state, but don't perform state updates in this function
4866    // Note: no DescriptorSet instance exists at this point, so a helper function in the
4867    //  namespace parses the params and makes the calls into the specific class instances
4868    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
4869                                                         descriptorCopyCount, pDescriptorCopies);
4870}
4871// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
4872static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
4873                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
4874                                               const VkCopyDescriptorSet *pDescriptorCopies) {
4875    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
4876                                                 pDescriptorCopies);
4877}
4878
4879VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
4880                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
4881                                                const VkCopyDescriptorSet *pDescriptorCopies) {
4882    // Only map look-up at top level is for device-level layer_data
4883    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4884    unique_lock_t lock(global_lock);
4885    bool skip = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
4886                                                    pDescriptorCopies);
4887    lock.unlock();
4888    if (!skip) {
4889        dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
4890                                                      pDescriptorCopies);
4891        lock.lock();
4892        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
4893        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
4894                                           pDescriptorCopies);
4895    }
4896}
4897
4898VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
4899                                                      VkCommandBuffer *pCommandBuffer) {
4900    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4901    VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
4902    if (VK_SUCCESS == result) {
4903        unique_lock_t lock(global_lock);
4904        auto pPool = GetCommandPoolNode(dev_data, pCreateInfo->commandPool);
4905
4906        if (pPool) {
4907            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
4908                // Add command buffer to its commandPool map
4909                pPool->commandBuffers.push_back(pCommandBuffer[i]);
4910                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
4911                // Add command buffer to map
4912                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
4913                resetCB(dev_data, pCommandBuffer[i]);
4914                pCB->createInfo = *pCreateInfo;
4915                pCB->device = device;
4916            }
4917        }
4918        lock.unlock();
4919    }
4920    return result;
4921}
4922
4923// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
4924static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
4925    addCommandBufferBinding(&fb_state->cb_bindings, {HandleToUint64(fb_state->framebuffer), kVulkanObjectTypeFramebuffer},
4926                            cb_state);
4927    for (auto attachment : fb_state->attachments) {
4928        auto view_state = attachment.view_state;
4929        if (view_state) {
4930            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
4931        }
4932        auto rp_state = GetRenderPassState(dev_data, fb_state->createInfo.renderPass);
4933        if (rp_state) {
4934            addCommandBufferBinding(&rp_state->cb_bindings, {HandleToUint64(rp_state->renderPass), kVulkanObjectTypeRenderPass},
4935                                    cb_state);
4936        }
4937    }
4938}
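
// Editor's note: an application-side sketch of the inheritance info that the secondary-
// command-buffer checks in BeginCommandBuffer below expect: a renderPass compatible with
// the framebuffer's, a subpass index within range, and VK_QUERY_CONTROL_PRECISE_BIT only
// when precise occlusion queries are enabled. The function name is illustrative.
static void ExampleBeginSecondaryCB(VkCommandBuffer secondary_cb, VkRenderPass render_pass, VkFramebuffer framebuffer) {
    VkCommandBufferInheritanceInfo inheritance = {};
    inheritance.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    inheritance.renderPass = render_pass;   // must be compatible with the framebuffer's render pass
    inheritance.subpass = 0;                // must be less than the render pass's subpassCount
    inheritance.framebuffer = framebuffer;  // may be VK_NULL_HANDLE if not yet known
    VkCommandBufferBeginInfo begin_info = {};
    begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    begin_info.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
    begin_info.pInheritanceInfo = &inheritance;
    vkBeginCommandBuffer(secondary_cb, &begin_info);
}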
4939
4940VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
4941    bool skip = false;
4942    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
4943    unique_lock_t lock(global_lock);
4944    // Validate command buffer level
4945    GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
4946    if (cb_node) {
4947        // Beginning implicitly resets the command buffer, so make sure any fence guarding it is done and then clear memory references
4948        if (cb_node->in_use.load()) {
4949            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4950                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00062, "MEM",
4951                            "Calling vkBeginCommandBuffer() on active command buffer %p before it has completed. "
4952                            "You must check the command buffer's fence before making this call. %s",
4953                            commandBuffer, validation_error_map[VALIDATION_ERROR_16e00062]);
4954        }
4955        clear_cmd_buf_and_mem_references(dev_data, cb_node);
4956        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
4957            // Secondary Command Buffer
4958            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
4959            if (!pInfo) {
4960                skip |=
4961                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4962                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00066, "DS",
4963                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info. %s", commandBuffer,
4964                            validation_error_map[VALIDATION_ERROR_16e00066]);
4965            } else {
4966                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
4967                    assert(pInfo->renderPass);
4968                    string errorString = "";
4969                    auto framebuffer = GetFramebufferState(dev_data, pInfo->framebuffer);
4970                    if (framebuffer) {
4971                        if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
4972                            !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
4973                                                             GetRenderPassState(dev_data, pInfo->renderPass)->createInfo.ptr(),
4974                                                             errorString)) {
4975                            // renderPass that framebuffer was created with must be compatible with local renderPass
4976                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4977                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
4978                                            VALIDATION_ERROR_0280006e, "DS",
4979                                            "vkBeginCommandBuffer(): Secondary Command "
4980                                            "Buffer (0x%p) renderPass (0x%" PRIxLEAST64
4981                                            ") is incompatible w/ framebuffer "
4982                                            "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s. %s",
4983                                            commandBuffer, HandleToUint64(pInfo->renderPass), HandleToUint64(pInfo->framebuffer),
4984                                            HandleToUint64(framebuffer->createInfo.renderPass), errorString.c_str(),
4985                                            validation_error_map[VALIDATION_ERROR_0280006e]);
4986                        }
4987                        // Connect this framebuffer and its children to this cmdBuffer
4988                        AddFramebufferBinding(dev_data, cb_node, framebuffer);
4989                    }
4990                }
4991                if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
4992                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
4993                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4994                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
4995                                    VALIDATION_ERROR_16e00068, "DS",
4996                                    "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
4997                                    "VK_QUERY_CONTROL_PRECISE_BIT set in queryFlags if occlusionQueryEnable is VK_FALSE or the device does not "
4998                                    "support precise occlusion queries. %s",
4999                                    commandBuffer, validation_error_map[VALIDATION_ERROR_16e00068]);
5000                }
5001            }
5002            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
5003                auto renderPass = GetRenderPassState(dev_data, pInfo->renderPass);
5004                if (renderPass) {
5005                    if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
5006                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5007                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
5008                                        VALIDATION_ERROR_0280006c, "DS",
5009                                        "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have a subpass index (%u) "
5010                                        "that is less than the number of subpasses (%u). %s",
5011                                        commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount,
5012                                        validation_error_map[VALIDATION_ERROR_0280006c]);
5013                    }
5014                }
5015            }
5016        }
5017        if (CB_RECORDING == cb_node->state) {
5018            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5019                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00062, "DS",
5020                            "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%p"
5021                            ") in the RECORDING state. Must first call vkEndCommandBuffer(). %s",
5022                            commandBuffer, validation_error_map[VALIDATION_ERROR_16e00062]);
5023        } else if (CB_RECORDED == cb_node->state || CB_INVALID_COMPLETE == cb_node->state) {
5024            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
5025            auto pPool = GetCommandPoolNode(dev_data, cmdPool);
5026            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
5027                skip |=
5028                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5029                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00064, "DS",
5030                            "Call to vkBeginCommandBuffer() on command buffer (0x%p"
5031                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
5032                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
5033                            commandBuffer, HandleToUint64(cmdPool), validation_error_map[VALIDATION_ERROR_16e00064]);
5034            }
5035            resetCB(dev_data, commandBuffer);
5036        }
5037        // Set updated state here in case implicit reset occurs above
5038        cb_node->state = CB_RECORDING;
5039        cb_node->beginInfo = *pBeginInfo;
5040        if (cb_node->beginInfo.pInheritanceInfo) {
5041            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
5042            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
5043            // If this is a secondary command buffer inheriting render pass state, update the items it should inherit.
5044            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
5045                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
5046                cb_node->activeRenderPass = GetRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
5047                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
5048                cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
5049                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
5050            }
5051        }
5052    }
5053    lock.unlock();
5054    if (skip) {
5055        return VK_ERROR_VALIDATION_FAILED_EXT;
5056    }
5057    VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
5058
5059    return result;
5060}
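
// Illustrative sketch (not part of the layer): how an application would satisfy the secondary
// command buffer rules validated above. All names here (ExampleBeginSecondaryCB, render_pass,
// framebuffer) are hypothetical application-side values, not layer state.
#if 0
static void ExampleBeginSecondaryCB(VkCommandBuffer secondary_cb, VkRenderPass render_pass, VkFramebuffer framebuffer) {
    VkCommandBufferInheritanceInfo inheritance = {};
    inheritance.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    inheritance.renderPass = render_pass;   // must be compatible with the render pass framebuffer was created with
    inheritance.subpass = 0;                // must be less than render_pass's subpassCount
    inheritance.framebuffer = framebuffer;  // optional; VK_NULL_HANDLE is also legal here
    inheritance.occlusionQueryEnable = VK_FALSE;
    inheritance.queryFlags = 0;  // no VK_QUERY_CONTROL_PRECISE_BIT while occlusion queries are disabled

    VkCommandBufferBeginInfo begin_info = {};
    begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    begin_info.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
    begin_info.pInheritanceInfo = &inheritance;  // required for secondary command buffers
    vkBeginCommandBuffer(secondary_cb, &begin_info);
}
#endif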
5061
5062VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
5063    bool skip = false;
5064    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5065    unique_lock_t lock(global_lock);
5066    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5067    if (pCB) {
5068        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
5069            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
5070            // This needs spec clarification to update valid usage, see comments in PR:
5071            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
5072            skip |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_27400078);
5073        }
5074        skip |= ValidateCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
5075        for (auto query : pCB->activeQueries) {
5076            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5077                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_2740007a, "DS",
5078                            "Ending command buffer with an in-progress query: queryPool 0x%" PRIx64 ", index %u. %s",
5079                            HandleToUint64(query.pool), query.index, validation_error_map[VALIDATION_ERROR_2740007a]);
5080        }
5081    }
5082    if (!skip) {
5083        lock.unlock();
5084        auto result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
5085        lock.lock();
5086        if (VK_SUCCESS == result) {
5087            pCB->state = CB_RECORDED;
5088        }
5089        return result;
5090    } else {
5091        return VK_ERROR_VALIDATION_FAILED_EXT;
5092    }
5093}
5094
5095VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
5096    bool skip = false;
5097    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5098    unique_lock_t lock(global_lock);
5099    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5100    VkCommandPool cmdPool = pCB->createInfo.commandPool;
5101    auto pPool = GetCommandPoolNode(dev_data, cmdPool);
5102    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
5103        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5104                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_3260005c, "DS",
5105                        "Attempt to reset command buffer (0x%p) created from command pool (0x%" PRIxLEAST64
5106                        ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
5107                        commandBuffer, HandleToUint64(cmdPool), validation_error_map[VALIDATION_ERROR_3260005c]);
5108    }
5109    skip |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_3260005a);
5110    lock.unlock();
5111    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5112    VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
5113    if (VK_SUCCESS == result) {
5114        lock.lock();
5115        resetCB(dev_data, commandBuffer);
5116        lock.unlock();
5117    }
5118    return result;
5119}
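
// Illustrative sketch (not part of the layer): both the implicit reset in vkBeginCommandBuffer()
// and the explicit vkResetCommandBuffer() call validated above require the parent pool to have
// been created with the reset flag. `device` and `queue_family_index` are hypothetical.
#if 0
static VkCommandPool ExampleCreateResettablePool(VkDevice device, uint32_t queue_family_index) {
    VkCommandPoolCreateInfo pool_info = {};
    pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
    pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;  // permits per-buffer resets
    pool_info.queueFamilyIndex = queue_family_index;
    VkCommandPool pool = VK_NULL_HANDLE;
    vkCreateCommandPool(device, &pool_info, nullptr, &pool);
    return pool;
}
#endif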
5120
5121VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
5122                                           VkPipeline pipeline) {
5123    bool skip = false;
5124    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5125    unique_lock_t lock(global_lock);
5126    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
5127    if (cb_state) {
5128        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
5129                                      VALIDATION_ERROR_18002415);
5130        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
5131        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (cb_state->activeRenderPass)) {
5132            skip |=
5133                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
5134                        HandleToUint64(pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
5135                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
5136                        HandleToUint64(pipeline), HandleToUint64(cb_state->activeRenderPass->renderPass));
5137        }
5138        // TODO: VALIDATION_ERROR_18000612 VALIDATION_ERROR_18000616
5139
5140        PIPELINE_STATE *pipe_state = getPipelineState(dev_data, pipeline);
5141        if (pipe_state) {
5142            cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
5143            set_cb_pso_status(cb_state, pipe_state);
5144            set_pipeline_state(pipe_state);
5145            skip |= validate_dual_src_blend_feature(dev_data, pipe_state);
5146            addCommandBufferBinding(&pipe_state->cb_bindings, {HandleToUint64(pipeline), kVulkanObjectTypePipeline}, cb_state);
5147            if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
5148                // Add binding for child renderpass
5149                auto rp_state = GetRenderPassState(dev_data, pipe_state->graphicsPipelineCI.renderPass);
5150                if (rp_state) {
5151                    addCommandBufferBinding(&rp_state->cb_bindings,
5152                                            {HandleToUint64(rp_state->renderPass), kVulkanObjectTypeRenderPass}, cb_state);
5153                }
5154            }
5155        } else {
5156            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
5157                            HandleToUint64(pipeline), __LINE__, VALIDATION_ERROR_18027e01, "DS",
5158                            "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist! %s", HandleToUint64(pipeline),
5159                            validation_error_map[VALIDATION_ERROR_18027e01]);
5160        }
5161    }
5162    lock.unlock();
5163    if (!skip) dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
5164}
5165
5166VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
5167                                          const VkViewport *pViewports) {
5168    bool skip = false;
5169    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5170    unique_lock_t lock(global_lock);
5171    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5172    if (pCB) {
5173        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1e002415);
5174        skip |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
5175        pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
5176    }
5177    lock.unlock();
5178    if (!skip) dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
5179}
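
// The viewportMask update above records bits [firstViewport, firstViewport + viewportCount) as
// dynamically set. Worked example: firstViewport = 2, viewportCount = 3 gives
//     ((1u << 3) - 1u) << 2  ==  0b111 << 2  ==  0b11100,
// marking viewports 2, 3, and 4. CmdSetScissor below tracks scissorMask with the same formula.
// Note the expression assumes firstViewport + viewportCount <= 32.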
5180
5181VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
5182                                         const VkRect2D *pScissors) {
5183    bool skip = false;
5184    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5185    unique_lock_t lock(global_lock);
5186    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5187    if (pCB) {
5188        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d802415);
5189        skip |= ValidateCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
5190        pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
5191    }
5192    lock.unlock();
5193    if (!skip) dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
5194}
5195
5196VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
5197    bool skip = false;
5198    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5199    unique_lock_t lock(global_lock);
5200    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5201    if (pCB) {
5202        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d602415);
5203        skip |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
5204        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
5205
5206        PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
5207        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
5208            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5209                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1d600626, "DS",
5210                            "vkCmdSetLineWidth() called but the bound pipeline was not created with the "
5211                            "VK_DYNAMIC_STATE_LINE_WIDTH dynamic state, so the line width set here may be ignored. %s",
5212                            validation_error_map[VALIDATION_ERROR_1d600626]);
5213        } else {
5214            skip |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, kVulkanObjectTypeCommandBuffer, HandleToUint64(commandBuffer),
5215                                    lineWidth);
5216        }
5217    }
5218    lock.unlock();
5219    if (!skip) dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
5220}
5221
5222VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
5223                                           float depthBiasSlopeFactor) {
5224    bool skip = false;
5225    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5226    unique_lock_t lock(global_lock);
5227    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5228    if (pCB) {
5229        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1cc02415);
5230        skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
5231        if ((depthBiasClamp != 0.0) && (!dev_data->enabled_features.depthBiasClamp)) {
5232            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5233                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1cc0062c, "DS",
5234                            "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp "
5235                            "parameter must be set to 0.0. %s",
5236                            validation_error_map[VALIDATION_ERROR_1cc0062c]);
5237        }
5238        if (!skip) {
5239            pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
5240        }
5241    }
5242    lock.unlock();
5243    if (!skip)
5244        dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
5245}
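
// Illustrative sketch (not part of the layer): the depthBiasClamp check above only allows a
// nonzero clamp when the feature was enabled at device creation. A hypothetical application-side
// snippet, assuming `gpu` and a partially filled `create_info`:
#if 0
static VkPhysicalDeviceFeatures example_enabled_features = {};
static void ExampleEnableDepthBiasClamp(VkPhysicalDevice gpu, VkDeviceCreateInfo *create_info) {
    VkPhysicalDeviceFeatures supported = {};
    vkGetPhysicalDeviceFeatures(gpu, &supported);
    example_enabled_features.depthBiasClamp = supported.depthBiasClamp;  // request only if supported
    create_info->pEnabledFeatures = &example_enabled_features;
}
#endif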
5246
5247VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
5248    bool skip = false;
5249    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5250    unique_lock_t lock(global_lock);
5251    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5252    if (pCB) {
5253        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ca02415);
5254        skip |= ValidateCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
5255        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
5256    }
5257    lock.unlock();
5258    if (!skip) dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
5259}
5260
5261VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
5262    bool skip = false;
5263    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5264    unique_lock_t lock(global_lock);
5265    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5266    if (pCB) {
5267        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ce02415);
5268        skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
5269        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
5270    }
5271    lock.unlock();
5272    if (!skip) dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
5273}
5274
5275VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
5276                                                    uint32_t compareMask) {
5277    bool skip = false;
5278    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5279    unique_lock_t lock(global_lock);
5280    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5281    if (pCB) {
5282        skip |=
5283            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1da02415);
5284        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
5285        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
5286    }
5287    lock.unlock();
5288    if (!skip) dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
5289}
5290
5291VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
5292    bool skip = false;
5293    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5294    unique_lock_t lock(global_lock);
5295    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5296    if (pCB) {
5297        skip |=
5298            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1de02415);
5299        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
5300        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
5301    }
5302    lock.unlock();
5303    if (!skip) dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
5304}
5305
5306VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
5307    bool skip = false;
5308    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5309    unique_lock_t lock(global_lock);
5310    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5311    if (pCB) {
5312        skip |=
5313            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1dc02415);
5314        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
5315        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
5316    }
5317    lock.unlock();
5318    if (!skip) dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
5319}
5320
5321VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
5322                                                 VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
5323                                                 const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
5324                                                 const uint32_t *pDynamicOffsets) {
5325    bool skip = false;
5326    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5327    unique_lock_t lock(global_lock);
5328    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
5329    if (cb_state) {
5330        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
5331                                      VALIDATION_ERROR_17c02415);
5332        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
5333        // Track total count of dynamic descriptor types to make sure we have an offset for each one
5334        uint32_t total_dynamic_descriptors = 0;
5335        string error_string = "";
5336        uint32_t last_set_index = firstSet + setCount - 1;
5337        if (last_set_index >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
5338            cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
5339            cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(last_set_index + 1);
5340        }
5341        auto old_final_bound_set = cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[last_set_index];
5342        auto pipeline_layout = getPipelineLayout(dev_data, layout);
5343        for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
5344            cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(dev_data, pDescriptorSets[set_idx]);
5345            if (descriptor_set) {
5346                cb_state->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
5347                cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[set_idx + firstSet] = descriptor_set;
5348                if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
5349                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
5350                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
5351                                    __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
5352                                    "Descriptor Set 0x%" PRIxLEAST64
5353                                    " bound but it was never updated. You may want to either update it or not bind it.",
5354                                    HandleToUint64(pDescriptorSets[set_idx]));
5355                }
5356                // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
5357                if (!verify_set_layout_compatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
5358                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5359                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
5360                                    __LINE__, VALIDATION_ERROR_17c002cc, "DS",
5361                                    "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
5362                                    "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s. %s",
5363                                    set_idx, set_idx + firstSet, HandleToUint64(layout), error_string.c_str(),
5364                                    validation_error_map[VALIDATION_ERROR_17c002cc]);
5365                }
5366
5367                auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
5368
5369                cb_state->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx].clear();
5370
5371                if (set_dynamic_descriptor_count) {
5372                    // First make sure we won't overstep bounds of pDynamicOffsets array
5373                    if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
5374                        skip |= log_msg(
5375                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
5376                            HandleToUint64(pDescriptorSets[set_idx]), __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
5377                            "descriptorSet #%u (0x%" PRIxLEAST64
5378                            ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
5379                            "array. There must be one dynamic offset for each dynamic descriptor being bound.",
5380                            set_idx, HandleToUint64(pDescriptorSets[set_idx]), descriptor_set->GetDynamicDescriptorCount(),
5381                            (dynamicOffsetCount - total_dynamic_descriptors));
5382                    } else {  // Validate and store dynamic offsets with the set
5383                        // Validate Dynamic Offset Minimums
5384                        uint32_t cur_dyn_offset = total_dynamic_descriptors;
5385                        for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
5386                            if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
5387                                if (SafeModulo(
5388                                        pDynamicOffsets[cur_dyn_offset],
5389                                        dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
5390                                    skip |=
5391                                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5392                                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
5393                                                VALIDATION_ERROR_17c002d4, "DS",
5394                                                "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
5395                                                "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
5396                                                cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
5397                                                dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
5398                                                validation_error_map[VALIDATION_ERROR_17c002d4]);
5399                                }
5400                                cur_dyn_offset++;
5401                            } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
5402                                if (SafeModulo(
5403                                        pDynamicOffsets[cur_dyn_offset],
5404                                        dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
5405                                    skip |=
5406                                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5407                                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
5408                                                VALIDATION_ERROR_17c002d4, "DS",
5409                                                "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
5410                                                "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
5411                                                cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
5412                                                dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment,
5413                                                validation_error_map[VALIDATION_ERROR_17c002d4]);
5414                                }
5415                                cur_dyn_offset++;
5416                            }
5417                        }
5418
5419                        cb_state->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx] =
5420                            std::vector<uint32_t>(pDynamicOffsets + total_dynamic_descriptors,
5421                                                  pDynamicOffsets + total_dynamic_descriptors + set_dynamic_descriptor_count);
5422                        // Keep running total of dynamic descriptor count to verify at the end
5423                        total_dynamic_descriptors += set_dynamic_descriptor_count;
5424                    }
5425                }
5426            } else {
5427                skip |=
5428                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
5429                            HandleToUint64(pDescriptorSets[set_idx]), __LINE__, DRAWSTATE_INVALID_SET, "DS",
5430                            "Attempt to bind descriptor set 0x%" PRIxLEAST64 " that doesn't exist!",
5431                            HandleToUint64(pDescriptorSets[set_idx]));
5432            }
5433            // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
5434            if (firstSet > 0) {  // Check set #s below the first bound set
5435                for (uint32_t i = 0; i < firstSet; ++i) {
5436                    if (cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
5437                        !verify_set_layout_compatibility(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i],
5438                                                         pipeline_layout, i, error_string)) {
5439                        skip |= log_msg(
5440                            dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
5441                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
5442                            HandleToUint64(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i]), __LINE__, DRAWSTATE_NONE,
5443                            "DS", "DescriptorSet 0x%" PRIxLEAST64
5444                                  " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
5445                            HandleToUint64(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i]), i,
5446                            HandleToUint64(layout));
5447                        cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[i] = nullptr;  // slot holds a DescriptorSet*, not a Vulkan handle
5448                    }
5449                }
5450            }
5451            // Check if newly last bound set invalidates any remaining bound sets
5452            if ((cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (last_set_index)) {
5453                if (old_final_bound_set &&
5454                    !verify_set_layout_compatibility(old_final_bound_set, pipeline_layout, last_set_index, error_string)) {
5455                    auto old_set = old_final_bound_set->GetSet();
5456                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
5457                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(old_set), __LINE__,
5458                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
5459                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
5460                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
5461                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
5462                                    HandleToUint64(old_set), last_set_index,
5463                                    HandleToUint64(cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[last_set_index]),
5464                                    last_set_index, last_set_index + 1, HandleToUint64(layout));
5465                    cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
5466                }
5467            }
5468        }
5469        //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
5470        if (total_dynamic_descriptors != dynamicOffsetCount) {
5471            skip |=
5472                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5473                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_17c002ce, "DS",
5474                        "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
5475                        "is %u. It should exactly match the number of dynamic descriptors. %s",
5476                        setCount, total_dynamic_descriptors, dynamicOffsetCount, validation_error_map[VALIDATION_ERROR_17c002ce]);
5477        }
5478    }
5479    lock.unlock();
5480    if (!skip)
5481        dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
5482                                                       pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
5483}
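
// Illustrative sketch (not part of the layer): each dynamic offset passed to
// vkCmdBindDescriptorSets() must be a multiple of the matching device limit, as validated above.
// A hypothetical helper that rounds a desired offset up to minUniformBufferOffsetAlignment
// (storage buffers would use minStorageBufferOffsetAlignment instead):
#if 0
static uint32_t ExampleAlignDynamicOffset(uint32_t desired_offset, const VkPhysicalDeviceLimits &limits) {
    const uint32_t align = static_cast<uint32_t>(limits.minUniformBufferOffsetAlignment);
    return ((desired_offset + align - 1) / align) * align;  // round up to the alignment boundary
}
#endif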
5484
5485VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5486                                              VkIndexType indexType) {
5487    bool skip = false;
5488    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5489    // TODO : Somewhere need to verify that IBs have correct usage state flagged
5490    unique_lock_t lock(global_lock);
5491
5492    auto buffer_state = GetBufferState(dev_data, buffer);
5493    auto cb_node = GetCBNode(dev_data, commandBuffer);
5494    if (cb_node && buffer_state) {
5495        skip |=
5496            ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_17e02415);
5497        skip |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
5498        skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_17e00364);
5499        std::function<bool()> function = [=]() {
5500            return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
5501        };
5502        cb_node->validate_functions.push_back(function);
5503        VkDeviceSize offset_align = 0;
5504        switch (indexType) {
5505            case VK_INDEX_TYPE_UINT16:
5506                offset_align = 2;
5507                break;
5508            case VK_INDEX_TYPE_UINT32:
5509                offset_align = 4;
5510                break;
5511            default:
5512                // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
5513                break;
5514        }
5515        if (!offset_align || (offset % offset_align)) {
5516            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5517                            HandleToUint64(commandBuffer), __LINE__, DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
5518                            "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on the alignment boundary for index type %s.", offset,
5519                            string_VkIndexType(indexType));
5520        }
5521        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
5522    } else {
5523        assert(0);
5524    }
5525    lock.unlock();
5526    if (!skip) dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
5527}
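
// Worked example of the alignment rule above: VK_INDEX_TYPE_UINT16 requires the offset to be a
// multiple of 2, so offset 0x1001 fails (0x1001 % 2 == 1) while 0x1002 passes; VK_INDEX_TYPE_UINT32
// requires a multiple of 4.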
5528
5529void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
5530    uint32_t end = firstBinding + bindingCount;
5531    if (pCB->currentDrawData.buffers.size() < end) {
5532        pCB->currentDrawData.buffers.resize(end);
5533    }
5534    for (uint32_t i = 0; i < bindingCount; ++i) {
5535        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
5536    }
5537}
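
// Worked example: firstBinding = 1, bindingCount = 2 on an empty tracking vector resizes
// currentDrawData.buffers to 3 elements and fills slots 1 and 2, leaving slot 0 value-initialized
// (VK_NULL_HANDLE) until a buffer is bound there.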
5538
5539static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
5540
5541VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
5542                                                const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
5543    bool skip = false;
5544    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5545    // TODO : Somewhere need to verify that VBs have correct usage state flagged
5546    unique_lock_t lock(global_lock);
5547
5548    auto cb_node = GetCBNode(dev_data, commandBuffer);
5549    if (cb_node) {
5550        skip |=
5551            ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_18202415);
5552        skip |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
5553        for (uint32_t i = 0; i < bindingCount; ++i) {
5554            auto buffer_state = GetBufferState(dev_data, pBuffers[i]);
5555            assert(buffer_state);
5556            skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_182004e8);
5557            std::function<bool()> function = [=]() {
5558                return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
5559            };
5560            cb_node->validate_functions.push_back(function);
5561            if (pOffsets[i] >= buffer_state->createInfo.size) {
5562                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5563                                HandleToUint64(buffer_state->buffer), __LINE__, VALIDATION_ERROR_182004e4, "DS",
5564                                "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer. %s",
5565                                pOffsets[i], validation_error_map[VALIDATION_ERROR_182004e4]);
5566            }
5567        }
5568        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
5569    } else {
5570        assert(0);
5571    }
5572    lock.unlock();
5573    if (!skip) dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
5574}
5575
5576// Expects global_lock to be held by caller
5577static void MarkStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5578    for (auto imageView : pCB->updateImages) {
5579        auto view_state = GetImageViewState(dev_data, imageView);
5580        if (!view_state) continue;
5581
5582        auto image_state = GetImageState(dev_data, view_state->create_info.image);
5583        assert(image_state);
5584        std::function<bool()> function = [=]() {
5585            SetImageMemoryValid(dev_data, image_state, true);
5586            return false;
5587        };
5588        pCB->validate_functions.push_back(function);
5589    }
5590    for (auto buffer : pCB->updateBuffers) {
5591        auto buffer_state = GetBufferState(dev_data, buffer);
5592        assert(buffer_state);
5593        std::function<bool()> function = [=]() {
5594            SetBufferMemoryValid(dev_data, buffer_state, true);
5595            return false;
5596        };
5597        pCB->validate_functions.push_back(function);
5598    }
5599}
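
// The lambdas queued above follow this file's deferred-validation pattern: checks that depend on
// queue-submission-time state are captured while recording and executed at submit. A minimal
// sketch of the pattern, assuming a node with a validate_functions vector like GLOBAL_CB_NODE's:
#if 0
static bool ExampleRunDeferredChecks(GLOBAL_CB_NODE *cb) {
    bool skip = false;
    for (auto &fn : cb->validate_functions) {
        skip |= fn();  // each std::function<bool()> returns true if its validation failed
    }
    return skip;
}
#endif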
5600
5601// Generic function to handle validation for all CmdDraw* type functions
5602static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
5603                                CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller, VkQueueFlags queue_flags,
5604                                UNIQUE_VALIDATION_ERROR_CODE queue_flag_code, UNIQUE_VALIDATION_ERROR_CODE msg_code,
5605                                UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
5606    bool skip = false;
5607    *cb_state = GetCBNode(dev_data, cmd_buffer);
5608    if (*cb_state) {
5609        skip |= ValidateCmdQueueFlags(dev_data, *cb_state, caller, queue_flags, queue_flag_code);
5610        skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
5611        skip |= ValidateDrawState(dev_data, *cb_state, indexed, bind_point, caller, dynamic_state_msg_code);
5612        skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
5613                                                                : insideRenderPass(dev_data, *cb_state, caller, msg_code);
5614    }
5615    return skip;
5616}
5617
5618// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
5619static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
5620    UpdateDrawState(dev_data, cb_state, bind_point);
5621    MarkStoreImagesAndBuffersAsWritten(dev_data, cb_state);
5622}
5623
5624// Generic function to handle state update for all CmdDraw* type functions
5625static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
5626    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
5627    updateResourceTrackingOnDraw(cb_state);
5628    cb_state->hasDrawCmd = true;
5629}
5630
5631static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
5632                                   GLOBAL_CB_NODE **cb_state, const char *caller) {
5633    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
5634                               VALIDATION_ERROR_1a202415, VALIDATION_ERROR_1a200017, VALIDATION_ERROR_1a200376);
5635}
5636
5637static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
5638    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
5639}
5640
5641VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
5642                                   uint32_t firstVertex, uint32_t firstInstance) {
5643    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5644    GLOBAL_CB_NODE *cb_state = nullptr;
5645    unique_lock_t lock(global_lock);
5646    bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
5647    lock.unlock();
5648    if (!skip) {
5649        dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
5650        lock.lock();
5651        PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
5652        lock.unlock();
5653    }
5654}
5655
5656static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
5657                                          VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
5658    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
5659                               VALIDATION_ERROR_1a402415, VALIDATION_ERROR_1a400017, VALIDATION_ERROR_1a40039c);
5660}
5661
5662static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
5663    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
5664}
5665
5666VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
5667                                          uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
5668    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5669    GLOBAL_CB_NODE *cb_state = nullptr;
5670    unique_lock_t lock(global_lock);
5671    bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
5672                                              "vkCmdDrawIndexed()");
5673    lock.unlock();
5674    if (!skip) {
5675        dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
5676        lock.lock();
5677        PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
5678        lock.unlock();
5679    }
5680}
5681
5682static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
5683                                           VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
5684                                           const char *caller) {
5685    bool skip =
5686        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
5687                            VALIDATION_ERROR_1aa02415, VALIDATION_ERROR_1aa00017, VALIDATION_ERROR_1aa003cc);
5688    *buffer_state = GetBufferState(dev_data, buffer);
5689    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1aa003b4);
5690    // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
5691    // VkDrawIndirectCommand structures accessed by this command must be 0, which will require access to the contents of 'buffer'.
5692    return skip;
5693}
5694
5695static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
5696                                          BUFFER_STATE *buffer_state) {
5697    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
5698    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
5699}
5700
5701VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
5702                                           uint32_t stride) {
5703    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5704    GLOBAL_CB_NODE *cb_state = nullptr;
5705    BUFFER_STATE *buffer_state = nullptr;
5706    unique_lock_t lock(global_lock);
5707    bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
5708                                               &buffer_state, "vkCmdDrawIndirect()");
5709    lock.unlock();
5710    if (!skip) {
5711        dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
5712        lock.lock();
5713        PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
5714        lock.unlock();
5715    }
5716}
5717
5718static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
5719                                                  VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
5720                                                  BUFFER_STATE **buffer_state, const char *caller) {
5721    bool skip =
5722        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
5723                            VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1a602415, VALIDATION_ERROR_1a600017, VALIDATION_ERROR_1a600434);
5724    *buffer_state = GetBufferState(dev_data, buffer);
5725    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a60041c);
5726    // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
5727    // VkDrawIndexedIndirectCommand structures accessed by this command must be 0, which will require access to the contents of
5728    // 'buffer'.
5729    return skip;
5730}
5731
5732static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
5733                                                 BUFFER_STATE *buffer_state) {
5734    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
5735    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
5736}
5737
5738VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
5739                                                  uint32_t count, uint32_t stride) {
5740    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5741    GLOBAL_CB_NODE *cb_state = nullptr;
5742    BUFFER_STATE *buffer_state = nullptr;
5743    unique_lock_t lock(global_lock);
5744    bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
5745                                                      &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
5746    lock.unlock();
5747    if (!skip) {
5748        dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
5749        lock.lock();
5750        PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
5751        lock.unlock();
5752    }
5753}
5754
5755static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
5756                                       VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
5757    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
5758                               VALIDATION_ERROR_19c02415, VALIDATION_ERROR_19c00017, VALIDATION_ERROR_UNDEFINED);
5759}
5760
5761static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
5762    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
5763}
5764
5765VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
5766    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5767    GLOBAL_CB_NODE *cb_state = nullptr;
5768    unique_lock_t lock(global_lock);
5769    bool skip =
5770        PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
5771    lock.unlock();
5772    if (!skip) {
5773        dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
5774        lock.lock();
5775        PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
5776        lock.unlock();
5777    }
5778}
5779
5780static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
5781                                               VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
5782                                               BUFFER_STATE **buffer_state, const char *caller) {
5783    bool skip =
5784        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
5785                            VALIDATION_ERROR_1a002415, VALIDATION_ERROR_1a000017, VALIDATION_ERROR_UNDEFINED);
5786    *buffer_state = GetBufferState(dev_data, buffer);
5787    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a000322);
5788    return skip;
5789}
5790
5791static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
5792                                              BUFFER_STATE *buffer_state) {
5793    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
5794    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
5795}
5796
5797VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
5798    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5799    GLOBAL_CB_NODE *cb_state = nullptr;
5800    BUFFER_STATE *buffer_state = nullptr;
5801    unique_lock_t lock(global_lock);
5802    bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
5803                                                   &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
5804    lock.unlock();
5805    if (!skip) {
5806        dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
5807        lock.lock();
5808        PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
5809        lock.unlock();
5810    }
5811}
5812
5813VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
5814                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
5815    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5816    unique_lock_t lock(global_lock);
5817
5818    auto cb_node = GetCBNode(device_data, commandBuffer);
5819    auto src_buffer_state = GetBufferState(device_data, srcBuffer);
5820    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
5821
5822    if (cb_node && src_buffer_state && dst_buffer_state) {
5823        bool skip = PreCallValidateCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
5824        if (!skip) {
5825            PreCallRecordCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
5826            lock.unlock();
5827            device_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
5828        }
5829    } else {
5830        lock.unlock();
5831        assert(0);
5832    }
5833}
5834
5835VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5836                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
5837                                        const VkImageCopy *pRegions) {
5838    bool skip = false;
5839    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5840    unique_lock_t lock(global_lock);
5841
5842    auto cb_node = GetCBNode(device_data, commandBuffer);
5843    auto src_image_state = GetImageState(device_data, srcImage);
5844    auto dst_image_state = GetImageState(device_data, dstImage);
5845    if (cb_node && src_image_state && dst_image_state) {
5846        skip = PreCallValidateCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
5847                                           srcImageLayout, dstImageLayout);
5848        if (!skip) {
5849            PreCallRecordCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
5850                                      dstImageLayout);
5851            lock.unlock();
5852            device_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
5853                                                     pRegions);
5854        }
5855    } else {
5856        lock.unlock();
5857        assert(0);
5858    }
5859}
5860
5861// Validate that an image's sampleCount matches the requirement for a specific API call
5862bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
5863                              const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
5864    bool skip = false;
5865    if (image_state->createInfo.samples != sample_count) {
5866        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
5867                       HandleToUint64(image_state->image), 0, msgCode, "DS",
5868                       "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s. %s", location,
5869                       HandleToUint64(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
5870                       string_VkSampleCountFlagBits(sample_count), validation_error_map[msgCode]);
5871    }
5872    return skip;
5873}
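// Example use of ValidateImageSampleCount (illustrative; `some_msg_code` is a placeholder, not
// a real validation error code): vkCmdResolveImage requires a single-sample destination, so a
// caller might write
//
//     skip |= ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT,
//                                      "vkCmdResolveImage(): dstImage", some_msg_code);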
5874
5875VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5876                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
5877                                        const VkImageBlit *pRegions, VkFilter filter) {
5878    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5879    unique_lock_t lock(global_lock);
5880
5881    auto cb_node = GetCBNode(dev_data, commandBuffer);
5882    auto src_image_state = GetImageState(dev_data, srcImage);
5883    auto dst_image_state = GetImageState(dev_data, dstImage);
5884
5885    bool skip = PreCallValidateCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, filter);
5886
5887    if (!skip) {
5888        PreCallRecordCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state);
5889        lock.unlock();
5890        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
5891                                              pRegions, filter);
5892    }
5893}
5894
5895VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
5896                                                VkImageLayout dstImageLayout, uint32_t regionCount,
5897                                                const VkBufferImageCopy *pRegions) {
5898    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5899    unique_lock_t lock(global_lock);
5900    bool skip = false;
5901    auto cb_node = GetCBNode(device_data, commandBuffer);
5902    auto src_buffer_state = GetBufferState(device_data, srcBuffer);
5903    auto dst_image_state = GetImageState(device_data, dstImage);
5904    if (cb_node && src_buffer_state && dst_image_state) {
5905        skip = PreCallValidateCmdCopyBufferToImage(device_data, dstImageLayout, cb_node, src_buffer_state, dst_image_state,
5906                                                   regionCount, pRegions, "vkCmdCopyBufferToImage()");
5907    } else {
5908        lock.unlock();
5909        assert(0);
5910        // TODO: report VU01244 here, or put in object tracker?
        return;  // State lookup failed; do not fall through to the record/dispatch path below
5911    }
5912    if (!skip) {
5913        PreCallRecordCmdCopyBufferToImage(device_data, cb_node, src_buffer_state, dst_image_state, regionCount, pRegions,
5914                                          dstImageLayout);
5915        lock.unlock();
5916        device_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
5917    }
5918}
5919
5920VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
5921                                                VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
5922    bool skip = false;
5923    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5924    unique_lock_t lock(global_lock);
5925
5926    auto cb_node = GetCBNode(device_data, commandBuffer);
5927    auto src_image_state = GetImageState(device_data, srcImage);
5928    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
5929    if (cb_node && src_image_state && dst_buffer_state) {
5930        skip = PreCallValidateCmdCopyImageToBuffer(device_data, srcImageLayout, cb_node, src_image_state, dst_buffer_state,
5931                                                   regionCount, pRegions, "vkCmdCopyImageToBuffer()");
5932    } else {
5933        lock.unlock();
5934        assert(0);
5935        // TODO: report VU01262 here, or put in object tracker?
        return;  // State lookup failed; do not fall through to the record/dispatch path below
5936    }
5937    if (!skip) {
5938        PreCallRecordCmdCopyImageToBuffer(device_data, cb_node, src_image_state, dst_buffer_state, regionCount, pRegions,
5939                                          srcImageLayout);
5940        lock.unlock();
5941        device_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
5942    }
5943}
5944
5945static bool PreCallCmdUpdateBuffer(layer_data *device_data, const GLOBAL_CB_NODE *cb_state, const BUFFER_STATE *dst_buffer_state) {
5946    bool skip = false;
5947    skip |= ValidateMemoryIsBoundToBuffer(device_data, dst_buffer_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400046);
5948    // Validate that DST buffer has correct usage flags set
5949    skip |= ValidateBufferUsageFlags(device_data, dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
5950                                     VALIDATION_ERROR_1e400044, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
5951    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdUpdateBuffer()",
5952                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1e402415);
5953    skip |= ValidateCmd(device_data, cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
5954    skip |= insideRenderPass(device_data, cb_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400017);
5955    return skip;
5956}
5957
5958static void PostCallRecordCmdUpdateBuffer(layer_data *device_data, GLOBAL_CB_NODE *cb_state, BUFFER_STATE *dst_buffer_state) {
5959    // Update bindings between buffer and cmd buffer
5960    AddCommandBufferBindingBuffer(device_data, cb_state, dst_buffer_state);
5961    std::function<bool()> function = [=]() {
5962        SetBufferMemoryValid(device_data, dst_buffer_state, true);
5963        return false;
5964    };
5965    cb_state->validate_functions.push_back(function);
5966}
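// Editor's note: the lambda pushed onto validate_functions above is deferred work -- it runs
// when the command buffer is later submitted, which is when marking the destination memory as
// valid becomes meaningful. A sketch of the consumer side (assumed shape; the submit path
// iterates the recorded functions in order):
//
//     for (auto &validate : cb_state->validate_functions) {
//         skip |= validate();  // e.g. SetBufferMemoryValid(..., true) from above
//     }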
5967
5968VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5969                                           VkDeviceSize dataSize, const uint32_t *pData) {
5970    bool skip = false;
5971    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5972    unique_lock_t lock(global_lock);
5973
5974    auto cb_state = GetCBNode(dev_data, commandBuffer);
5975    assert(cb_state);
5976    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
5977    assert(dst_buff_state);
5978    skip |= PreCallCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
5979    lock.unlock();
5980    if (!skip) {
5981        dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
5982        lock.lock();
5983        PostCallRecordCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
5984        lock.unlock();
5985    }
5986}
5987
5988VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
5989                                         VkDeviceSize size, uint32_t data) {
5990    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5991    unique_lock_t lock(global_lock);
5992    auto cb_node = GetCBNode(device_data, commandBuffer);
5993    auto buffer_state = GetBufferState(device_data, dstBuffer);
5994
5995    if (cb_node && buffer_state) {
5996        bool skip = PreCallValidateCmdFillBuffer(device_data, cb_node, buffer_state);
5997        if (!skip) {
5998            PreCallRecordCmdFillBuffer(device_data, cb_node, buffer_state);
5999            lock.unlock();
6000            device_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
6001        }
6002    } else {
6003        lock.unlock();
6004        assert(0);
6005    }
6006}
6007
6008VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
6009                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
6010                                               const VkClearRect *pRects) {
6011    bool skip = false;
6012    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6013    {
6014        lock_guard_t lock(global_lock);
6015        skip = PreCallValidateCmdClearAttachments(dev_data, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
6016    }
6017    if (!skip) dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
6018}
6019
6020VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
6021                                              const VkClearColorValue *pColor, uint32_t rangeCount,
6022                                              const VkImageSubresourceRange *pRanges) {
6023    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6024    unique_lock_t lock(global_lock);
6025
6026    bool skip = PreCallValidateCmdClearColorImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
6027    if (!skip) {
6028        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
6029        lock.unlock();
6030        dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
6031    }
6032}
6033
6034VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
6035                                                     const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
6036                                                     const VkImageSubresourceRange *pRanges) {
6037    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6038    unique_lock_t lock(global_lock);
6039
6040    bool skip = PreCallValidateCmdClearDepthStencilImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
6041    if (!skip) {
6042        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
6043        lock.unlock();
6044        dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
6045    }
6046}
6047
6048VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
6049                                           VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
6050                                           const VkImageResolve *pRegions) {
6051    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6052    unique_lock_t lock(global_lock);
6053
6054    auto cb_node = GetCBNode(dev_data, commandBuffer);
6055    auto src_image_state = GetImageState(dev_data, srcImage);
6056    auto dst_image_state = GetImageState(dev_data, dstImage);
6057
6058    bool skip = PreCallValidateCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions);
6059
6060    if (!skip) {
6061        PreCallRecordCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state);
6062        lock.unlock();
6063        dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
6064                                                 pRegions);
6065    }
6066}
6067
6068VKAPI_ATTR void VKAPI_CALL GetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
6069                                                     VkSubresourceLayout *pLayout) {
6070    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
6071
6072    bool skip = PreCallValidateGetImageSubresourceLayout(device_data, image, pSubresource);
6073    if (!skip) {
6074        device_data->dispatch_table.GetImageSubresourceLayout(device, image, pSubresource, pLayout);
6075    }
6076}
6077
6078bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
6079    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6080    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6081    if (pCB) {
6082        pCB->eventToStageMap[event] = stageMask;
6083    }
6084    auto queue_data = dev_data->queueMap.find(queue);
6085    if (queue_data != dev_data->queueMap.end()) {
6086        queue_data->second.eventToStageMap[event] = stageMask;
6087    }
6088    return false;
6089}
6090
6091VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
6092    bool skip = false;
6093    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6094    unique_lock_t lock(global_lock);
6095    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6096    if (pCB) {
6097        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6098                                      VALIDATION_ERROR_1d402415);
6099        skip |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
6100        skip |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_1d400017);
6101        skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()", VALIDATION_ERROR_1d4008fc,
6102                                             VALIDATION_ERROR_1d4008fe);
6103        auto event_state = GetEventNode(dev_data, event);
6104        if (event_state) {
6105            addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
6106            event_state->cb_bindings.insert(pCB);
6107        }
6108        pCB->events.push_back(event);
6109        if (!pCB->waitedEvents.count(event)) {
6110            pCB->writeEventsBeforeWait.push_back(event);
6111        }
6112        pCB->eventUpdates.emplace_back([=](VkQueue q){return setEventStageMask(q, commandBuffer, event, stageMask);});
6113    }
6114    lock.unlock();
6115    if (!skip) dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
6116}
6117
6118VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
6119    bool skip = false;
6120    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6121    unique_lock_t lock(global_lock);
6122    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6123    if (pCB) {
6124        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6125                                      VALIDATION_ERROR_1c402415);
6126        skip |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
6127        skip |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_1c400017);
6128        skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdResetEvent()", VALIDATION_ERROR_1c400904,
6129                                             VALIDATION_ERROR_1c400906);
6130        auto event_state = GetEventNode(dev_data, event);
6131        if (event_state) {
6132            addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
6133            event_state->cb_bindings.insert(pCB);
6134        }
6135        pCB->events.push_back(event);
6136        if (!pCB->waitedEvents.count(event)) {
6137            pCB->writeEventsBeforeWait.push_back(event);
6138        }
6139        // TODO : Add check for VALIDATION_ERROR_32c008f8
6140        pCB->eventUpdates.emplace_back([=](VkQueue q){return setEventStageMask(q, commandBuffer, event, VkPipelineStageFlags(0));});
6141    }
6142    lock.unlock();
6143    if (!skip) dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
6144}
6145
6146// Validate VUs for Pipeline Barriers that are within a renderPass
6147// Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
6148static bool ValidateRenderPassPipelineBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE const *cb_state,
6149                                               VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
6150                                               uint32_t mem_barrier_count, const VkMemoryBarrier *mem_barriers,
6151                                               uint32_t bufferBarrierCount, const VkBufferMemoryBarrier *pBufferMemBarriers,
6152                                               uint32_t imageMemBarrierCount, const VkImageMemoryBarrier *pImageMemBarriers) {
6153    bool skip = false;
6154    auto rp_state = cb_state->activeRenderPass;
6155    auto rp_handle = HandleToUint64(rp_state->renderPass);
6156    if (!rp_state->hasSelfDependency[cb_state->activeSubpass]) {
6157        skip |=
6158            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6159                    rp_handle, __LINE__, VALIDATION_ERROR_1b800928, "CORE",
6160                    "%s: Barriers cannot be set during subpass %d of renderPass 0x%" PRIx64 " with no self-dependency specified. %s",
6161                    funcName, cb_state->activeSubpass, rp_handle, validation_error_map[VALIDATION_ERROR_1b800928]);
6162    } else {
6163        assert(rp_state->subpass_to_dependency_index[cb_state->activeSubpass] != -1);
6164        const auto &sub_dep = rp_state->createInfo.pDependencies[rp_state->subpass_to_dependency_index[cb_state->activeSubpass]];
6165        const auto &sub_src_stage_mask = sub_dep.srcStageMask;
6166        const auto &sub_dst_stage_mask = sub_dep.dstStageMask;
6167        if (src_stage_mask != (sub_src_stage_mask & src_stage_mask)) {
6168            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6169                            rp_handle, __LINE__, VALIDATION_ERROR_1b80092a, "CORE",
6170                            "%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask(0x%X) of "
6171                            "subpass %d of renderPass 0x%" PRIx64 ". %s",
6172                            funcName, src_stage_mask, sub_src_stage_mask, cb_state->activeSubpass, rp_handle,
6173                            validation_error_map[VALIDATION_ERROR_1b80092a]);
6174        }
6175        if (dst_stage_mask != (sub_dst_stage_mask & dst_stage_mask)) {
6176            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6177                            rp_handle, __LINE__, VALIDATION_ERROR_1b80092c, "CORE",
6178                            "%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask(0x%X) of "
6179                            "subpass %d of renderPass 0x%" PRIx64 ". %s",
6180                            funcName, dst_stage_mask, sub_dst_stage_mask, cb_state->activeSubpass, rp_handle,
6181                            validation_error_map[VALIDATION_ERROR_1b80092c]);
6182        }
6183        const auto &sub_src_access_mask = sub_dep.srcAccessMask;
6184        const auto &sub_dst_access_mask = sub_dep.dstAccessMask;
6185        for (uint32_t i = 0; i < mem_barrier_count; ++i) {
6186            const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask;
6187            if (mb_src_access_mask != (sub_src_access_mask & mb_src_access_mask)) {
6188                skip |=
6189                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6190                            rp_handle, __LINE__, VALIDATION_ERROR_1b80092e, "CORE",
6191                            "%s: Barrier pMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
6192                            "srcAccessMask(0x%X) of "
6193                            "subpass %d of renderPass 0x%" PRIx64 ". %s",
6194                            funcName, i, mb_src_access_mask, sub_src_access_mask, cb_state->activeSubpass, rp_handle,
6195                            validation_error_map[VALIDATION_ERROR_1b80092e]);
6196            }
6197            const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask;
6198            if (mb_dst_access_mask != (sub_dst_access_mask & mb_dst_access_mask)) {
6199                skip |=
6200                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6201                            rp_handle, __LINE__, VALIDATION_ERROR_1b800930, "CORE",
6202                            "%s: Barrier pMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
6203                            "dstAccessMask(0x%X) of "
6204                            "subpass %d of renderPass 0x%" PRIx64 ". %s",
6205                            funcName, i, mb_dst_access_mask, sub_dst_access_mask, cb_state->activeSubpass, rp_handle,
6206                            validation_error_map[VALIDATION_ERROR_1b800930]);
6207            }
6208        }
6209    }
6210    return skip;
6211}
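// Editor's note: the subset tests above all use the idiom  mask != (allowed & mask) , which is
// true exactly when `mask` carries a bit outside `allowed`. Worked example (illustrative):
//
//     VkPipelineStageFlags allowed = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;  // from the subpass dependency
//     VkPipelineStageFlags mask    = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
//                                    VK_PIPELINE_STAGE_TRANSFER_BIT;                 // from the barrier
//     bool out_of_subset = (mask != (allowed & mask));  // true: TRANSFER_BIT is not permitted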
6212
6213static bool ValidateBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE const *cb_state,
6214                             VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
6215                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
6216                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
6217                             const VkImageMemoryBarrier *pImageMemBarriers) {
6218    bool skip = false;
6219    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
6220        auto mem_barrier = &pImageMemBarriers[i];
6221        auto image_data = GetImageState(device_data, mem_barrier->image);
6222        if (image_data) {
6223            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
6224            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
6225            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
6226                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
6227                // be VK_QUEUE_FAMILY_IGNORED
6228                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
6229                    skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6230                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer),
6231                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
6232                                    "%s: Image Barrier for image 0x%" PRIx64
6233                                    " was created with sharingMode of "
6234                                    "VK_SHARING_MODE_CONCURRENT. Src and dst "
6235                                    "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
6236                                    funcName, HandleToUint64(mem_barrier->image));
6237                }
6238            } else {
6239                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
6240                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
6241                // or both be a valid queue family
6242                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
6243                    (src_q_f_index != dst_q_f_index)) {
6244                    skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6245                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer),
6246                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
6247                                    "%s: Image 0x%" PRIx64
6248                                    " was created with sharingMode "
6249                                    "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
6250                                    "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
6251                                    "must be.",
6252                                    funcName, HandleToUint64(mem_barrier->image));
6253                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
6254                           ((src_q_f_index >= device_data->phys_dev_properties.queue_family_properties.size()) ||
6255                            (dst_q_f_index >= device_data->phys_dev_properties.queue_family_properties.size()))) {
6256                    skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6257                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer),
6258                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
6259                                    "%s: Image 0x%" PRIx64
6260                                    " was created with sharingMode "
6261                                    "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
6262                                    " or dstQueueFamilyIndex %d is greater than or equal to the number of "
6263                                    "queueFamilies (" PRINTF_SIZE_T_SPECIFIER ") created for this device.",
6264                                    funcName, HandleToUint64(mem_barrier->image), src_q_f_index, dst_q_f_index,
6265                                    device_data->phys_dev_properties.queue_family_properties.size());
6266                }
6267            }
6268        }
6269
6270        if (mem_barrier->oldLayout != mem_barrier->newLayout) {
6271            if (cb_state->activeRenderPass) {
6272                skip |=
6273                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6274                            HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_1b80093a, "DS",
6275                            "%s: As the Image Barrier for image 0x%" PRIx64
6276                            " is being executed within a render pass instance, oldLayout must equal newLayout yet they are "
6277                            "%s and %s. %s",
6278                            funcName, HandleToUint64(mem_barrier->image), string_VkImageLayout(mem_barrier->oldLayout),
6279                            string_VkImageLayout(mem_barrier->newLayout), validation_error_map[VALIDATION_ERROR_1b80093a]);
6280            }
6281            skip |= ValidateMaskBitsFromLayouts(device_data, cb_state->commandBuffer, mem_barrier->srcAccessMask,
6282                                                mem_barrier->oldLayout, "Source");
6283            skip |= ValidateMaskBitsFromLayouts(device_data, cb_state->commandBuffer, mem_barrier->dstAccessMask,
6284                                                mem_barrier->newLayout, "Dest");
6285        }
6286        if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
6287            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6288                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
6289                            "%s: Image Layout cannot be transitioned to UNDEFINED or "
6290                            "PREINITIALIZED.",
6291                            funcName);
6292        }
6293        if (image_data) {
6294            auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
6295            skip |= ValidateImageAspectMask(device_data, image_data->image, image_data->createInfo.format, aspect_mask, funcName);
6296
6297            std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
6298            skip |= ValidateImageSubresourceRange(device_data, image_data, false, mem_barrier->subresourceRange, funcName,
6299                                                  param_name.c_str());
6300        }
6301    }
6302
6303    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
6304        auto mem_barrier = &pBufferMemBarriers[i];
6305        if (cb_state->activeRenderPass) {
6306            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6307                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
6308                            "%s: Buffer Barriers cannot be used during a render pass.", funcName);
6309        }
6310        if (!mem_barrier) continue;
6311
6312        // Validate buffer barrier queue family indices
6313        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
6314             mem_barrier->srcQueueFamilyIndex >= device_data->phys_dev_properties.queue_family_properties.size()) ||
6315            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
6316             mem_barrier->dstQueueFamilyIndex >= device_data->phys_dev_properties.queue_family_properties.size())) {
6317            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6318                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
6319                            "%s: Buffer Barrier 0x%" PRIx64
6320                            " has a QueueFamilyIndex greater than or equal to "
6321                            "the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
6322                            funcName, HandleToUint64(mem_barrier->buffer),
6323                            device_data->phys_dev_properties.queue_family_properties.size());
6324        }
6325
6326        auto buffer_state = GetBufferState(device_data, mem_barrier->buffer);
6327        if (buffer_state) {
6328            auto buffer_size = buffer_state->requirements.size;
6329            if (mem_barrier->offset >= buffer_size) {
6330                skip |= log_msg(
6331                    device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6332                    HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
6333                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
6334                    funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
6335                    HandleToUint64(buffer_size));
6336            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
6337                skip |=
6338                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6339                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
6340                            "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
6341                            " whose sum is greater than total size 0x%" PRIx64 ".",
6342                            funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
6343                            HandleToUint64(mem_barrier->size), HandleToUint64(buffer_size));
6344            }
6345        }
6346    }
6347    return skip;
6348}
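// Editor's note: a buffer barrier that satisfies the exclusive-sharing and range checks above
// would look like the following (illustrative; assumes a device exposing at least two queue
// families and a `buffer` bound to memory):
//
//     VkBufferMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
//     barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
//     barrier.srcQueueFamilyIndex = 0;            // both valid indices (ownership transfer) ...
//     barrier.dstQueueFamilyIndex = 1;            // ... or both VK_QUEUE_FAMILY_IGNORED
//     barrier.buffer = buffer;
//     barrier.offset = 0;                         // must be < total buffer size
//     barrier.size   = VK_WHOLE_SIZE;             // or offset + size <= total buffer size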
6349
6350bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
6351                            VkPipelineStageFlags sourceStageMask) {
6352    bool skip = false;
6353    VkPipelineStageFlags stageMask = 0;
6354    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
6355    for (uint32_t i = 0; i < eventCount; ++i) {
6356        auto event = pCB->events[firstEventIndex + i];
6357        auto queue_data = dev_data->queueMap.find(queue);
6358        if (queue_data == dev_data->queueMap.end()) return false;
6359        auto event_data = queue_data->second.eventToStageMap.find(event);
6360        if (event_data != queue_data->second.eventToStageMap.end()) {
6361            stageMask |= event_data->second;
6362        } else {
6363            auto global_event_data = GetEventNode(dev_data, event);
6364            if (!global_event_data) {
6365                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
6366                                HandleToUint64(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
6367                                "Event 0x%" PRIx64 " cannot be waited on if it has never been set.", HandleToUint64(event));
6368            } else {
6369                stageMask |= global_event_data->stageMask;
6370            }
6371        }
6372    }
6373    // TODO: Need to validate that host_bit is only set if set event is called
6374    // but set event can be called at any time.
6375    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
6376        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6377                        HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_1e62d401, "DS",
6378                        "Submitting cmdbuffer with call to VkCmdWaitEvents "
6379                        "using srcStageMask 0x%X, which must be the bitwise "
6380                        "OR of the stageMask parameters used in calls to "
6381                        "vkCmdSetEvent (plus VK_PIPELINE_STAGE_HOST_BIT if "
6382                        "used with vkSetEvent), but instead is 0x%X. %s",
6383                        sourceStageMask, stageMask, validation_error_map[VALIDATION_ERROR_1e62d401]);
6384    }
6385    return skip;
6386}
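// Editor's note: worked example of the rule enforced above (illustrative). If a command buffer
// recorded two vkCmdSetEvent calls with the TRANSFER and COMPUTE stage masks, the matching
// vkCmdWaitEvents must use exactly
//
//     VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT |
//                                         VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
//
// optionally OR'd with VK_PIPELINE_STAGE_HOST_BIT when an event is signaled from the host via
// vkSetEvent.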
6387
6388// Note that we only check bits that HAVE required queue flags -- "don't care" entries are skipped
6389static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
6390    {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
6391    {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
6392    {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
6393    {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
6394    {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
6395    {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
6396    {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
6397    {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
6398    {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
6399    {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
6400    {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
6401    {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
6402    {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
6403    {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
6404
6405static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
6406                                                            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
6407                                                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
6408                                                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
6409                                                            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
6410                                                            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
6411                                                            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
6412                                                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
6413                                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
6414                                                            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
6415                                                            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
6416                                                            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
6417                                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
6418                                                            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
6419
6420bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
6421                                      VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
6422                                      UNIQUE_VALIDATION_ERROR_CODE error_code) {
6423    bool skip = false;
6424    // Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags
6425    for (const auto &item : stage_flag_bit_array) {
6426        if (stage_mask & item) {
6427            if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
6428                skip |=
6429                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6430                            HandleToUint64(command_buffer), __LINE__, error_code, "DL",
6431                            "%s(): %s flag %s is not compatible with the queue family properties of this "
6432                            "command buffer. %s",
6433                            function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)),
6434                            validation_error_map[error_code]);
6435            }
6436        }
6437    }
6438    return skip;
6439}
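// Editor's note: example lookup through the table above (illustrative). On a transfer-only
// queue family, a stage mask containing the compute stage fails:
//
//     VkQueueFlags queue_flags = VK_QUEUE_TRANSFER_BIT;
//     VkPipelineStageFlags stage_mask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
//     // supported_pipeline_stages_table[VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT] == VK_QUEUE_COMPUTE_BIT
//     // (VK_QUEUE_COMPUTE_BIT & queue_flags) == 0  ->  error logged against the command buffer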
6440
6441bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE const *cb_state,
6442                                                VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
6443                                                const char *function, UNIQUE_VALIDATION_ERROR_CODE error_code) {
6444    bool skip = false;
6445    uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
6446    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
6447    auto physical_device_state = GetPhysicalDeviceState(instance_data, dev_data->physical_device);
6448
6449    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
6450    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
6451    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
6452
6453    if (queue_family_index < physical_device_state->queue_family_properties.size()) {
6454        VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
6455
6456        if ((source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
6457            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
6458                                                     function, "srcStageMask", error_code);
6459        }
6460        if ((dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
6461            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
6462                                                     function, "dstStageMask", error_code);
6463        }
6464    }
6465    return skip;
6466}
6467
6468VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
6469                                         VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
6470                                         uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
6471                                         uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
6472                                         uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
6473    bool skip = false;
6474    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6475    unique_lock_t lock(global_lock);
6476    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
6477    if (cb_state) {
6478        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, "vkCmdWaitEvents",
6479                                                           VALIDATION_ERROR_1e600918);
6480        skip |= ValidateStageMaskGsTsEnables(dev_data, sourceStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e60090e,
6481                                             VALIDATION_ERROR_1e600912);
6482        skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e600910,
6483                                             VALIDATION_ERROR_1e600914);
6484        auto first_event_index = cb_state->events.size();
6485        for (uint32_t i = 0; i < eventCount; ++i) {
6486            auto event_state = GetEventNode(dev_data, pEvents[i]);
6487            if (event_state) {
6488                addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(pEvents[i]), kVulkanObjectTypeEvent}, cb_state);
6489                event_state->cb_bindings.insert(cb_state);
6490            }
6491            cb_state->waitedEvents.insert(pEvents[i]);
6492            cb_state->events.push_back(pEvents[i]);
6493        }
6494        cb_state->eventUpdates.emplace_back([=](VkQueue q){
6495            return validateEventStageMask(q, cb_state, eventCount, first_event_index, sourceStageMask);
6496        });
6497        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6498                                      VALIDATION_ERROR_1e602415);
6499        skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
6500        skip |= ValidateBarriersToImages(dev_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
6501        if (!skip) {
6502            TransitionImageLayouts(dev_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
6503        }
6504        skip |= ValidateBarriers(dev_data, "vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount,
6505                                 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
6506                                 pImageMemoryBarriers);
6507    }
6508    lock.unlock();
6509    if (!skip)
6510        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
6511                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
6512                                               imageMemoryBarrierCount, pImageMemoryBarriers);
6513}
6514
6515static bool PreCallValidateCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE const *cb_state,
6516                                              VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
6517                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
6518                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
6519                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
6520    bool skip = false;
6521    skip |= ValidateStageMasksAgainstQueueCapabilities(device_data, cb_state, srcStageMask, dstStageMask, "vkCmdPipelineBarrier",
6522                                                       VALIDATION_ERROR_1b80093e);
6523    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdPipelineBarrier()",
6524                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b802415);
6525    skip |= ValidateCmd(device_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
6526    skip |= ValidateStageMaskGsTsEnables(device_data, srcStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800920,
6527                                         VALIDATION_ERROR_1b800924);
6528    skip |= ValidateStageMaskGsTsEnables(device_data, dstStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800922,
6529                                         VALIDATION_ERROR_1b800926);
6530    skip |=
6531        ValidateBarriersToImages(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()");
6532    if (cb_state->activeRenderPass) {
6533        skip |= ValidateRenderPassPipelineBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask,
6534                                                   memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
6535                                                   pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
6536    }
6537    skip |= ValidateBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount,
6538                             pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
6539                             pImageMemoryBarriers);
6540    return skip;
6541}
6542
6543static void PreCallRecordCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
6544                                            uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
6545    TransitionImageLayouts(device_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
6546}
6547
6548VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
6549                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
6550                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
6551                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
6552                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
6553    bool skip = false;
6554    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6555    unique_lock_t lock(global_lock);
6556    GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
6557    if (cb_state) {
6558        skip |= PreCallValidateCmdPipelineBarrier(device_data, cb_state, srcStageMask, dstStageMask, memoryBarrierCount,
6559                                                  pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
6560                                                  imageMemoryBarrierCount, pImageMemoryBarriers);
6561        if (!skip) {
6562            PreCallRecordCmdPipelineBarrier(device_data, cb_state, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
6563        }
6564    } else {
6565        assert(0);
6566    }
6567    lock.unlock();
6568    if (!skip) {
6569        device_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
6570                                                       pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
6571                                                       imageMemoryBarrierCount, pImageMemoryBarriers);
6572    }
6573}
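// Editor's note: an application-side call that passes the render-pass barrier checks above
// (illustrative; assumes the active subpass declares a self-dependency whose stage and access
// masks are supersets of the ones used here):
//
//     VkMemoryBarrier mb = {VK_STRUCTURE_TYPE_MEMORY_BARRIER, nullptr,
//                           VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT};
//     vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
//                          VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT,
//                          1, &mb, 0, nullptr, 0, nullptr);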
6574
6575static bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
6576    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6577    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6578    if (pCB) {
6579        pCB->queryToStateMap[object] = value;
6580    }
6581    auto queue_data = dev_data->queueMap.find(queue);
6582    if (queue_data != dev_data->queueMap.end()) {
6583        queue_data->second.queryToStateMap[object] = value;
6584    }
6585    return false;
6586}
6587
6588VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
6589    bool skip = false;
6590    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6591    unique_lock_t lock(global_lock);
6592    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6593    if (pCB) {
6594        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdBeginQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6595                                      VALIDATION_ERROR_17802415);
6596        skip |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
6597    }
6598    lock.unlock();
6599
6600    if (skip) return;
6601
6602    dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
6603
6604    lock.lock();
6605    if (pCB) {
6606        QueryObject query = {queryPool, slot};
6607        pCB->activeQueries.insert(query);
6608        pCB->startedQueries.insert(query);
6609        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
6610                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, pCB);
6611    }
6612}
6613
6614VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
6615    bool skip = false;
6616    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6617    unique_lock_t lock(global_lock);
6618    QueryObject query = {queryPool, slot};
6619    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
6620    if (cb_state) {
6621        if (!cb_state->activeQueries.count(query)) {
6622            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6623                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1ae00652, "DS",
6624                            "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d. %s",
6625                            HandleToUint64(queryPool), slot, validation_error_map[VALIDATION_ERROR_1ae00652]);
6626        }
6627        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdEndQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6628                                      VALIDATION_ERROR_1ae02415);
6629        skip |= ValidateCmd(dev_data, cb_state, CMD_ENDQUERY, "vkCmdEndQuery()");
6630    }
6631    lock.unlock();
6632
6633    if (skip) return;
6634
6635    dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
6636
6637    lock.lock();
6638    if (cb_state) {
6639        cb_state->activeQueries.erase(query);
6640        cb_state->queryUpdates.emplace_back([=](VkQueue q){return setQueryState(q, commandBuffer, query, true);});
6641        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
6642                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
6643    }
6644}
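// Editor's note: like eventUpdates, the queryUpdates lambdas recorded above are deferred until
// queue submission, where the target queue is finally known. A sketch of the consumer side
// (assumed shape):
//
//     for (auto &update : cb_state->queryUpdates) {
//         skip |= update(queue);  // e.g. setQueryState(queue, commandBuffer, query, true)
//     }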
6645
6646VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
6647                                             uint32_t queryCount) {
6648    bool skip = false;
6649    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6650    unique_lock_t lock(global_lock);
6651    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
6652    skip |= insideRenderPass(dev_data, cb_state, "vkCmdResetQueryPool()", VALIDATION_ERROR_1c600017);
6653    skip |= ValidateCmd(dev_data, cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
6654    skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6655                                  VALIDATION_ERROR_1c602415);
6656    lock.unlock();
6657
6658    if (skip) return;
6659
6660    dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
6661
6662    lock.lock();
6663    for (uint32_t i = 0; i < queryCount; i++) {
6664        QueryObject query = {queryPool, firstQuery + i};
6665        cb_state->waitedEventsBeforeQueryReset[query] = cb_state->waitedEvents;
6666        cb_state->queryUpdates.emplace_back([=](VkQueue q){return setQueryState(q, commandBuffer, query, false);});
6667    }
6668    addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
6669                            {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
6670}
6671
6672static bool IsQueryInvalid(layer_data *dev_data, QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) {
6673    QueryObject query = {queryPool, queryIndex};
6674    auto query_data = queue_data->queryToStateMap.find(query);
6675    if (query_data != queue_data->queryToStateMap.end()) {
6676        if (!query_data->second) return true;
6677    } else {
6678        auto it = dev_data->queryToStateMap.find(query);
6679        if (it == dev_data->queryToStateMap.end() || !it->second)
6680            return true;
6681    }
6682
6683    return false;
6684}
6685
6686static bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
6687    bool skip = false;
6688    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
6689    auto queue_data = GetQueueState(dev_data, queue);
6690    if (!queue_data) return false;
6691    for (uint32_t i = 0; i < queryCount; i++) {
6692        if (IsQueryInvalid(dev_data, queue_data, queryPool, firstQuery + i)) {
6693            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6694                            HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
6695                            "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
6696                            HandleToUint64(queryPool), firstQuery + i);
6697        }
6698    }
6699    return skip;
6700}
6701
6702VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
6703                                                   uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
6704                                                   VkDeviceSize stride, VkQueryResultFlags flags) {
6705    bool skip = false;
6706    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6707    unique_lock_t lock(global_lock);
6708
6709    auto cb_node = GetCBNode(dev_data, commandBuffer);
6710    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
6711    if (cb_node && dst_buff_state) {
6712        skip |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400674);
6713        // Validate that DST buffer has correct usage flags set
6714        skip |=
6715            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_19400672,
6716                                     "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
6717        skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdCopyQueryPoolResults()",
6718                                      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_19402415);
6719        skip |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
6720        skip |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400017);
6721    }
6722    lock.unlock();
6723
6724    if (skip) return;
6725
6726    dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
6727                                                     stride, flags);
6728
6729    lock.lock();
6730    if (cb_node && dst_buff_state) {
6731        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
6732        cb_node->validate_functions.emplace_back([=]() {
6733            SetBufferMemoryValid(dev_data, dst_buff_state, true);
6734            return false;
6735        });
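        // Deferred check: queryUpdates lambdas run at queue submit time, once the owning queue is known.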
6736        cb_node->queryUpdates.emplace_back([=](VkQueue q) {
6737            return validateQuery(q, cb_node, queryPool, firstQuery, queryCount);
6738        });
6739        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
6740                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_node);
6741    }
6742}
6743
6744VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
6745                                            uint32_t offset, uint32_t size, const void *pValues) {
6746    bool skip = false;
6747    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6748    unique_lock_t lock(global_lock);
6749    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
6750    if (cb_state) {
6751        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6752                                      VALIDATION_ERROR_1bc02415);
6753        skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
6754    }
6755    skip |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
6756    if (0 == stageFlags) {
6757        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6758                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1bc2dc03, "DS",
6759                        "vkCmdPushConstants() call has no stageFlags set. %s", validation_error_map[VALIDATION_ERROR_1bc2dc03]);
6760    }
6761
6762    // Check if specified push constant range falls within a pipeline-defined range which has matching stageFlags.
6763    // The spec doesn't seem to disallow having multiple push constant ranges with the
6764    // same offset and size, but different stageFlags.  So we can't just check the
6765    // stageFlags in the first range with matching offset and size.
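    // For example (hypothetical layout), these two ranges may coexist, so a call with
    // stageFlags == VK_SHADER_STAGE_FRAGMENT_BIT must match the second range rather than any
    // range that merely has offset 0 and size 16:
    //     VkPushConstantRange ranges[2] = {
    //         {VK_SHADER_STAGE_VERTEX_BIT, 0, 16},
    //         {VK_SHADER_STAGE_FRAGMENT_BIT, 0, 16},
    //     };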
6766    if (!skip) {
6767        const auto &ranges = getPipelineLayout(dev_data, layout)->push_constant_ranges;
6768        bool found_matching_range = false;
6769        for (const auto &range : ranges) {
6770            if ((stageFlags == range.stageFlags) && (offset >= range.offset) && (offset + size <= range.offset + range.size)) {
6771                found_matching_range = true;
6772                break;
6773            }
6774        }
6775        if (!found_matching_range) {
6776            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6777                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1bc002de, "DS",
6778                            "vkCmdPushConstants() stageFlags = 0x%" PRIx32
6779                            " do not match the stageFlags in any of the ranges with"
6780                            " offset = %d and size = %d in pipeline layout 0x%" PRIx64 ". %s",
6781                            (uint32_t)stageFlags, offset, size, HandleToUint64(layout),
6782                            validation_error_map[VALIDATION_ERROR_1bc002de]);
6783        }
6784    }
6785    lock.unlock();
6786    if (!skip) dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
6787}
6788
6789VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
6790                                             VkQueryPool queryPool, uint32_t slot) {
6791    bool skip = false;
6792    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6793    unique_lock_t lock(global_lock);
6794    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
6795    if (cb_state) {
6796        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWriteTimestamp()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6797                                      VALIDATION_ERROR_1e802415);
6798        skip |= ValidateCmd(dev_data, cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
6799    }
6800    lock.unlock();
6801
6802    if (skip) return;
6803
6804    dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
6805
6806    lock.lock();
6807    if (cb_state) {
6808        QueryObject query = {queryPool, slot};
6809        cb_state->queryUpdates.emplace_back([=](VkQueue q) {return setQueryState(q, commandBuffer, query, true);});
6810    }
6811}
6812
6813static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
6814                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
6815                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
6816    bool skip = false;
6817
6818    for (uint32_t attach = 0; attach < count; attach++) {
6819        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
6820            // Attachment counts are verified elsewhere, but prevent an invalid access
6821            if (attachments[attach].attachment < fbci->attachmentCount) {
6822                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
6823                auto view_state = GetImageViewState(dev_data, *image_view);
6824                if (view_state) {
                    auto image_state = GetImageState(dev_data, view_state->create_info.image);
                    if (image_state != nullptr) {
                        const VkImageCreateInfo *ici = &image_state->createInfo;
                        if ((ici->usage & usage_flag) == 0) {
6828                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6829                                            VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, error_code, "DS",
6830                                            "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
6831                                            "IMAGE_USAGE flags (%s). %s",
6832                                            attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag),
6833                                            validation_error_map[error_code]);
6834                        }
6835                    }
6836                }
6837            }
6838        }
6839    }
6840    return skip;
6841}
6842
6843// Validate VkFramebufferCreateInfo which includes:
6844// 1. attachmentCount equals renderPass attachmentCount
6845// 2. corresponding framebuffer and renderpass attachments have matching formats
6846// 3. corresponding framebuffer and renderpass attachments have matching sample counts
6847// 4. fb attachments only have a single mip level
6848// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
6850// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
6851// 8. fb dimensions are within physical device limits
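// As an illustration (hypothetical handles and extents), a create info satisfying 1-8 for a
// one-attachment render pass could look like:
//     VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO};
//     fbci.renderPass = render_pass;  // render pass created with attachmentCount == 1
//     fbci.attachmentCount = 1;
//     fbci.pAttachments = &view;      // single-mip-level view created with the identity swizzle
//     fbci.width = 1024;              // <= the view's mip extent and maxFramebufferWidth
//     fbci.height = 768;              // <= the view's mip extent and maxFramebufferHeight
//     fbci.layers = 1;                // <= the view's layerCount and maxFramebufferLayers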
6852static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
6853    bool skip = false;
6854
6855    auto rp_state = GetRenderPassState(dev_data, pCreateInfo->renderPass);
6856    if (rp_state) {
6857        const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
6858        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
6859            skip |= log_msg(
6860                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
6861                HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006d8, "DS",
6862                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
6863                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer. %s",
6864                pCreateInfo->attachmentCount, rpci->attachmentCount, HandleToUint64(pCreateInfo->renderPass),
6865                validation_error_map[VALIDATION_ERROR_094006d8]);
6866        } else {
6867            // attachmentCounts match, so make sure corresponding attachment details line up
6868            const VkImageView *image_views = pCreateInfo->pAttachments;
6869            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = GetImageViewState(dev_data, image_views[i]);
                if (!view_state) continue;  // Guard against an invalid or destroyed image view handle
                auto &ivci = view_state->create_info;
6872                if (ivci.format != rpci->pAttachments[i].format) {
6873                    skip |= log_msg(
6874                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
6875                        HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006e0, "DS",
6876                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
6877                        "the format of "
6878                        "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
6879                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
6880                        HandleToUint64(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_094006e0]);
6881                }
6882                const VkImageCreateInfo *ici = &GetImageState(dev_data, ivci.image)->createInfo;
6883                if (ici->samples != rpci->pAttachments[i].samples) {
6884                    skip |= log_msg(
6885                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
6886                        HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006e2, "DS",
6887                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
6888                        "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
6889                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
6890                        HandleToUint64(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_094006e2]);
6891                }
6892                // Verify that view only has a single mip level
6893                if (ivci.subresourceRange.levelCount != 1) {
6894                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
6895                                    0, __LINE__, VALIDATION_ERROR_094006e6, "DS",
6896                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
6897                                    "but only a single mip level (levelCount ==  1) is allowed when creating a Framebuffer. %s",
6898                                    i, ivci.subresourceRange.levelCount, validation_error_map[VALIDATION_ERROR_094006e6]);
6899                }
6900                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
6901                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
6902                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
6903                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
6904                    (mip_height < pCreateInfo->height)) {
6905                    skip |= log_msg(
6906                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
6907                        VALIDATION_ERROR_094006e4, "DS",
6908                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
6909                        "than the corresponding framebuffer dimensions. Here are the respective dimensions for attachment #%u, "
6910                        "framebuffer:\n"
6911                        "width: %u, %u\n"
6912                        "height: %u, %u\n"
6913                        "layerCount: %u, %u\n%s",
6914                        i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height, pCreateInfo->height,
6915                        ivci.subresourceRange.layerCount, pCreateInfo->layers, validation_error_map[VALIDATION_ERROR_094006e4]);
6916                }
6917                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
6918                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
6919                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
6920                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
6921                    skip |= log_msg(
6922                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
6923                        VALIDATION_ERROR_094006e8, "DS",
6924                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identy swizzle. All framebuffer "
6925                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
6926                        "r swizzle = %s\n"
6927                        "g swizzle = %s\n"
6928                        "b swizzle = %s\n"
6929                        "a swizzle = %s\n"
6930                        "%s",
6931                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
6932                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a),
6933                        validation_error_map[VALIDATION_ERROR_094006e8]);
6934                }
6935            }
6936        }
6937        // Verify correct attachment usage flags
6938        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
6939            // Verify input attachments:
6940            skip |=
6941                MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
6942                           pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_094006de);
6943            // Verify color attachments:
6944            skip |=
6945                MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
6946                           pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_094006da);
6947            // Verify depth/stencil attachments:
6948            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
6949                skip |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
6950                                   VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_094006dc);
6951            }
6952        }
6953    }
6954    // Verify FB dimensions are within physical device limits
6955    if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
6956        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
6957                        VALIDATION_ERROR_094006ec, "DS",
6958                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. "
6959                        "Requested width: %u, device max: %u\n"
6960                        "%s",
6961                        pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
6962                        validation_error_map[VALIDATION_ERROR_094006ec]);
6963    }
6964    if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
6965        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
6966                        VALIDATION_ERROR_094006f0, "DS",
6967                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. "
6968                        "Requested height: %u, device max: %u\n"
6969                        "%s",
6970                        pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
6971                        validation_error_map[VALIDATION_ERROR_094006f0]);
6972    }
6973    if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
6974        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
6975                        VALIDATION_ERROR_094006f4, "DS",
6976                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. "
6977                        "Requested layers: %u, device max: %u\n"
6978                        "%s",
6979                        pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers,
6980                        validation_error_map[VALIDATION_ERROR_094006f4]);
6981    }
6982    // Verify FB dimensions are greater than zero
    if (pCreateInfo->width == 0) {
6984        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
6985                        VALIDATION_ERROR_094006ea, "DS",
6986                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero. %s",
6987                        validation_error_map[VALIDATION_ERROR_094006ea]);
6988    }
    if (pCreateInfo->height == 0) {
6990        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
6991                        VALIDATION_ERROR_094006ee, "DS",
6992                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero. %s",
6993                        validation_error_map[VALIDATION_ERROR_094006ee]);
6994    }
    if (pCreateInfo->layers == 0) {
6996        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
6997                        VALIDATION_ERROR_094006f2, "DS",
6998                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero. %s",
6999                        validation_error_map[VALIDATION_ERROR_094006f2]);
7000    }
7001    return skip;
7002}
7003
// Validate VkFramebufferCreateInfo state prior to calling down the chain to create the Framebuffer object.
// Returns true if an error is encountered and the callback returns true, in which case the call down the
// chain should be skipped; false indicates that the call down the chain should proceed.
7007static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
7008    // TODO : Verify that renderPass FB is created with is compatible with FB
7009    bool skip = false;
7010    skip |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
7011    return skip;
7012}
7013
7014// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
7015static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
7016    // Shadow create info and store in map
7017    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
7018        new FRAMEBUFFER_STATE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));
7019
7020    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
7021        VkImageView view = pCreateInfo->pAttachments[i];
7022        auto view_state = GetImageViewState(dev_data, view);
7023        if (!view_state) {
7024            continue;
7025        }
7026        MT_FB_ATTACHMENT_INFO fb_info;
7027        fb_info.view_state = view_state;
7028        fb_info.image = view_state->create_info.image;
7029        fb_state->attachments.push_back(fb_info);
7030    }
7031    dev_data->frameBufferMap[fb] = std::move(fb_state);
7032}
7033
7034VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
7035                                                 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
7036    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
7037    unique_lock_t lock(global_lock);
7038    bool skip = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
7039    lock.unlock();
7040
7041    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
7042
7043    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
7044
7045    if (VK_SUCCESS == result) {
7046        lock.lock();
7047        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
7048        lock.unlock();
7049    }
7050    return result;
7051}
7052
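// Depth-first search over the subpass DAG's prev edges: returns true if subpass 'index' depends,
// directly or transitively, on subpass 'dependent'.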
7053static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
7054                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node, we have not found a dependency path, so return false.
7056    if (processed_nodes.count(index)) return false;
7057    processed_nodes.insert(index);
7058    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists, return true; otherwise recurse on the previous nodes.
7060    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
7061        for (auto elem : node.prev) {
7062            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
7063        }
7064    } else {
7065        return true;
7066    }
7067    return false;
7068}
7069
7070static bool CheckDependencyExists(const layer_data *dev_data, const uint32_t subpass,
7071                                  const std::vector<uint32_t> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node,
7072                                  bool &skip) {
7073    bool result = true;
7074    // Loop through all subpasses that share the same attachment and make sure a dependency exists
7075    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (subpass == dependent_subpasses[k]) continue;
7077        const DAGNode &node = subpass_to_node[subpass];
7078        // Check for a specified dependency between the two nodes. If one exists we are done.
7079        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
7080        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
7081        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no direct dependency exists, an implicit one still might. If not, report an error.
7083            std::unordered_set<uint32_t> processed_nodes;
7084            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
7085                  FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
7086                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7087                                __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
7088                                "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
7089                                dependent_subpasses[k]);
7090                result = false;
7091            }
7092        }
7093    }
7094    return result;
7095}
7096
7097static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
7098                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
7099    const DAGNode &node = subpass_to_node[index];
7100    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
7101    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
7102    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
7103        if (attachment == subpass.pColorAttachments[j].attachment) return true;
7104    }
7105    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7106        if (attachment == subpass.pInputAttachments[j].attachment) return true;
7107    }
7108    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
7109        if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
7110    }
7111    bool result = false;
7112    // Loop through previous nodes and see if any of them write to the attachment.
7113    for (auto elem : node.prev) {
7114        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
7115    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
7117    if (result && depth > 0) {
7118        bool has_preserved = false;
7119        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
7120            if (subpass.pPreserveAttachments[j] == attachment) {
7121                has_preserved = true;
7122                break;
7123            }
7124        }
7125        if (!has_preserved) {
7126            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7127                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
7128                            "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
7129        }
7130    }
7131    return result;
7132}
7133
7134template <class T>
7135bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    // Two half-open ranges [offset, offset + size) overlap iff each begins before the other ends.
    return ((offset1 + size1) > offset2) && ((offset2 + size2) > offset1);
7138}
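// e.g. offset/size pairs (0, 4) and (2, 6) overlap, while (0, 4) and (4, 8) do not (ranges are half-open).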
7139
7140bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
7141    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
7142            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
7143}
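// Two views of the same image alias only when both their mip ranges and their array layer ranges overlap.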
7144
7145static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
7146                                 RENDER_PASS_STATE const *renderPass) {
7147    bool skip = false;
7148    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
7149    auto const pCreateInfo = renderPass->createInfo.ptr();
7150    auto const &subpass_to_node = renderPass->subpassToNode;
7151    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
7152    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
7153    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
7154    // Find overlapping attachments
7155    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
7156        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
7157            VkImageView viewi = pFramebufferInfo->pAttachments[i];
7158            VkImageView viewj = pFramebufferInfo->pAttachments[j];
7159            if (viewi == viewj) {
7160                overlapping_attachments[i].push_back(j);
7161                overlapping_attachments[j].push_back(i);
7162                continue;
7163            }
7164            auto view_state_i = GetImageViewState(dev_data, viewi);
7165            auto view_state_j = GetImageViewState(dev_data, viewj);
7166            if (!view_state_i || !view_state_j) {
7167                continue;
7168            }
7169            auto view_ci_i = view_state_i->create_info;
7170            auto view_ci_j = view_state_j->create_info;
7171            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
7172                overlapping_attachments[i].push_back(j);
7173                overlapping_attachments[j].push_back(i);
7174                continue;
7175            }
7176            auto image_data_i = GetImageState(dev_data, view_ci_i.image);
7177            auto image_data_j = GetImageState(dev_data, view_ci_j.image);
7178            if (!image_data_i || !image_data_j) {
7179                continue;
7180            }
7181            if (image_data_i->binding.mem == image_data_j->binding.mem &&
7182                isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
7183                                   image_data_j->binding.size)) {
7184                overlapping_attachments[i].push_back(j);
7185                overlapping_attachments[j].push_back(i);
7186            }
7187        }
7188    }
7189    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
7190        uint32_t attachment = i;
7191        for (auto other_attachment : overlapping_attachments[i]) {
7192            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
7193                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
7194                                HandleToUint64(framebuffer->framebuffer), __LINE__, VALIDATION_ERROR_12200682, "DS",
7195                                "Attachment %d aliases attachment %d but doesn't "
7196                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
7197                                attachment, other_attachment, validation_error_map[VALIDATION_ERROR_12200682]);
7198            }
7199            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
7200                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
7201                                HandleToUint64(framebuffer->framebuffer), __LINE__, VALIDATION_ERROR_12200682, "DS",
7202                                "Attachment %d aliases attachment %d but doesn't "
7203                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
7204                                other_attachment, attachment, validation_error_map[VALIDATION_ERROR_12200682]);
7205            }
7206        }
7207    }
    // For each attachment, find the subpasses that use it.
7209    unordered_set<uint32_t> attachmentIndices;
7210    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7211        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
7212        attachmentIndices.clear();
7213        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7214            uint32_t attachment = subpass.pInputAttachments[j].attachment;
7215            if (attachment == VK_ATTACHMENT_UNUSED) continue;
7216            input_attachment_to_subpass[attachment].push_back(i);
7217            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
7218                input_attachment_to_subpass[overlapping_attachment].push_back(i);
7219            }
7220        }
7221        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
7222            uint32_t attachment = subpass.pColorAttachments[j].attachment;
7223            if (attachment == VK_ATTACHMENT_UNUSED) continue;
7224            output_attachment_to_subpass[attachment].push_back(i);
7225            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
7226                output_attachment_to_subpass[overlapping_attachment].push_back(i);
7227            }
7228            attachmentIndices.insert(attachment);
7229        }
7230        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
7231            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
7232            output_attachment_to_subpass[attachment].push_back(i);
7233            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
7234                output_attachment_to_subpass[overlapping_attachment].push_back(i);
7235            }
7236
7237            if (attachmentIndices.count(attachment)) {
7238                skip |=
7239                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7240                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
7241                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
7242            }
7243        }
7244    }
    // If a dependency is needed, make sure one exists
7246    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7247        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
7248        // If the attachment is an input then all subpasses that output must have a dependency relationship
7249        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7250            uint32_t attachment = subpass.pInputAttachments[j].attachment;
7251            if (attachment == VK_ATTACHMENT_UNUSED) continue;
7252            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
7253        }
7254        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
7255        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
7256            uint32_t attachment = subpass.pColorAttachments[j].attachment;
7257            if (attachment == VK_ATTACHMENT_UNUSED) continue;
7258            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
7259            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
7260        }
7261        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
7262            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
7263            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
7264            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
7265        }
7266    }
    // Loop through implicit dependencies: if this pass reads an attachment, make sure the attachment has
    // been preserved by every pass since it was written.
7269    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7270        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
7271        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7272            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
7273        }
7274    }
7275    return skip;
7276}
7277
7278static bool CreatePassDAG(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo,
7279                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency,
7280                          std::vector<int32_t> &subpass_to_dep_index) {
7281    bool skip = false;
7282    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7283        DAGNode &subpass_node = subpass_to_node[i];
7284        subpass_node.pass = i;
7285        subpass_to_dep_index[i] = -1;  // Default to no dependency and overwrite below as needed
7286    }
7287    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
7288        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
7289        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
7290            if (dependency.srcSubpass == dependency.dstSubpass) {
7291                skip |=
7292                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7293                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
7294            }
7295        } else if (dependency.srcSubpass > dependency.dstSubpass) {
7296            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7297                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
7298                            "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
7299        } else if (dependency.srcSubpass == dependency.dstSubpass) {
7300            has_self_dependency[dependency.srcSubpass] = true;
7301        } else {
7302            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
7303            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
7304        }
7305        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
7306            subpass_to_dep_index[dependency.srcSubpass] = i;
7307        }
7308    }
7309    return skip;
7310}
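// For a hypothetical render pass with two subpasses and one dependency {srcSubpass = 0, dstSubpass = 1},
// this yields subpass_to_node[0].next == {1}, subpass_to_node[1].prev == {0},
// has_self_dependency == {false, false}, and subpass_to_dep_index == {0, -1}.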
7311
7312VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
7313                                                  const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
7314    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
7315    bool spirv_valid;
7316
7317    if (PreCallValidateCreateShaderModule(dev_data, pCreateInfo, &spirv_valid))
7318        return VK_ERROR_VALIDATION_FAILED_EXT;
7319
7320    VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
7321
7322    if (res == VK_SUCCESS) {
7323        lock_guard_t lock(global_lock);
7324        unique_ptr<shader_module> new_shader_module(spirv_valid ? new shader_module(pCreateInfo) : new shader_module());
7325        dev_data->shaderModuleMap[*pShaderModule] = std::move(new_shader_module);
7326    }
7327    return res;
7328}
7329
7330static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
7331    bool skip = false;
7332    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
7333        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
7334                        VALIDATION_ERROR_12200684, "DS",
7335                        "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d. %s", type,
7336                        attachment, attachment_count, validation_error_map[VALIDATION_ERROR_12200684]);
7337    }
7338    return skip;
7339}
7340
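// x & (x - 1) clears the lowest set bit, so the expression is nonzero only for single-bit values.
// VkSampleCountFlagBits values are single bits, so OR-ing all attachments' sample counts together
// is a power of two exactly when every attachment uses the same count.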
7341static bool IsPowerOfTwo(unsigned x) { return x && !(x & (x - 1)); }
7342
7343static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
7344    bool skip = false;
7345    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7346        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
7347        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
7348            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7349                            __LINE__, VALIDATION_ERROR_14000698, "DS",
7350                            "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS. %s", i,
7351                            validation_error_map[VALIDATION_ERROR_14000698]);
7352        }
7353
7354        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
7355            uint32_t attachment = subpass.pPreserveAttachments[j];
7356            if (attachment == VK_ATTACHMENT_UNUSED) {
7357                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7358                                __LINE__, VALIDATION_ERROR_140006aa, "DS",
7359                                "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED. %s", j,
7360                                validation_error_map[VALIDATION_ERROR_140006aa]);
7361            } else {
7362                skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
7363
7364                bool found = (subpass.pDepthStencilAttachment != NULL && subpass.pDepthStencilAttachment->attachment == attachment);
7365                for (uint32_t r = 0; !found && r < subpass.inputAttachmentCount; ++r) {
7366                    found = (subpass.pInputAttachments[r].attachment == attachment);
7367                }
7368                for (uint32_t r = 0; !found && r < subpass.colorAttachmentCount; ++r) {
7369                    found = (subpass.pColorAttachments[r].attachment == attachment) ||
7370                            (subpass.pResolveAttachments != NULL && subpass.pResolveAttachments[r].attachment == attachment);
7371                }
7372                if (found) {
7373                    skip |= log_msg(
7374                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
7375                        VALIDATION_ERROR_140006ac, "DS",
7376                        "CreateRenderPass: subpass %u pPreserveAttachments[%u] (%u) must not be used elsewhere in the subpass. %s",
7377                        i, j, attachment, validation_error_map[VALIDATION_ERROR_140006ac]);
7378                }
7379            }
7380        }
7381
7382        auto subpass_performs_resolve =
7383            subpass.pResolveAttachments &&
7384            std::any_of(subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
7385                        [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
7386
7387        unsigned sample_count = 0;
7388
7389        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
7390            uint32_t attachment;
7391            if (subpass.pResolveAttachments) {
7392                attachment = subpass.pResolveAttachments[j].attachment;
7393                skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
7394
7395                if (!skip && attachment != VK_ATTACHMENT_UNUSED &&
7396                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
7397                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
7398                                    0, __LINE__, VALIDATION_ERROR_140006a2, "DS",
7399                                    "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
7400                                    "which must have VK_SAMPLE_COUNT_1_BIT but has %s. %s",
7401                                    i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
7402                                    validation_error_map[VALIDATION_ERROR_140006a2]);
7403                }
7404
7405                if (!skip && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
7406                    subpass.pColorAttachments[j].attachment == VK_ATTACHMENT_UNUSED) {
7407                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
7408                                    0, __LINE__, VALIDATION_ERROR_1400069e, "DS",
7409                                    "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
7410                                    "which has attachment=VK_ATTACHMENT_UNUSED. %s",
7411                                    i, attachment, validation_error_map[VALIDATION_ERROR_1400069e]);
7412                }
7413            }
7414            attachment = subpass.pColorAttachments[j].attachment;
7415            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
7416
7417            if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
7418                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
7419
7420                if (subpass_performs_resolve && pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
7421                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
7422                                    0, __LINE__, VALIDATION_ERROR_140006a0, "DS",
7423                                    "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
7424                                    "which has VK_SAMPLE_COUNT_1_BIT. %s",
7425                                    i, attachment, validation_error_map[VALIDATION_ERROR_140006a0]);
7426                }
7427
7428                if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED) {
7429                    const auto &color_desc = pCreateInfo->pAttachments[attachment];
7430                    const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
7431                    if (color_desc.format != resolve_desc.format) {
7432                        skip |=
7433                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
7434                                    0, __LINE__, VALIDATION_ERROR_140006a4, "DS",
7435                                    "CreateRenderPass:  Subpass %u pColorAttachments[%u] resolves to an attachment with a "
7436                                    "different format. "
7437                                    "color format: %u, resolve format: %u. %s",
7438                                    i, j, color_desc.format, resolve_desc.format, validation_error_map[VALIDATION_ERROR_140006a4]);
7439                    }
7440                }
7441            }
7442        }
7443
7444        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
7445            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
7446            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
7447
7448            if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
7449                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
7450            }
7451        }
7452
7453        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7454            uint32_t attachment = subpass.pInputAttachments[j].attachment;
7455            skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
7456        }
7457
7458        if (sample_count && !IsPowerOfTwo(sample_count)) {
7459            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
7460                            __LINE__, VALIDATION_ERROR_0082b401, "DS",
7461                            "CreateRenderPass:  Subpass %u attempts to render to "
7462                            "attachments with inconsistent sample counts. %s",
7463                            i, validation_error_map[VALIDATION_ERROR_0082b401]);
7464        }
7465    }
7466    return skip;
7467}
7468
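// Record whether an attachment's first use in the render pass reads it (input attachment) or
// writes it (color/resolve/depth attachment); only the first use encountered is recorded.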
7469static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass,
7470                                   uint32_t index,
7471                                   bool is_read) {
7472    if (index == VK_ATTACHMENT_UNUSED)
7473        return;
7474
7475    if (!render_pass->attachment_first_read.count(index))
7476        render_pass->attachment_first_read[index] = is_read;
7477}
7478
7479VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
7480                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
7481    bool skip = false;
7482    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
7483
7484    unique_lock_t lock(global_lock);
7485    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
7486    //       ValidateLayouts.
7487    skip |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
7488    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
7489        skip |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].srcStageMask, "vkCreateRenderPass()",
7490                                             VALIDATION_ERROR_13e006b8, VALIDATION_ERROR_13e006bc);
7491        skip |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].dstStageMask, "vkCreateRenderPass()",
7492                                             VALIDATION_ERROR_13e006ba, VALIDATION_ERROR_13e006be);
7493    }
7494    if (!skip) {
7495        skip |= ValidateLayouts(dev_data, device, pCreateInfo);
7496    }
7497    lock.unlock();
7498
7499    if (skip) {
7500        return VK_ERROR_VALIDATION_FAILED_EXT;
7501    }
7502
7503    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
7504
7505    if (VK_SUCCESS == result) {
7506        lock.lock();
7507
7508        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
7509        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
7510        std::vector<int32_t> subpass_to_dep_index(pCreateInfo->subpassCount);
7511        skip |= CreatePassDAG(dev_data, pCreateInfo, subpass_to_node, has_self_dependency, subpass_to_dep_index);
7512
7513        auto render_pass = unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo));
7514        render_pass->renderPass = *pRenderPass;
7515        render_pass->hasSelfDependency = has_self_dependency;
7516        render_pass->subpassToNode = subpass_to_node;
7517        render_pass->subpass_to_dependency_index = subpass_to_dep_index;
7518
7519        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
7520            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
7521            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
7522                MarkAttachmentFirstUse(render_pass.get(), subpass.pColorAttachments[j].attachment, false);
7523
7524                // resolve attachments are considered to be written
7525                if (subpass.pResolveAttachments) {
7526                    MarkAttachmentFirstUse(render_pass.get(), subpass.pResolveAttachments[j].attachment, false);
7527                }
7528            }
7529            if (subpass.pDepthStencilAttachment) {
7530                MarkAttachmentFirstUse(render_pass.get(), subpass.pDepthStencilAttachment->attachment, false);
7531            }
7532            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
7533                MarkAttachmentFirstUse(render_pass.get(), subpass.pInputAttachments[j].attachment, true);
7534            }
7535        }
7536
7537        dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
7538    }
7539    return result;
7540}
7541
7542static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, char const *cmd_name,
7543                                         UNIQUE_VALIDATION_ERROR_CODE error_code) {
7544    bool skip = false;
7545    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
7546        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7547                        HandleToUint64(pCB->commandBuffer), __LINE__, error_code, "DS",
7548                        "Cannot execute command %s on a secondary command buffer. %s", cmd_name, validation_error_map[error_code]);
7549    }
7550    return skip;
7551}
7552
7553static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
7554    bool skip = false;
7555    const safe_VkFramebufferCreateInfo *pFramebufferInfo =
7556        &GetFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
7557    if (pRenderPassBegin->renderArea.offset.x < 0 ||
7558        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
7559        pRenderPassBegin->renderArea.offset.y < 0 ||
7560        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
        skip |= log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with renderArea not within the bounds of the "
            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
            "height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height);
7569    }
7570    return skip;
7571}
7572
// For a stencil-only format, the stencil[Load|Store]Op is the flag that matters; for a depth or color
// attachment, the [load|store]Op must be checked instead. Depth/stencil formats consider both aspects.
7575// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
7576template <typename T>
7577static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
7578    if (color_depth_op != op && stencil_op != op) {
7579        return false;
7580    }
7581    bool check_color_depth_load_op = !FormatIsStencilOnly(format);
7582    bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;
7583
7584    return ((check_color_depth_load_op && (color_depth_op == op)) ||
7585            (check_stencil_load_op && (stencil_op == op)));
7586}
7587
7588VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
7589                                              VkSubpassContents contents) {
7590    bool skip = false;
7591    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7592    unique_lock_t lock(global_lock);
7593    GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
7594    auto render_pass_state = pRenderPassBegin ? GetRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
7595    auto framebuffer = pRenderPassBegin ? GetFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
7596    if (cb_node) {
7597        if (render_pass_state) {
7598            uint32_t clear_op_size = 0;  // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
7599            cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
7600            for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
7601                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
7602                auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
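                // Queue a per-attachment validate function keyed off the effective load op: CLEAR and DONT_CARE
                // write the attachment (its memory becomes valid or is invalidated, respectively), while LOAD reads
                // it, so its memory must already be valid when the command buffer is submitted.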
7603                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
7604                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
7605                    clear_op_size = static_cast<uint32_t>(i) + 1;
7606                    std::function<bool()> function = [=]() {
7607                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
7608                        return false;
7609                    };
7610                    cb_node->validate_functions.push_back(function);
7611                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
7612                                                                pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
7613                    std::function<bool()> function = [=]() {
7614                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
7615                        return false;
7616                    };
7617                    cb_node->validate_functions.push_back(function);
7618                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
7619                                                                pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_LOAD)) {
7620                    std::function<bool()> function = [=]() {
7621                        return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
7622                                                          "vkCmdBeginRenderPass()");
7623                    };
7624                    cb_node->validate_functions.push_back(function);
7625                }
7626                if (render_pass_state->attachment_first_read[i]) {
7627                    std::function<bool()> function = [=]() {
7628                        return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
7629                                                          "vkCmdBeginRenderPass()");
7630                    };
7631                    cb_node->validate_functions.push_back(function);
7632                }
7633            }
7634            if (clear_op_size > pRenderPassBegin->clearValueCount) {
7635                skip |= log_msg(
7636                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7637                    HandleToUint64(render_pass_state->renderPass), __LINE__, VALIDATION_ERROR_1200070c, "DS",
                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u, but there must "
                    "be at least %u entries in the pClearValues array to account for the highest-indexed attachment in "
                    "renderPass 0x%" PRIx64
                    " that uses VK_ATTACHMENT_LOAD_OP_CLEAR, which is attachment %u. Note that the pClearValues array "
                    "is indexed by attachment number, so entries between 0 and %u that correspond to attachments which "
                    "aren't cleared must still be present; their contents are ignored. %s",
                    pRenderPassBegin->clearValueCount, clear_op_size, HandleToUint64(render_pass_state->renderPass),
                    clear_op_size - 1, clear_op_size - 1, validation_error_map[VALIDATION_ERROR_1200070c]);
7646            }
7647            skip |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
7648            skip |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin,
7649                                                          GetFramebufferState(dev_data, pRenderPassBegin->framebuffer));
7650            skip |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00017);
7651            skip |= ValidateDependencies(dev_data, framebuffer, render_pass_state);
7652            skip |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00019);
7653            skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBeginRenderPass()", VK_QUEUE_GRAPHICS_BIT,
7654                                          VALIDATION_ERROR_17a02415);
7655            skip |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
7656            cb_node->activeRenderPass = render_pass_state;
7657            // This is a shallow copy as that is all that is needed for now
7658            cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
7659            cb_node->activeSubpass = 0;
7660            cb_node->activeSubpassContents = contents;
7661            cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
7662            // Connect this framebuffer and its children to this cmdBuffer
7663            AddFramebufferBinding(dev_data, cb_node, framebuffer);
7664            // transition attachments to the correct layouts for beginning of renderPass and first subpass
7665            TransitionBeginRenderPassLayouts(dev_data, cb_node, render_pass_state, framebuffer);
7666        }
7667    }
7668    lock.unlock();
7669    if (!skip) {
7670        dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
7671    }
7672}
7673
7674VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
7675    bool skip = false;
7676    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7677    unique_lock_t lock(global_lock);
7678    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
7679    if (pCB) {
7680        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600019);
7681        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdNextSubpass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b602415);
7682        skip |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
7683        skip |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600017);
7684
        // Guard against a null activeRenderPass: outsideRenderPass() above only logs, it does not return early.
        if (pCB->activeRenderPass) {
            auto subpassCount = pCB->activeRenderPass->createInfo.subpassCount;
            if (pCB->activeSubpass == subpassCount - 1) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
                                VALIDATION_ERROR_1b60071a, "DS",
                                "vkCmdNextSubpass(): Attempted to advance beyond final subpass. %s",
                                validation_error_map[VALIDATION_ERROR_1b60071a]);
            }
        }
7692    }
7693    lock.unlock();
7694
7695    if (skip) return;
7696
7697    dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
7698
7699    if (pCB) {
7700        lock.lock();
7701        pCB->activeSubpass++;
7702        pCB->activeSubpassContents = contents;
7703        TransitionSubpassLayouts(dev_data, pCB, pCB->activeRenderPass, pCB->activeSubpass,
7704                                 GetFramebufferState(dev_data, pCB->activeRenderPassBeginInfo.framebuffer));
7705    }
7706}
7707
7708VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
7709    bool skip = false;
7710    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7711    unique_lock_t lock(global_lock);
7712    auto pCB = GetCBNode(dev_data, commandBuffer);
7713    FRAMEBUFFER_STATE *framebuffer = NULL;
7714    if (pCB) {
7715        RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
7716        framebuffer = GetFramebufferState(dev_data, pCB->activeFramebuffer);
7717        if (rp_state) {
7718            if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
7719                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7720                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
7721                                VALIDATION_ERROR_1b00071c, "DS", "vkCmdEndRenderPass(): Called before reaching final subpass. %s",
7722                                validation_error_map[VALIDATION_ERROR_1b00071c]);
7723            }
7724
7725            for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
7726                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
7727                auto pAttachment = &rp_state->createInfo.pAttachments[i];
7728                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp, pAttachment->stencilStoreOp,
7729                                                         VK_ATTACHMENT_STORE_OP_STORE)) {
7730                    std::function<bool()> function = [=]() {
7731                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
7732                        return false;
7733                    };
7734                    pCB->validate_functions.push_back(function);
7735                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
7736                                                                pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
7737                    std::function<bool()> function = [=]() {
7738                        SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
7739                        return false;
7740                    };
7741                    pCB->validate_functions.push_back(function);
7742                }
7743            }
7744        }
        skip |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_1b000017);
7746        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_1b000019);
7747        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdEndRenderPass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b002415);
7748        skip |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
7749    }
7750    lock.unlock();
7751
7752    if (skip) return;
7753
7754    dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
7755
7756    if (pCB) {
7757        lock.lock();
7758        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, framebuffer);
7759        pCB->activeRenderPass = nullptr;
7760        pCB->activeSubpass = 0;
7761        pCB->activeFramebuffer = VK_NULL_HANDLE;
7762    }
7763}
7764
7765static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
7766                                        uint32_t secondaryAttach, const char *msg) {
7767    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7768                   HandleToUint64(secondaryBuffer), __LINE__, VALIDATION_ERROR_1b2000c4, "DS",
7769                   "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64
7770                   " which has a render pass "
                   "that is not compatible with the Primary Cmd Buffer's current render pass. "
7772                   "Attachment %u is not compatible with %u: %s. %s",
7773                   HandleToUint64(secondaryBuffer), primaryAttach, secondaryAttach, msg,
7774                   validation_error_map[VALIDATION_ERROR_1b2000c4]);
7775}
7776
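// Check a pair of corresponding attachment references from the primary and secondary render passes for
// compatibility: formats and sample counts must match, and flags must match when the render pass has multiple
// subpasses. References beyond attachmentCount are treated as VK_ATTACHMENT_UNUSED.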
7777static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
7778                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
7779                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
7780                                            uint32_t secondaryAttach, bool is_multi) {
7781    bool skip = false;
7782    if (primaryPassCI->attachmentCount <= primaryAttach) {
7783        primaryAttach = VK_ATTACHMENT_UNUSED;
7784    }
7785    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
7786        secondaryAttach = VK_ATTACHMENT_UNUSED;
7787    }
7788    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
7789        return skip;
7790    }
7791    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
7792        skip |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
7793                                            "The first is unused while the second is not.");
7794        return skip;
7795    }
7796    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
7797        skip |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
7798                                            "The second is unused while the first is not.");
7799        return skip;
7800    }
7801    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
7802        skip |=
7803            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
7804    }
7805    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
7806        skip |=
7807            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
7808    }
7809    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
7810        skip |=
7811            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
7812    }
7813    return skip;
7814}
7815
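// Walk the input, color, resolve, and depth/stencil attachment references of the given subpass in both render
// passes, checking each corresponding pair for compatibility.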
7816static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
7817                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
7818                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
7819    bool skip = false;
7820    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
7821    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
7822    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
7823    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
7824        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
7825        if (i < primary_desc.inputAttachmentCount) {
7826            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
7827        }
7828        if (i < secondary_desc.inputAttachmentCount) {
7829            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
7830        }
7831        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
7832                                                secondaryPassCI, secondary_input_attach, is_multi);
7833    }
7834    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
7835    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
7836        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
7837        if (i < primary_desc.colorAttachmentCount) {
7838            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
7839        }
7840        if (i < secondary_desc.colorAttachmentCount) {
7841            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
7842        }
7843        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
7844                                                secondaryPassCI, secondary_color_attach, is_multi);
7845        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
7846        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
7847            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
7848        }
7849        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
7850            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
7851        }
7852        skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach, secondaryBuffer,
7853                                                secondaryPassCI, secondary_resolve_attach, is_multi);
7854    }
7855    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
7856    if (primary_desc.pDepthStencilAttachment) {
7857        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
7858    }
7859    if (secondary_desc.pDepthStencilAttachment) {
7860        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
7861    }
7862    skip |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach, secondaryBuffer,
7863                                            secondaryPassCI, secondary_depthstencil_attach, is_multi);
7864    return skip;
7865}
7866
7867// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
7868//  This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
7869//  will then feed into this function
7870static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
7871                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
7872                                            VkRenderPassCreateInfo const *secondaryPassCI) {
7873    bool skip = false;
7874
7875    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
7876        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7877                        HandleToUint64(primaryBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
7878                        "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
                        " that has a subpassCount of %u, which is incompatible with the primary Cmd Buffer 0x%" PRIx64
7880                        " that has a subpassCount of %u.",
7881                        HandleToUint64(secondaryBuffer), secondaryPassCI->subpassCount, HandleToUint64(primaryBuffer),
7882                        primaryPassCI->subpassCount);
7883    } else {
7884        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
7885            skip |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
7886                                                 primaryPassCI->subpassCount > 1);
7887        }
7888    }
7889    return skip;
7890}
7891
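// For a secondary command buffer that inherits a framebuffer, verify that it matches the primary command
// buffer's active framebuffer and that the inherited render pass is compatible with that framebuffer's
// render pass.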
7892static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
7893                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
7894    bool skip = false;
7895    if (!pSubCB->beginInfo.pInheritanceInfo) {
7896        return skip;
7897    }
7898    VkFramebuffer primary_fb = pCB->activeFramebuffer;
7899    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
7900    if (secondary_fb != VK_NULL_HANDLE) {
7901        if (primary_fb != secondary_fb) {
7902            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7903                            HandleToUint64(primaryBuffer), __LINE__, VALIDATION_ERROR_1b2000c6, "DS",
7904                            "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64
7905                            " which has a framebuffer 0x%" PRIx64
7906                            " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ". %s",
7907                            HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb), HandleToUint64(primary_fb),
7908                            validation_error_map[VALIDATION_ERROR_1b2000c6]);
7909        }
7910        auto fb = GetFramebufferState(dev_data, secondary_fb);
7911        if (!fb) {
7912            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7913                            HandleToUint64(primaryBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
7914                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
7915                            "which has invalid framebuffer 0x%" PRIx64 ".",
7916                            (void *)secondaryBuffer, HandleToUint64(secondary_fb));
7917            return skip;
7918        }
7919        auto cb_renderpass = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
        if (cb_renderpass && (cb_renderpass->renderPass != fb->createInfo.renderPass)) {
7921            skip |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
7922                                                    cb_renderpass->createInfo.ptr());
7923        }
7924    }
7925    return skip;
7926}
7927
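// Validate the query and queue-family state of a secondary command buffer against the primary that is about to
// execute it: inherited pipelineStatistics must satisfy any active pipeline-statistics query, the secondary must
// not have started a query of a type already active on the primary, and both command pools must share a queue
// family.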
7928static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
7929    bool skip = false;
7930    unordered_set<int> activeTypes;
7931    for (auto queryObject : pCB->activeQueries) {
7932        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
7933        if (queryPoolData != dev_data->queryPoolMap.end()) {
7934            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
7935                pSubCB->beginInfo.pInheritanceInfo) {
7936                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                // The spec requires the secondary's inherited pipelineStatistics to include every statistic bit
                // enabled on the active query pool.
                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) !=
                    queryPoolData->second.createInfo.pipelineStatistics) {
7938                    skip |= log_msg(
7939                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7940                        HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_1b2000d0, "DS",
7941                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
7942                        "which has invalid active query pool 0x%" PRIx64
                        ". A pipeline statistics query is active, so the secondary command buffer must have been "
                        "recorded with all of the query pool's enabled pipelineStatistics bits set in its inherited "
                        "pipelineStatistics. %s",
7945                        pCB->commandBuffer, HandleToUint64(queryPoolData->first), validation_error_map[VALIDATION_ERROR_1b2000d0]);
7946                }
7947            }
7948            activeTypes.insert(queryPoolData->second.createInfo.queryType);
7949        }
7950    }
7951    for (auto queryObject : pSubCB->startedQueries) {
7952        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
7953        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
7954            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7955                            HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
7956                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
7957                            "which has invalid active query pool 0x%" PRIx64
                            " of type %d, but a query of that type has already been started on "
7959                            "secondary Cmd Buffer 0x%p.",
7960                            pCB->commandBuffer, HandleToUint64(queryPoolData->first), queryPoolData->second.createInfo.queryType,
7961                            pSubCB->commandBuffer);
7962        }
7963    }
7964
7965    auto primary_pool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
7966    auto secondary_pool = GetCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
7967    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
7968        skip |=
7969            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7970                    HandleToUint64(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
7971                    "vkCmdExecuteCommands(): Primary command buffer 0x%p"
7972                    " created in queue family %d has secondary command buffer 0x%p created in queue family %d.",
7973                    pCB->commandBuffer, primary_pool->queueFamilyIndex, pSubCB->commandBuffer, secondary_pool->queueFamilyIndex);
7974    }
7975
7976    return skip;
7977}
7978
7979VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
7980                                              const VkCommandBuffer *pCommandBuffers) {
7981    bool skip = false;
7982    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7983    unique_lock_t lock(global_lock);
7984    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
7985    if (pCB) {
7986        GLOBAL_CB_NODE *pSubCB = NULL;
7987        for (uint32_t i = 0; i < commandBuffersCount; i++) {
7988            pSubCB = GetCBNode(dev_data, pCommandBuffers[i]);
7989            assert(pSubCB);
7990            if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
7991                skip |=
7992                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7993                            HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000b0, "DS",
7994                            "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
7995                            "array. All cmd buffers in pCommandBuffers array must be secondary. %s",
7996                            pCommandBuffers[i], i, validation_error_map[VALIDATION_ERROR_1b2000b0]);
7997            } else if (pCB->activeRenderPass) {  // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
7998                if (pSubCB->beginInfo.pInheritanceInfo != nullptr) {
7999                    auto secondary_rp_state = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
8000                    if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
8001                        skip |= log_msg(
8002                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8003                            HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000c0, "DS",
8004                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
8005                            ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT "
8006                            "set. %s",
8007                            pCommandBuffers[i], HandleToUint64(pCB->activeRenderPass->renderPass),
8008                            validation_error_map[VALIDATION_ERROR_1b2000c0]);
8009                    } else {
8010                        // Make sure render pass is compatible with parent command buffer pass if has continue
8011                        if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
8012                            skip |=
8013                                validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
8014                                                                pCommandBuffers[i], secondary_rp_state->createInfo.ptr());
8015                        }
8016                        //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
8017                        skip |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
8018                    }
8019                    string errorString = "";
8020                    // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
8021                    if ((pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) &&
8022                        !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
8023                                                         secondary_rp_state->createInfo.ptr(), errorString)) {
8024                        skip |= log_msg(
8025                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8026                            HandleToUint64(pCommandBuffers[i]), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
8027                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
8028                            ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
8029                            pCommandBuffers[i], HandleToUint64(pSubCB->beginInfo.pInheritanceInfo->renderPass), commandBuffer,
8030                            HandleToUint64(pCB->activeRenderPass->renderPass), errorString.c_str());
8031                    }
8032                }
8033            }
8034            // TODO(mlentine): Move more logic into this method
8035            skip |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
8036            skip |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()", 0, VALIDATION_ERROR_1b2000b2);
8037            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
8038                if (pSubCB->in_use.load() || pCB->linkedCommandBuffers.count(pSubCB)) {
8039                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8040                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), __LINE__,
8041                                    VALIDATION_ERROR_1b2000b4, "DS",
                                    "Attempt to simultaneously execute secondary command buffer 0x%p"
                                    " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set! %s",
                                    pCommandBuffers[i], validation_error_map[VALIDATION_ERROR_1b2000b4]);
8045                }
8046                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
8047                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
8048                    skip |= log_msg(
8049                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8050                        HandleToUint64(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
8051                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) "
8052                        "does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
8053                        "(0x%p) to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
8054                        "set, even though it does.",
8055                        pCommandBuffers[i], pCB->commandBuffer);
8056                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
8057                }
8058            }
8059            if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
8060                skip |=
8061                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8062                            HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000ca, "DS",
8063                            "vkCmdExecuteCommands(): Secondary Command Buffer "
                            "(0x%p) cannot be submitted while a query is in "
                            "flight, because inherited queries are not "
                            "supported on this device. %s",
8067                            pCommandBuffers[i], validation_error_map[VALIDATION_ERROR_1b2000ca]);
8068            }
8069            // TODO: separate validate from update! This is very tangled.
8070            // Propagate layout transitions to the primary cmd buffer
8071            for (auto ilm_entry : pSubCB->imageLayoutMap) {
8072                SetLayout(dev_data, pCB, ilm_entry.first, ilm_entry.second);
8073            }
8074            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
8075            pCB->linkedCommandBuffers.insert(pSubCB);
8076            pSubCB->linkedCommandBuffers.insert(pCB);
8077            for (auto &function : pSubCB->queryUpdates) {
8078                pCB->queryUpdates.push_back(function);
8079            }
8080        }
8081        skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands()", VALIDATION_ERROR_1b200019);
8082        skip |=
8083            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdExecuteCommands()",
8084                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b202415);
8085        skip |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
8086    }
8087    lock.unlock();
8088    if (!skip) dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
8089}
8090
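// vkMapMemory: the memory must be host-visible and the requested range must lie within the allocation. On
// success the layer records the mapped range and may set up a shadow copy for non-coherent memory so that
// flush/invalidate can be checked for out-of-bounds writes.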
8091VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
8092                                         void **ppData) {
8093    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8094
8095    bool skip = false;
8096    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8097    unique_lock_t lock(global_lock);
8098    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
8099    if (mem_info) {
8100        // TODO : This could me more fine-grained to track just region that is valid
        // TODO : This could be more fine-grained to track just the region that is valid
8102        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
8103        skip |= ValidateMapImageLayouts(dev_data, device, mem_info, offset, end_offset);
8104        // TODO : Do we need to create new "bound_range" for the mapped range?
8105        SetMemRangesValid(dev_data, mem_info, offset, end_offset);
8106        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
8107             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
8109                           HandleToUint64(mem), __LINE__, VALIDATION_ERROR_31200554, "MEM",
8110                           "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64 ". %s",
8111                           HandleToUint64(mem), validation_error_map[VALIDATION_ERROR_31200554]);
8112        }
8113    }
8114    skip |= ValidateMapMemRange(dev_data, mem, offset, size);
8115    lock.unlock();
8116
8117    if (!skip) {
8118        result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
8119        if (VK_SUCCESS == result) {
8120            lock.lock();
8121            // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
8122            storeMemRanges(dev_data, mem, offset, size);
8123            initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
8124            lock.unlock();
8125        }
8126    }
8127    return result;
8128}
8129
8130VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
8131    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8132    bool skip = false;
8133
8134    unique_lock_t lock(global_lock);
8135    skip |= deleteMemRanges(dev_data, mem);
8136    lock.unlock();
8137    if (!skip) {
8138        dev_data->dispatch_table.UnmapMemory(device, mem);
8139    }
8140}
8141
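// Verify that each VkMappedMemoryRange being flushed or invalidated falls within the currently mapped region of
// its memory object.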
8142static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
8143                                   const VkMappedMemoryRange *pMemRanges) {
8144    bool skip = false;
8145    for (uint32_t i = 0; i < memRangeCount; ++i) {
8146        auto mem_info = GetMemObjInfo(dev_data, pMemRanges[i].memory);
8147        if (mem_info) {
8148            if (pMemRanges[i].size == VK_WHOLE_SIZE) {
8149                if (mem_info->mem_range.offset > pMemRanges[i].offset) {
8150                    skip |=
8151                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
8152                                HandleToUint64(pMemRanges[i].memory), __LINE__, VALIDATION_ERROR_0c20055c, "MEM",
8153                                "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
8154                                ") is less than Memory Object's offset "
8155                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
8156                                funcName, static_cast<size_t>(pMemRanges[i].offset),
8157                                static_cast<size_t>(mem_info->mem_range.offset), validation_error_map[VALIDATION_ERROR_0c20055c]);
8158                }
8159            } else {
8160                const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
8161                                              ? mem_info->alloc_info.allocationSize
8162                                              : (mem_info->mem_range.offset + mem_info->mem_range.size);
8163                if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
8164                    (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
8165                    skip |=
8166                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
8167                                HandleToUint64(pMemRanges[i].memory), __LINE__, VALIDATION_ERROR_0c20055a, "MEM",
                                "%s: Flush/Invalidate range end (" PRINTF_SIZE_T_SPECIFIER "), computed from offset ("
                                PRINTF_SIZE_T_SPECIFIER ") plus size, exceeds the Memory Object's upper-bound "
                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
8171                                funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
8172                                static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end),
8173                                validation_error_map[VALIDATION_ERROR_0c20055a]);
8174                }
8175            }
8176        }
8177    }
8178    return skip;
8179}
8180
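// For mappings the layer shadows, scan the guard bands that pad the user-visible region for stray writes
// (reported as underflow/overflow), then copy the shadowed contents back to the driver's mapping.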
8181static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
8182                                                     const VkMappedMemoryRange *mem_ranges) {
8183    bool skip = false;
8184    for (uint32_t i = 0; i < mem_range_count; ++i) {
8185        auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
8186        if (mem_info) {
8187            if (mem_info->shadow_copy) {
8188                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
8189                                        ? mem_info->mem_range.size
8190                                        : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
8191                char *data = static_cast<char *>(mem_info->shadow_copy);
8192                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
8193                    if (data[j] != NoncoherentMemoryFillValue) {
8194                        skip |= log_msg(
8195                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
8196                            HandleToUint64(mem_ranges[i].memory), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
8197                            "Memory underflow was detected on mem obj 0x%" PRIxLEAST64, HandleToUint64(mem_ranges[i].memory));
8198                    }
8199                }
8200                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
8201                    if (data[j] != NoncoherentMemoryFillValue) {
8202                        skip |= log_msg(
8203                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
8204                            HandleToUint64(mem_ranges[i].memory), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
8205                            "Memory overflow was detected on mem obj 0x%" PRIxLEAST64, HandleToUint64(mem_ranges[i].memory));
8206                    }
8207                }
8208                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
8209            }
8210        }
8211    }
8212    return skip;
8213}
8214
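// After an invalidate, refresh the layer's shadow copy from the driver's mapping so the shadow reflects any
// device writes.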
8215static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
8216    for (uint32_t i = 0; i < mem_range_count; ++i) {
8217        auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
8218        if (mem_info && mem_info->shadow_copy) {
8219            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
8220                                    ? mem_info->mem_range.size
8221                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
8222            char *data = static_cast<char *>(mem_info->shadow_copy);
8223            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
8224        }
8225    }
8226}
8227
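// Both the offset and the size (unless VK_WHOLE_SIZE) of each flushed or invalidated range must be multiples of
// VkPhysicalDeviceLimits::nonCoherentAtomSize.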
8228static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
8229                                                  const VkMappedMemoryRange *mem_ranges) {
8230    bool skip = false;
8231    for (uint32_t i = 0; i < mem_range_count; ++i) {
8232        uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
8233        if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
8234            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges[i].memory), __LINE__, VALIDATION_ERROR_0c20055e, "MEM",
8236                            "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
8237                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
8238                            func_name, i, mem_ranges[i].offset, atom_size, validation_error_map[VALIDATION_ERROR_0c20055e]);
8239        }
8240        if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
8241            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges[i].memory), __LINE__, VALIDATION_ERROR_0c200560, "MEM",
8243                            "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
8244                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
8245                            func_name, i, mem_ranges[i].size, atom_size, validation_error_map[VALIDATION_ERROR_0c200560]);
8246        }
8247    }
8248    return skip;
8249}
8250
8251static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
8252                                                   const VkMappedMemoryRange *mem_ranges) {
8253    bool skip = false;
8254    lock_guard_t lock(global_lock);
8255    skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
8256    skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
8257    return skip;
8258}
8259
8260VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
8261                                                       const VkMappedMemoryRange *pMemRanges) {
8262    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8263    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8264
8265    if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
8266        result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
8267    }
8268    return result;
8269}
8270
8271static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
8272                                                        const VkMappedMemoryRange *mem_ranges) {
8273    bool skip = false;
8274    lock_guard_t lock(global_lock);
8275    skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
8276    return skip;
8277}
8278
8279static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
8280                                                       const VkMappedMemoryRange *mem_ranges) {
8281    lock_guard_t lock(global_lock);
8282    // Update our shadow copy with modified driver data
8283    CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
8284}
8285
8286VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
8287                                                            const VkMappedMemoryRange *pMemRanges) {
8288    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8289    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8290
8291    if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
8292        result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
8293        if (result == VK_SUCCESS) {
8294            PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
8295        }
8296    }
8297    return result;
8298}
8299
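// Validate vkBindImageMemory: the image must not already be bound, memoryOffset must be a multiple of the
// image's required alignment, the memory type must be permitted by memoryTypeBits, and the allocation must be
// large enough beyond memoryOffset to hold VkMemoryRequirements::size.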
8300static bool PreCallValidateBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
8301                                           VkDeviceSize memoryOffset) {
8302    bool skip = false;
8303    if (image_state) {
8304        unique_lock_t lock(global_lock);
8305        // Track objects tied to memory
8306        uint64_t image_handle = HandleToUint64(image);
8307        skip = ValidateSetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, "vkBindImageMemory()");
8308        if (!image_state->memory_requirements_checked) {
8309            // There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
            // BindImageMemory, but it is implied: the memory being bound must conform to the VkMemoryRequirements from
8311            // vkGetImageMemoryRequirements()
8312            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8313                            image_handle, __LINE__, DRAWSTATE_INVALID_IMAGE, "DS",
8314                            "vkBindImageMemory(): Binding memory to image 0x%" PRIxLEAST64
8315                            " but vkGetImageMemoryRequirements() has not been called on that image.",
8316                            image_handle);
8317            // Make the call for them so we can verify the state
8318            lock.unlock();
8319            dev_data->dispatch_table.GetImageMemoryRequirements(dev_data->device, image, &image_state->requirements);
8320            lock.lock();
8321        }
8322
8323        // Validate bound memory range information
8324        auto mem_info = GetMemObjInfo(dev_data, mem);
8325        if (mem_info) {
8326            skip |= ValidateInsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
8327                                                   image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, "vkBindImageMemory()");
8328            skip |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, "vkBindImageMemory()",
8329                                        VALIDATION_ERROR_1740082e);
8330        }
8331
8332        // Validate memory requirements alignment
8333        if (SafeModulo(memoryOffset, image_state->requirements.alignment) != 0) {
8334            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8335                            image_handle, __LINE__, VALIDATION_ERROR_17400830, "DS",
8336                            "vkBindImageMemory(): memoryOffset is 0x%" PRIxLEAST64
8337                            " but must be an integer multiple of the "
8338                            "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
8339                            ", returned from a call to vkGetImageMemoryRequirements with image. %s",
8340                            memoryOffset, image_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_17400830]);
8341        }
8342
8343        // Validate memory requirements size
        if (mem_info && (image_state->requirements.size > mem_info->alloc_info.allocationSize - memoryOffset)) {
8345            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8346                            image_handle, __LINE__, VALIDATION_ERROR_17400832, "DS",
8347                            "vkBindImageMemory(): memory size minus memoryOffset is 0x%" PRIxLEAST64
8348                            " but must be at least as large as "
8349                            "VkMemoryRequirements::size value 0x%" PRIxLEAST64
8350                            ", returned from a call to vkGetImageMemoryRequirements with image. %s",
8351                            mem_info->alloc_info.allocationSize - memoryOffset, image_state->requirements.size,
8352                            validation_error_map[VALIDATION_ERROR_17400832]);
8353        }
8354    }
8355    return skip;
8356}
8357
8358static void PostCallRecordBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
8359                                          VkDeviceSize memoryOffset) {
8360    if (image_state) {
8361        unique_lock_t lock(global_lock);
8362        // Track bound memory range information
8363        auto mem_info = GetMemObjInfo(dev_data, mem);
8364        if (mem_info) {
8365            InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
8366                                   image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
8367        }
8368
8369        // Track objects tied to memory
8370        uint64_t image_handle = HandleToUint64(image);
8371        SetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, "vkBindImageMemory()");
8372
8373        image_state->binding.mem = mem;
8374        image_state->binding.offset = memoryOffset;
8375        image_state->binding.size = image_state->requirements.size;
8376    }
8377}
8378
8379VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
8380    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8381    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8382    auto image_state = GetImageState(dev_data, image);
8383    bool skip = PreCallValidateBindImageMemory(dev_data, image, image_state, mem, memoryOffset);
8384    if (!skip) {
8385        result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
8386        if (result == VK_SUCCESS) {
8387            PostCallRecordBindImageMemory(dev_data, image, image_state, mem, memoryOffset);
8388        }
8389    }
8390    return result;
8391}
8392
8393VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
8394    bool skip = false;
8395    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8396    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8397    unique_lock_t lock(global_lock);
8398    auto event_state = GetEventNode(dev_data, event);
8399    if (event_state) {
8400        event_state->needsSignaled = false;
8401        event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
8402        if (event_state->write_in_use) {
8403            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8404                            HandleToUint64(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
8405                            "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
8406                            HandleToUint64(event));
8407        }
8408    }
8409    lock.unlock();
8410    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
8411    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
8412    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
8413    for (auto queue_data : dev_data->queueMap) {
8414        auto event_entry = queue_data.second.eventToStageMap.find(event);
8415        if (event_entry != queue_data.second.eventToStageMap.end()) {
8416            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
8417        }
8418    }
8419    if (!skip) result = dev_data->dispatch_table.SetEvent(device, event);
8420    return result;
8421}
8422
8423VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
8424                                               VkFence fence) {
8425    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
8426    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8427    bool skip = false;
8428    unique_lock_t lock(global_lock);
8429    auto pFence = GetFenceNode(dev_data, fence);
8430    auto pQueue = GetQueueState(dev_data, queue);
8431
8432    // First verify that fence is not in use
8433    skip |= ValidateFenceForSubmit(dev_data, pFence);
8434
8435    if (pFence) {
8436        SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount));
8437    }
8438
8439    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
8440        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
8441        // Track objects tied to memory
8442        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
8443            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
8444                auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
8445                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
8446                                        HandleToUint64(bindInfo.pBufferBinds[j].buffer), kVulkanObjectTypeBuffer))
8447                    skip = true;
8448            }
8449        }
8450        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
8451            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
8452                auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
8453                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
8454                                        HandleToUint64(bindInfo.pImageOpaqueBinds[j].image), kVulkanObjectTypeImage))
8455                    skip = true;
8456            }
8457        }
8458        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
8459            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
8460                auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
8461                // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
8462                VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
8463                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
8464                                        HandleToUint64(bindInfo.pImageBinds[j].image), kVulkanObjectTypeImage))
8465                    skip = true;
8466            }
8467        }
8468
8469        std::vector<SEMAPHORE_WAIT> semaphore_waits;
8470        std::vector<VkSemaphore> semaphore_signals;
8471        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
8472            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
8473            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
8474            if (pSemaphore) {
8475                if (pSemaphore->signaled) {
8476                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
8477                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
8478                        pSemaphore->in_use.fetch_add(1);
8479                    }
8480                    pSemaphore->signaler.first = VK_NULL_HANDLE;
8481                    pSemaphore->signaled = false;
8482                } else {
8483                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
8484                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
8485                                    "vkQueueBindSparse: Queue 0x%p is waiting on semaphore 0x%" PRIx64
8486                                    " that has no way to be signaled.",
8487                                    queue, HandleToUint64(semaphore));
8488                }
8489            }
8490        }
8491        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
8492            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
8493            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
8494            if (pSemaphore) {
8495                if (pSemaphore->signaled) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                    HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                    "vkQueueBindSparse: Queue 0x%p is signaling semaphore 0x%" PRIx64
                                    ", but that semaphore is already signaled.",
                                    queue, HandleToUint64(semaphore));
8501                } else {
8502                    pSemaphore->signaler.first = queue;
8503                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
8504                    pSemaphore->signaled = true;
8505                    pSemaphore->in_use.fetch_add(1);
8506                    semaphore_signals.push_back(semaphore);
8507                }
8508            }
8509        }
8510
8511        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals,
8512                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
8513    }
8514
8515    if (pFence && !bindInfoCount) {
8516        // No work to do, just dropping a fence in the queue by itself.
8517        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(), std::vector<VkSemaphore>(),
8518                                         fence);
8519    }
8520
8521    lock.unlock();
8522
8523    if (!skip) return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
8524
8525    return result;
8526}
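
// Illustrative sketch: a minimal opaque image bind that exercises the
// SetSparseMemBinding loops above. `reqs`, `mem`, and `sparse_image` are
// placeholders obtained via vkGetImageMemoryRequirements()/vkAllocateMemory().
//
//     VkSparseMemoryBind bind = {};
//     bind.resourceOffset = 0;
//     bind.size = reqs.size;
//     bind.memory = mem;
//     bind.memoryOffset = 0;
//     VkSparseImageOpaqueMemoryBindInfo opaque_bind = {sparse_image, 1, &bind};
//     VkBindSparseInfo bind_info = {};
//     bind_info.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
//     bind_info.imageOpaqueBindCount = 1;
//     bind_info.pImageOpaqueBinds = &opaque_bind;
//     vkQueueBindSparse(queue, 1, &bind_info, fence);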
8527
8528VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
8529                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
8530    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8531    VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
8532    if (result == VK_SUCCESS) {
8533        lock_guard_t lock(global_lock);
8534        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
8535        sNode->signaler.first = VK_NULL_HANDLE;
8536        sNode->signaler.second = 0;
8537        sNode->signaled = false;
8538    }
8539    return result;
8540}
8541
8542VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
8543                                           const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
8544    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8545    VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
8546    if (result == VK_SUCCESS) {
8547        lock_guard_t lock(global_lock);
8548        dev_data->eventMap[*pEvent].needsSignaled = false;
8549        dev_data->eventMap[*pEvent].write_in_use = 0;
8550        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
8551    }
8552    return result;
8553}
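
// Illustrative sketch: both create wrappers above only seed the layer's tracking
// state. After, e.g.:
//
//     VkEventCreateInfo event_ci = {VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, nullptr, 0};
//     VkEvent event;
//     vkCreateEvent(device, &event_ci, nullptr, &event);
//
// the eventMap entry holds needsSignaled = false, write_in_use = 0, and an empty
// stageMask until a vkSetEvent()/vkCmdSetEvent() updates them.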
8554
8555static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
8556                                              VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
8557                                              SWAPCHAIN_NODE *old_swapchain_state) {
8558    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
8559
8560    // TODO: revisit this. some of these rules are being relaxed.
8561
8562    // All physical devices and queue families are required to be able
8563    // to present to any native window on Android; require the
8564    // application to have established support on any other platform.
8565    if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
8566        auto support_predicate = [dev_data](decltype(surface_state->gpu_queue_support)::const_reference qs) -> bool {
8567            // TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
8568            return (qs.first.gpu == dev_data->physical_device) && qs.second;
8569        };
8570        const auto& support = surface_state->gpu_queue_support;
8571        bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);
8572
8573        if (!is_supported) {
8574            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8575                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009ec, "DS",
                        "%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. "
                        "vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE "
                        "for this surface and at least one queue family of this device. %s",
8579                        func_name, validation_error_map[VALIDATION_ERROR_146009ec]))
8580                return true;
8581        }
8582    }
8583
8584    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
8585        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8586                    HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
8587                    "%s: surface has an existing swapchain other than oldSwapchain", func_name))
8588            return true;
8589    }
8590    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
8591        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
8592                    HandleToUint64(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE, "DS",
8593                    "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
8594            return true;
8595    }
8596    auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
8597    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
8598        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
8599                    HandleToUint64(dev_data->physical_device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
8600                    "%s: surface capabilities not retrieved for this physical device", func_name))
8601            return true;
8602    } else {  // have valid capabilities
8603        auto &capabilities = physical_device_state->surfaceCapabilities;
8604        // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
8605        if (pCreateInfo->minImageCount < capabilities.minImageCount) {
8606            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8607                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009ee, "DS",
8608                        "%s called with minImageCount = %d, which is outside the bounds returned "
8609                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
8610                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
8611                        validation_error_map[VALIDATION_ERROR_146009ee]))
8612                return true;
8613        }
8614
8615        if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
8616            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8617                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f0, "DS",
8618                        "%s called with minImageCount = %d, which is outside the bounds returned "
8619                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
8620                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
8621                        validation_error_map[VALIDATION_ERROR_146009f0]))
8622                return true;
8623        }
8624
8625        // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
8626        if ((capabilities.currentExtent.width == kSurfaceSizeFromSwapchain) &&
8627            ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
8628             (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
8629             (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
8630             (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height))) {
8631            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8632                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f4, "DS",
8633                        "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
8634                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
8635                        "maxImageExtent = (%d,%d). %s",
8636                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
8637                        capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
8638                        capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height,
8639                        validation_error_map[VALIDATION_ERROR_146009f4]))
8640                return true;
8641        }
8642        // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
8643        // VkSurfaceCapabilitiesKHR::supportedTransforms.
8644        if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
8645            !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
8646            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
8647            // it up a little at a time, and then log it:
8648            std::string errorString = "";
8649            char str[1024];
8650            // Here's the first part of the message:
8651            sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s).  Supported values are:\n", func_name,
8652                    string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
8653            errorString += str;
8654            for (int i = 0; i < 32; i++) {
8655                // Build up the rest of the message:
                if ((1u << i) & capabilities.supportedTransforms) {
                    const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1u << i));
8658                    sprintf(str, "    %s\n", newStr);
8659                    errorString += str;
8660                }
8661            }
8662            // Log the message that we've built up:
8663            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8664                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009fe, "DS", "%s. %s", errorString.c_str(),
8665                        validation_error_map[VALIDATION_ERROR_146009fe]))
8666                return true;
8667        }
8668
8669        // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
8670        // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
8671        if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
8672            !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
8673            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
8674            // it up a little at a time, and then log it:
8675            std::string errorString = "";
8676            char str[1024];
8677            // Here's the first part of the message:
8678            sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s).  Supported values are:\n",
8679                    func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
8680            errorString += str;
8681            for (int i = 0; i < 32; i++) {
8682                // Build up the rest of the message:
                if ((1u << i) & capabilities.supportedCompositeAlpha) {
                    const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1u << i));
8685                    sprintf(str, "    %s\n", newStr);
8686                    errorString += str;
8687                }
8688            }
8689            // Log the message that we've built up:
8690            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8691                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600a00, "DS", "%s. %s", errorString.c_str(),
8692                        validation_error_map[VALIDATION_ERROR_14600a00]))
8693                return true;
8694        }
8695        // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
8696        if ((pCreateInfo->imageArrayLayers < 1) || (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers)) {
8697            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8698                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f6, "DS",
8699                        "%s called with a non-supported imageArrayLayers (i.e. %d).  Minimum value is 1, maximum value is %d. %s",
8700                        func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers,
8701                        validation_error_map[VALIDATION_ERROR_146009f6]))
8702                return true;
8703        }
8704        // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
8705        if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
8706            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8707                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f8, "DS",
8708                        "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x).  Supported flag bits are 0x%08x. %s",
8709                        func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags,
8710                        validation_error_map[VALIDATION_ERROR_146009f8]))
8711                return true;
8712        }
8713    }
8714
8715    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
8716    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
8717        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8718                    HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
8719                    "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
8720            return true;
8721    } else {
8722        // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
8723        bool foundFormat = false;
8724        bool foundColorSpace = false;
8725        bool foundMatch = false;
8726        for (auto const &format : physical_device_state->surface_formats) {
8727            if (pCreateInfo->imageFormat == format.format) {
8728                // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
8729                foundFormat = true;
8730                if (pCreateInfo->imageColorSpace == format.colorSpace) {
8731                    foundMatch = true;
8732                    break;
8733                }
8734            } else {
8735                if (pCreateInfo->imageColorSpace == format.colorSpace) {
8736                    foundColorSpace = true;
8737                }
8738            }
8739        }
8740        if (!foundMatch) {
8741            if (!foundFormat) {
8742                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8743                            HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f2, "DS",
8744                            "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d). %s", func_name,
8745                            pCreateInfo->imageFormat, validation_error_map[VALIDATION_ERROR_146009f2]))
8746                    return true;
8747            }
8748            if (!foundColorSpace) {
8749                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8750                            HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f2, "DS",
8751                            "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d). %s", func_name,
8752                            pCreateInfo->imageColorSpace, validation_error_map[VALIDATION_ERROR_146009f2]))
8753                    return true;
8754            }
8755        }
8756    }
8757
8758    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
8759    if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
8760        // FIFO is required to always be supported
8761        if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
8762            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8763                        HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
8764                        "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
8765                return true;
8766        }
8767    } else {
8768        // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
8769        bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
8770                                    pCreateInfo->presentMode) != physical_device_state->present_modes.end();
8771        if (!foundMatch) {
8772            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8773                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600a02, "DS",
8774                        "%s called with a non-supported presentMode (i.e. %s). %s", func_name,
8775                        string_VkPresentModeKHR(pCreateInfo->presentMode), validation_error_map[VALIDATION_ERROR_14600a02]))
8776                return true;
8777        }
8778    }
8779    // Validate state for shared presentable case
8780    if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
8781        VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
8782        if (!dev_data->extensions.vk_khr_shared_presentable_image) {
8783            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8784                        HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_EXTENSION_NOT_ENABLED, "DS",
8785                        "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
8786                        "been enabled.",
8787                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
8788                return true;
8789        } else if (pCreateInfo->minImageCount != 1) {
8790            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600ace, "DS",
8792                        "%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
8793                        "must be 1. %s",
8794                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount,
8795                        validation_error_map[VALIDATION_ERROR_14600ace]))
8796                return true;
8797        }
8798    }
8799
8800    return false;
8801}
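
// Illustrative sketch: the query-before-create sequence that satisfies the checks
// above. A real application would then pick its format, extent, and present mode
// from the returned data rather than assuming them.
//
//     VkSurfaceCapabilitiesKHR caps;
//     vkGetPhysicalDeviceSurfaceCapabilitiesKHR(gpu, surface, &caps);
//     uint32_t format_count = 0;
//     vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &format_count, nullptr);
//     std::vector<VkSurfaceFormatKHR> formats(format_count);
//     vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &format_count, formats.data());
//     uint32_t mode_count = 0;
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, nullptr);
//     std::vector<VkPresentModeKHR> modes(mode_count);
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, modes.data());
//     VkBool32 supported = VK_FALSE;
//     vkGetPhysicalDeviceSurfaceSupportKHR(gpu, queue_family_index, surface, &supported);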
8802
8803static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
8804                                             VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
8805                                             SWAPCHAIN_NODE *old_swapchain_state) {
8806    if (VK_SUCCESS == result) {
8807        lock_guard_t lock(global_lock);
8808        auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
8809        if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
8810            VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
8811            swapchain_state->shared_presentable = true;
8812        }
8813        surface_state->swapchain = swapchain_state.get();
8814        dev_data->swapchainMap[*pSwapchain] = std::move(swapchain_state);
8815    } else {
8816        surface_state->swapchain = nullptr;
8817    }
8818    // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
8819    if (old_swapchain_state) {
8820        old_swapchain_state->replaced = true;
8821    }
8822    surface_state->old_swapchain = old_swapchain_state;
8823    return;
8824}
8825
8826VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
8827                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
8828    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8829    auto surface_state = GetSurfaceState(dev_data->instance_data, pCreateInfo->surface);
8830    auto old_swapchain_state = GetSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
8831
    if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
8833        return VK_ERROR_VALIDATION_FAILED_EXT;
8834    }
8835
8836    VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
8837
8838    PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
8839
8840    return result;
8841}
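
// Illustrative sketch: a create info consistent with the validation above,
// assuming `caps`, `formats`, and surface support were queried as sketched after
// PreCallValidateCreateSwapchainKHR().
//
//     VkSwapchainCreateInfoKHR sci = {};
//     sci.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
//     sci.surface = surface;
//     sci.minImageCount = caps.minImageCount;
//     sci.imageFormat = formats[0].format;
//     sci.imageColorSpace = formats[0].colorSpace;
//     sci.imageExtent = caps.currentExtent;
//     sci.imageArrayLayers = 1;
//     sci.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;   // must be in supportedUsageFlags
//     sci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
//     sci.preTransform = caps.currentTransform;                // one bit, in supportedTransforms
//     sci.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;  // must be in supportedCompositeAlpha
//     sci.presentMode = VK_PRESENT_MODE_FIFO_KHR;              // FIFO is always supported
//     sci.clipped = VK_TRUE;
//     VkSwapchainKHR new_swapchain;
//     vkCreateSwapchainKHR(device, &sci, nullptr, &new_swapchain);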
8842
8843VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
8844    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8845    bool skip = false;
8846
8847    unique_lock_t lock(global_lock);
8848    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
8849    if (swapchain_data) {
8850        if (swapchain_data->images.size() > 0) {
8851            for (auto swapchain_image : swapchain_data->images) {
8852                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
8853                if (image_sub != dev_data->imageSubresourceMap.end()) {
8854                    for (auto imgsubpair : image_sub->second) {
8855                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
8856                        if (image_item != dev_data->imageLayoutMap.end()) {
8857                            dev_data->imageLayoutMap.erase(image_item);
8858                        }
8859                    }
8860                    dev_data->imageSubresourceMap.erase(image_sub);
8861                }
                skip |= ClearMemoryObjectBindings(dev_data, HandleToUint64(swapchain_image), kVulkanObjectTypeSwapchainKHR);
8863                dev_data->imageMap.erase(swapchain_image);
8864            }
8865        }
8866
8867        auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
8868        if (surface_state) {
8869            if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
8870            if (surface_state->old_swapchain == swapchain_data) surface_state->old_swapchain = nullptr;
8871        }
8872
8873        dev_data->swapchainMap.erase(swapchain);
8874    }
8875    lock.unlock();
8876    if (!skip) dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
8877}
8878
8879static bool PreCallValidateGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
8880                                                 uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
8881    bool skip = false;
8882    if (swapchain_state && pSwapchainImages) {
8883        lock_guard_t lock(global_lock);
8884        // Compare the preliminary value of *pSwapchainImageCount with the value this time:
8885        if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) {
8886            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8887                            HandleToUint64(device), __LINE__, SWAPCHAIN_PRIOR_COUNT, "DS",
                            "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages, but no prior call with NULL "
                            "pSwapchainImages has been made to retrieve the swapchain image count.");
8890        } else if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
8891            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8892                            HandleToUint64(device), __LINE__, SWAPCHAIN_INVALID_COUNT, "DS",
                            "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages, and with *pSwapchainImageCount "
                            "set to a value (%d) that is greater than the value (%d) that was returned when "
                            "pSwapchainImages was NULL.",
8896                            *pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
8897        }
8898    }
8899    return skip;
8900}
8901
8902static void PostCallRecordGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
8903                                                uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
8904    lock_guard_t lock(global_lock);
8905
8906    if (*pSwapchainImageCount > swapchain_state->images.size()) swapchain_state->images.resize(*pSwapchainImageCount);
8907
8908    if (pSwapchainImages) {
8909        if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_DETAILS) {
8910            swapchain_state->vkGetSwapchainImagesKHRState = QUERY_DETAILS;
8911        }
8912        for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
8913            if (swapchain_state->images[i] != VK_NULL_HANDLE) continue;  // Already retrieved this.
8914
8915            IMAGE_LAYOUT_NODE image_layout_node;
8916            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
8917            image_layout_node.format = swapchain_state->createInfo.imageFormat;
8918            // Add imageMap entries for each swapchain image
            VkImageCreateInfo image_ci = {};
            image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;  // keep the tracked create info well-formed
            image_ci.flags = 0;
8921            image_ci.imageType = VK_IMAGE_TYPE_2D;
8922            image_ci.format = swapchain_state->createInfo.imageFormat;
8923            image_ci.extent.width = swapchain_state->createInfo.imageExtent.width;
8924            image_ci.extent.height = swapchain_state->createInfo.imageExtent.height;
8925            image_ci.extent.depth = 1;
8926            image_ci.mipLevels = 1;
8927            image_ci.arrayLayers = swapchain_state->createInfo.imageArrayLayers;
8928            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
8929            image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
8930            image_ci.usage = swapchain_state->createInfo.imageUsage;
8931            image_ci.sharingMode = swapchain_state->createInfo.imageSharingMode;
8932            device_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
8933            auto &image_state = device_data->imageMap[pSwapchainImages[i]];
8934            image_state->valid = false;
8935            image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
8936            swapchain_state->images[i] = pSwapchainImages[i];
8937            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
8938            device_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
8939            device_data->imageLayoutMap[subpair] = image_layout_node;
8940        }
8941    }
8942
8943    if (*pSwapchainImageCount) {
8944        if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_COUNT) {
8945            swapchain_state->vkGetSwapchainImagesKHRState = QUERY_COUNT;
8946        }
8947        swapchain_state->get_swapchain_image_count = *pSwapchainImageCount;
8948    }
8949}
8950
8951VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
8952                                                     VkImage *pSwapchainImages) {
8953    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8954    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8955
8956    auto swapchain_state = GetSwapchainNode(device_data, swapchain);
8957    bool skip = PreCallValidateGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
8958
8959    if (!skip) {
8960        result = device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
8961    }
8962
8963    if ((result == VK_SUCCESS || result == VK_INCOMPLETE)) {
8964        PostCallRecordGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
8965    }
8966    return result;
8967}
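
// Illustrative sketch: the two-call idiom that advances the tracked state above
// from UNCALLED through QUERY_COUNT to QUERY_DETAILS.
//
//     uint32_t image_count = 0;
//     vkGetSwapchainImagesKHR(device, swapchain, &image_count, nullptr);        // QUERY_COUNT
//     std::vector<VkImage> images(image_count);
//     vkGetSwapchainImagesKHR(device, swapchain, &image_count, images.data());  // QUERY_DETAILS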
8968
8969VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
8970    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
8971    bool skip = false;
8972
8973    lock_guard_t lock(global_lock);
8974    auto queue_state = GetQueueState(dev_data, queue);
8975
8976    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
8977        auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
8978        if (pSemaphore && !pSemaphore->signaled) {
8979            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
8980                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
8981                            "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
8982                            HandleToUint64(pPresentInfo->pWaitSemaphores[i]));
8983        }
8984    }
8985
8986    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
8987        auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
8988        if (swapchain_data) {
8989            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
8990                skip |=
8991                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
8992                            HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
8993                            "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
8994                            pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
8995            } else {
8996                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
8997                auto image_state = GetImageState(dev_data, image);
8998
8999                if (image_state->shared_presentable) {
9000                    image_state->layout_locked = true;
9001                }
9002
9003                skip |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");
9004
9005                if (!image_state->acquired) {
9006                    skip |= log_msg(
9007                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9008                        HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED, "DS",
9009                        "vkQueuePresentKHR: Swapchain image index %u has not been acquired.", pPresentInfo->pImageIndices[i]);
9010                }
9011
9012                vector<VkImageLayout> layouts;
9013                if (FindLayouts(dev_data, image, layouts)) {
9014                    for (auto layout : layouts) {
9015                        if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) &&
9016                            (!dev_data->extensions.vk_khr_shared_presentable_image ||
9017                             (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
9018                            skip |=
9019                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
9020                                        HandleToUint64(queue), __LINE__, VALIDATION_ERROR_11200a20, "DS",
9021                                        "Images passed to present must be in layout "
9022                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s. %s",
9023                                        string_VkImageLayout(layout), validation_error_map[VALIDATION_ERROR_11200a20]);
9024                        }
9025                    }
9026                }
9027            }
9028
9029            // All physical devices and queue families are required to be able
9030            // to present to any native window on Android; require the
9031            // application to have established support on any other platform.
9032            if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
9033                auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
9034                auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});
9035
9036                if (support_it == surface_state->gpu_queue_support.end()) {
9037                    skip |=
9038                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9039                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE, "DS",
9040                                "vkQueuePresentKHR: Presenting image without calling "
9041                                "vkGetPhysicalDeviceSurfaceSupportKHR");
9042                } else if (!support_it->second) {
9043                    skip |=
9044                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9045                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, VALIDATION_ERROR_31800a18, "DS",
9046                                "vkQueuePresentKHR: Presenting image on queue that cannot "
9047                                "present to this surface. %s",
9048                                validation_error_map[VALIDATION_ERROR_31800a18]);
9049                }
9050            }
9051        }
9052    }
9053    if (pPresentInfo && pPresentInfo->pNext) {
9054        // Verify ext struct
9055        struct std_header {
9056            VkStructureType sType;
9057            const void *pNext;
9058        };
9059        std_header *pnext = (std_header *)pPresentInfo->pNext;
9060        while (pnext) {
9061            if (VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR == pnext->sType) {
9062                VkPresentRegionsKHR *present_regions = (VkPresentRegionsKHR *)pnext;
9063                for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
9064                    auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
9065                    assert(swapchain_data);
9066                    VkPresentRegionKHR region = present_regions->pRegions[i];
9067                    for (uint32_t j = 0; j < region.rectangleCount; ++j) {
9068                        VkRectLayerKHR rect = region.pRectangles[j];
9069                        // TODO: Need to update these errors to their unique error ids when available
9070                        if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
9071                            skip |= log_msg(
9072                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9073                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
9074                                "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext "
9075                                "chain, pRegion[%i].pRectangles[%i], the sum of offset.x "
9076                                "(%i) and extent.width (%i) is greater than the "
9077                                "corresponding swapchain's imageExtent.width (%i).",
9078                                i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
9079                        }
9080                        if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
9081                            skip |= log_msg(
9082                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9083                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
9084                                "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext "
9085                                "chain, pRegion[%i].pRectangles[%i], the sum of offset.y "
9086                                "(%i) and extent.height (%i) is greater than the "
9087                                "corresponding swapchain's imageExtent.height (%i).",
9088                                i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
9089                        }
                        if (rect.layer >= swapchain_data->createInfo.imageArrayLayers) {
                            skip |= log_msg(
                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
                                "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the "
                                "layer (%i) must be less than the corresponding swapchain's imageArrayLayers (%i).",
                                i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
9097                        }
9098                    }
9099                }
9100            } else if (VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE == pnext->sType) {
9101                VkPresentTimesInfoGOOGLE *present_times_info = (VkPresentTimesInfoGOOGLE *)pnext;
9102                if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
9103                    skip |=
9104                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                HandleToUint64(pPresentInfo->pSwapchains[0]), __LINE__, VALIDATION_ERROR_118009be, "DS",
9108                                "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but "
9109                                "pPresentInfo->swapchainCount is %i. For VkPresentTimesInfoGOOGLE down pNext "
9110                                "chain of VkPresentInfoKHR, VkPresentTimesInfoGOOGLE.swapchainCount "
9111                                "must equal VkPresentInfoKHR.swapchainCount.",
9112                                present_times_info->swapchainCount, pPresentInfo->swapchainCount);
9113                }
9114            }
9115            pnext = (std_header *)pnext->pNext;
9116        }
9117    }
9118
9119    if (skip) {
9120        return VK_ERROR_VALIDATION_FAILED_EXT;
9121    }
9122
9123    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
9124
9125    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
9126        // Semaphore waits occur before error generation, if the call reached
9127        // the ICD. (Confirm?)
9128        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
9129            auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
9130            if (pSemaphore) {
9131                pSemaphore->signaler.first = VK_NULL_HANDLE;
9132                pSemaphore->signaled = false;
9133            }
9134        }
9135
9136        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
9137            // Note: this is imperfect, in that we can get confused about what
9138            // did or didn't succeed-- but if the app does that, it's confused
9139            // itself just as much.
9140            auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
9141
9142            if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue;  // this present didn't actually happen.
9143
9144            // Mark the image as having been released to the WSI
9145            auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
9146            auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
9147            auto image_state = GetImageState(dev_data, image);
9148            image_state->acquired = false;
9149        }
9150
9151        // Note: even though presentation is directed to a queue, there is no
9152        // direct ordering between QP and subsequent work, so QP (and its
9153        // semaphore waits) /never/ participate in any completion proof.
9154    }
9155
9156    return result;
9157}
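
// Illustrative sketch: a present that satisfies the checks above: the image was
// acquired, transitioned to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR before the rendering
// submit signaled `render_done`, and the present waits on that semaphore.
//
//     VkPresentInfoKHR present = {};
//     present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
//     present.waitSemaphoreCount = 1;
//     present.pWaitSemaphores = &render_done;
//     present.swapchainCount = 1;
//     present.pSwapchains = &swapchain;
//     present.pImageIndices = &image_index;  // from vkAcquireNextImageKHR
//     vkQueuePresentKHR(queue, &present);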
9158
9159static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
9160                                                     const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
9161                                                     std::vector<SURFACE_STATE *> &surface_state,
9162                                                     std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
9163    if (pCreateInfos) {
9164        lock_guard_t lock(global_lock);
9165        for (uint32_t i = 0; i < swapchainCount; i++) {
9166            surface_state.push_back(GetSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
9167            old_swapchain_state.push_back(GetSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
9168            std::stringstream func_name;
            func_name << "vkCreateSharedSwapchainsKHR[" << i << "]";
9170            if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i],
9171                                                  old_swapchain_state[i])) {
9172                return true;
9173            }
9174        }
9175    }
9176    return false;
9177}
9178
9179static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
9180                                                    const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
9181                                                    std::vector<SURFACE_STATE *> &surface_state,
9182                                                    std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
9183    if (VK_SUCCESS == result) {
9184        for (uint32_t i = 0; i < swapchainCount; i++) {
9185            auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
9186            if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfos[i].presentMode ||
9187                VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfos[i].presentMode) {
9188                swapchain_state->shared_presentable = true;
9189            }
9190            surface_state[i]->swapchain = swapchain_state.get();
9191            dev_data->swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
9192        }
9193    } else {
9194        for (uint32_t i = 0; i < swapchainCount; i++) {
9195            surface_state[i]->swapchain = nullptr;
9196        }
9197    }
9198    // Spec requires that even if CreateSharedSwapchainKHR fails, oldSwapchain behaves as replaced.
9199    for (uint32_t i = 0; i < swapchainCount; i++) {
9200        if (old_swapchain_state[i]) {
9201            old_swapchain_state[i]->replaced = true;
9202        }
9203        surface_state[i]->old_swapchain = old_swapchain_state[i];
9204    }
9205    return;
9206}
9207
9208VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
9209                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
9210                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
9211    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9212    std::vector<SURFACE_STATE *> surface_state;
9213    std::vector<SWAPCHAIN_NODE *> old_swapchain_state;
9214
9215    if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
9216                                                 old_swapchain_state)) {
9217        return VK_ERROR_VALIDATION_FAILED_EXT;
9218    }
9219
9220    VkResult result =
9221        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
9222
9223    PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
9224                                            old_swapchain_state);
9225
9226    return result;
9227}
9228
9229VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
9230                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
9231    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9232    bool skip = false;
9233
9234    unique_lock_t lock(global_lock);
9235
9236    if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
9237        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9238                        HandleToUint64(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
9239                        "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
9240                        "to determine the completion of this operation.");
9241    }
9242
9243    auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
9244    if (pSemaphore && pSemaphore->signaled) {
9245        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9246                        HandleToUint64(semaphore), __LINE__, VALIDATION_ERROR_16400a0c, "DS",
9247                        "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state. %s",
9248                        validation_error_map[VALIDATION_ERROR_16400a0c]);
9249    }
9250
9251    auto pFence = GetFenceNode(dev_data, fence);
9252    if (pFence) {
9253        skip |= ValidateFenceForSubmit(dev_data, pFence);
9254    }
9255
    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
    assert(swapchain_data);  // dereferenced unconditionally below; invalid handles are caught by object tracking

9258    if (swapchain_data->replaced) {
9259        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9260                        HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_REPLACED, "DS",
9261                        "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still "
9262                        "present any images it has acquired, but cannot acquire any more.");
9263    }
9264
9265    auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
9266    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
9267        uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
9268                                                 [=](VkImage image) { return GetImageState(dev_data, image)->acquired; });
9269        if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
9270            skip |=
9271                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9272                        HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES, "DS",
9273                        "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
9274                        acquired_images);
9275        }
9276    }
9277
9278    if (swapchain_data->images.size() == 0) {
9279        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9280                        HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGES_NOT_FOUND, "DS",
9281                        "vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
9282                        "vkGetSwapchainImagesKHR after swapchain creation.");
9283    }
9284
9285    lock.unlock();
9286
9287    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
9288
9289    VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
9290
9291    lock.lock();
9292    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
9293        if (pFence) {
9294            pFence->state = FENCE_INFLIGHT;
9295            pFence->signaler.first = VK_NULL_HANDLE;  // ANI isn't on a queue, so this can't participate in a completion proof.
9296        }
9297
9298        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
9299        if (pSemaphore) {
9300            pSemaphore->signaled = true;
9301            pSemaphore->signaler.first = VK_NULL_HANDLE;
9302        }
9303
        // Mark the image as acquired, guarding against an unknown swapchain handle as above.
        if (swapchain_data) {
            auto image = swapchain_data->images[*pImageIndex];
            auto image_state = GetImageState(dev_data, image);
            image_state->acquired = true;
            image_state->shared_presentable = swapchain_data->shared_presentable;
        }
9309    }
9310    lock.unlock();
9311
9312    return result;
9313}
9314
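// NOTE (illustrative): a minimal sketch of the acquire pattern the checks above expect from an application:
// the semaphore passed in is unsignaled, the fence has been reset, and the number of concurrently acquired
// images stays within what the surface's minImageCount allows. Names are hypothetical and error handling is
// elided; documentation only, not compiled:
#if 0
VkResult ExampleAcquire(VkDevice dev, VkSwapchainKHR swapchain, VkSemaphore acquire_sem, VkFence acquire_fence) {
    vkResetFences(dev, 1, &acquire_fence);  // the fence must not already be signaled or in flight
    uint32_t image_index = 0;
    // acquire_sem must not be signaled or have a pending wait; it is signaled when the image is ready
    return vkAcquireNextImageKHR(dev, swapchain, UINT64_MAX, acquire_sem, acquire_fence, &image_index);
}
#endif
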
9315VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
9316                                                        VkPhysicalDevice *pPhysicalDevices) {
9317    bool skip = false;
9318    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9319    assert(instance_data);
9320
9321    // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
9322    if (NULL == pPhysicalDevices) {
9323        instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
9324    } else {
9325        if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
9326            // Flag warning here. You can call this without having queried the count, but it may not be
9327            // robust on platforms with multiple physical devices.
9328            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
9329                            0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
9330                            "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
9331                            "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
9332        }  // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
9333        else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
9334            // Having actual count match count from app is not a requirement, so this can be a warning
9335            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9336                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
9337                            "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
9338                            "supported by this instance is %u.",
9339                            *pPhysicalDeviceCount, instance_data->physical_devices_count);
9340        }
9341        instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
9342    }
9343    if (skip) {
9344        return VK_ERROR_VALIDATION_FAILED_EXT;
9345    }
9346    VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
9347    if (NULL == pPhysicalDevices) {
9348        instance_data->physical_devices_count = *pPhysicalDeviceCount;
9349    } else if (result == VK_SUCCESS) {  // Save physical devices
9350        for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
9351            auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
9352            phys_device_state.phys_device = pPhysicalDevices[i];
9353            // Init actual features for each physical device
9354            instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
9355        }
9356    }
9357    return result;
9358}
9359
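// NOTE (illustrative): the two-call idiom that avoids the DEVLIMITS_MISSING_QUERY_COUNT warning emitted
// above, sketched from the application side. Hypothetical names; documentation only, not compiled:
#if 0
std::vector<VkPhysicalDevice> ExampleEnumerate(VkInstance instance) {
    uint32_t count = 0;
    vkEnumeratePhysicalDevices(instance, &count, nullptr);         // first call: query the count
    std::vector<VkPhysicalDevice> devices(count);
    vkEnumeratePhysicalDevices(instance, &count, devices.data());  // second call: fetch the handles
    return devices;
}
#endif
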
9360// Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
9361static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
9362                                                                 PHYSICAL_DEVICE_STATE *pd_state,
9363                                                                 uint32_t requested_queue_family_property_count, bool qfp_null,
9364                                                                 const char *caller_name) {
9365    bool skip = false;
9366    if (!qfp_null) {
9367        // Verify that for each physical device, this command is called first with NULL pQueueFamilyProperties in order to get count
9368        if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
9369            skip |= log_msg(
9370                instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
9371                HandleToUint64(pd_state->phys_device), __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
9372                "%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended "
9373                "to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.",
9374                caller_name, caller_name);
9375            // Then verify that pCount that is passed in on second call matches what was returned
9376        } else if (pd_state->queue_family_count != requested_queue_family_property_count) {
9377            skip |= log_msg(
9378                instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
9379                HandleToUint64(pd_state->phys_device), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
9380                "%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32
9381                ", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32
9382                ". It is recommended to instead receive all the properties by calling %s with pQueueFamilyPropertyCount that was "
9383                "previously obtained by calling %s with NULL pQueueFamilyProperties.",
9384                caller_name, requested_queue_family_property_count, pd_state->queue_family_count, caller_name, caller_name);
9385        }
9386        pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
9387    }
9388
9389    return skip;
9390}
9391
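// NOTE (illustrative): the query sequence the warnings above recommend, sketched from the application side.
// Hypothetical names; documentation only, not compiled:
#if 0
std::vector<VkQueueFamilyProperties> ExampleQueryQueueFamilies(VkPhysicalDevice gpu) {
    uint32_t count = 0;
    vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);       // obtain the maximal count first
    std::vector<VkQueueFamilyProperties> props(count);
    vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, props.data());  // then fetch with that count
    return props;
}
#endif
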
9392static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
9393                                                                  PHYSICAL_DEVICE_STATE *pd_state,
9394                                                                  uint32_t *pQueueFamilyPropertyCount,
9395                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
9396    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
9397                                                                (nullptr == pQueueFamilyProperties),
9398                                                                "vkGetPhysicalDeviceQueueFamilyProperties()");
9399}
9400
9401static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_layer_data *instance_data,
9402                                                                      PHYSICAL_DEVICE_STATE *pd_state,
9403                                                                      uint32_t *pQueueFamilyPropertyCount,
9404                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
9405    return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
9406                                                                (nullptr == pQueueFamilyProperties),
9407                                                                "vkGetPhysicalDeviceQueueFamilyProperties2KHR()");
9408}
9409
9410// Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
9411static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
9412                                                                    VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
9413    if (!pQueueFamilyProperties) {
9414        if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
9415            pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
9416        pd_state->queue_family_count = count;
9417    } else {  // Save queue family properties
9418        pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
9419        pd_state->queue_family_count = std::max(pd_state->queue_family_count, count);
9420
9421        pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
9422        for (uint32_t i = 0; i < count; ++i) {
9423            pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
9424        }
9425    }
9426}
9427
9428static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
9429                                                                 VkQueueFamilyProperties *pQueueFamilyProperties) {
9430    VkQueueFamilyProperties2KHR *pqfp = nullptr;
9431    std::vector<VkQueueFamilyProperties2KHR> qfp;
    if (pQueueFamilyProperties) {
        qfp.resize(count);  // Allocate the shadow array only when there are properties to wrap.
9434        for (uint32_t i = 0; i < count; ++i) {
9435            qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
9436            qfp[i].pNext = nullptr;
9437            qfp[i].queueFamilyProperties = pQueueFamilyProperties[i];
9438        }
9439        pqfp = qfp.data();
9440    }
9441    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pqfp);
9442}
9443
9444static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
9445                                                                     VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
9446    StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pQueueFamilyProperties);
9447}
9448
9449VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
9450                                                                  uint32_t *pQueueFamilyPropertyCount,
9451                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
9452    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9453    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9454    assert(physical_device_state);
9455    unique_lock_t lock(global_lock);
9456
9457    bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state,
9458                                                                      pQueueFamilyPropertyCount, pQueueFamilyProperties);
9459
9460    lock.unlock();
9461
9462    if (skip) return;
9463
9464    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
9465                                                                         pQueueFamilyProperties);
9466
9467    lock.lock();
9468    PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties);
9469}
9470
9471VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
9472                                                                      uint32_t *pQueueFamilyPropertyCount,
9473                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
9474    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9475    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9476    assert(physical_device_state);
9477    unique_lock_t lock(global_lock);
9478
9479    bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_data, physical_device_state,
9480                                                                          pQueueFamilyPropertyCount, pQueueFamilyProperties);
9481
9482    lock.unlock();
9483
9484    if (skip) return;
9485
9486    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
9487                                                                             pQueueFamilyProperties);
9488
9489    lock.lock();
9490    PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(physical_device_state, *pQueueFamilyPropertyCount,
9491                                                             pQueueFamilyProperties);
9492}
9493
9494template <typename TCreateInfo, typename FPtr>
9495static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
9496                              VkSurfaceKHR *pSurface, FPtr fptr) {
9497    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9498
9499    // Call down the call chain:
9500    VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
9501
9502    if (result == VK_SUCCESS) {
9503        unique_lock_t lock(global_lock);
9504        instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
9505        lock.unlock();
9506    }
9507
9508    return result;
9509}
9510
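// NOTE (illustrative): the template above dispatches through a pointer-to-member of the dispatch table,
// where the member is itself a function pointer, so every platform surface type shares one implementation.
// A minimal standalone sketch of the same mechanism (hypothetical types; documentation only, not compiled):
#if 0
struct Table { int (*create)(int); };
using CreatePtr = int (*Table::*)(int);  // pointer-to-member; the member is a function pointer
int Dispatch(Table &t, CreatePtr fptr, int arg) {
    return (t.*fptr)(arg);  // ".*" fetches the stored function pointer, which is then called
}
// e.g. Dispatch(table, &Table::create, 42);
#endif
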
9511VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
9512    bool skip = false;
9513    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9514    unique_lock_t lock(global_lock);
9515    auto surface_state = GetSurfaceState(instance_data, surface);
9516
    if (surface_state && surface_state->swapchain) {
        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
                        HandleToUint64(instance), __LINE__, VALIDATION_ERROR_26c009e4, "DS",
                        "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed. %s",
                        validation_error_map[VALIDATION_ERROR_26c009e4]);
9522    }
9523    instance_data->surface_map.erase(surface);
9524    lock.unlock();
9525    if (!skip) {
9526        instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
9527    }
9528}
9529
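// NOTE (illustrative): the teardown order required by VALIDATION_ERROR_26c009e4 above: the swapchain must go
// before its surface. Hypothetical sketch; documentation only, not compiled:
#if 0
void ExampleTeardown(VkInstance instance, VkDevice dev, VkSwapchainKHR swapchain, VkSurfaceKHR surface) {
    vkDestroySwapchainKHR(dev, swapchain, nullptr);   // destroy the swapchain first
    vkDestroySurfaceKHR(instance, surface, nullptr);  // only then destroy the surface it was created from
}
#endif
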
9530VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
9531                                                            const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9532    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
9533}
9534
9535#ifdef VK_USE_PLATFORM_ANDROID_KHR
9536VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
9537                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9538    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
9539}
9540#endif  // VK_USE_PLATFORM_ANDROID_KHR
9541
9542#ifdef VK_USE_PLATFORM_MIR_KHR
9543VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
9544                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9545    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
9546}
9547
9548VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceMirPresentationSupportKHR(VkPhysicalDevice physicalDevice,
9549                                                                          uint32_t queueFamilyIndex, MirConnection *connection) {
9550    bool skip = false;
9551    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9552
9553    unique_lock_t lock(global_lock);
9554    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9555
9556    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2d2009e2,
9557                                              "vkGetPhysicalDeviceMirPresentationSupportKHR", "queueFamilyIndex");
9558
9559    lock.unlock();
9560
9561    if (skip) return VK_FALSE;
9562
9563    // Call down the call chain:
9564    VkBool32 result =
9565        instance_data->dispatch_table.GetPhysicalDeviceMirPresentationSupportKHR(physicalDevice, queueFamilyIndex, connection);
9566
9567    return result;
9568}
9569#endif  // VK_USE_PLATFORM_MIR_KHR
9570
9571#ifdef VK_USE_PLATFORM_WAYLAND_KHR
9572VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
9573                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9574    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
9575}
9576
9577VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
9578                                                                              uint32_t queueFamilyIndex,
9579                                                                              struct wl_display *display) {
9580    bool skip = false;
9581    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9582
9583    unique_lock_t lock(global_lock);
9584    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9585
9586    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f000a34,
9587                                              "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
9588
9589    lock.unlock();
9590
9591    if (skip) return VK_FALSE;
9592
9593    // Call down the call chain:
9594    VkBool32 result =
9595        instance_data->dispatch_table.GetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex, display);
9596
9597    return result;
9598}
9599#endif  // VK_USE_PLATFORM_WAYLAND_KHR
9600
9601#ifdef VK_USE_PLATFORM_WIN32_KHR
9602VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
9603                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9604    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
9605}
9606
9607VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
9608                                                                            uint32_t queueFamilyIndex) {
9609    bool skip = false;
9610    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9611
9612    unique_lock_t lock(global_lock);
9613    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9614
9615    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f200a3a,
9616                                              "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
9617
9618    lock.unlock();
9619
9620    if (skip) return VK_FALSE;
9621
9622    // Call down the call chain:
9623    VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex);
9624
9625    return result;
9626}
9627#endif  // VK_USE_PLATFORM_WIN32_KHR
9628
9629#ifdef VK_USE_PLATFORM_XCB_KHR
9630VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
9631                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9632    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
9633}
9634
9635VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
9636                                                                          uint32_t queueFamilyIndex, xcb_connection_t *connection,
9637                                                                          xcb_visualid_t visual_id) {
9638    bool skip = false;
9639    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9640
9641    unique_lock_t lock(global_lock);
9642    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9643
9644    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f400a40,
9645                                              "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
9646
9647    lock.unlock();
9648
9649    if (skip) return VK_FALSE;
9650
9651    // Call down the call chain:
9652    VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex,
9653                                                                                               connection, visual_id);
9654
9655    return result;
9656}
9657#endif  // VK_USE_PLATFORM_XCB_KHR
9658
9659#ifdef VK_USE_PLATFORM_XLIB_KHR
9660VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
9661                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
9662    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
9663}
9664
9665VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
9666                                                                           uint32_t queueFamilyIndex, Display *dpy,
9667                                                                           VisualID visualID) {
9668    bool skip = false;
9669    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9670
9671    unique_lock_t lock(global_lock);
9672    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9673
9674    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f600a46,
9675                                              "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
9676
9677    lock.unlock();
9678
9679    if (skip) return VK_FALSE;
9680
9681    // Call down the call chain:
9682    VkBool32 result =
9683        instance_data->dispatch_table.GetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex, dpy, visualID);
9684
9685    return result;
9686}
9687#endif  // VK_USE_PLATFORM_XLIB_KHR
9688
9689VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
9690                                                                       VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
9691    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9692
9693    unique_lock_t lock(global_lock);
9694    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9695    lock.unlock();
9696
9697    auto result =
9698        instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
9699
    if (result == VK_SUCCESS) {
        lock.lock();  // Reacquire the global lock before mutating shared physical-device state.
        physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
        physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
        lock.unlock();
    }
9704
9705    return result;
9706}
9707
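// NOTE (illustrative): the capabilities recorded here feed the acquire-count check in vkAcquireNextImageKHR
// above. A hypothetical sketch of how an application typically clamps its requested image count against them
// (documentation only, not compiled):
#if 0
uint32_t ExamplePickImageCount(const VkSurfaceCapabilitiesKHR &caps) {
    uint32_t desired = caps.minImageCount + 1;  // one spare image reduces driver stalls
    if (caps.maxImageCount > 0 && desired > caps.maxImageCount) desired = caps.maxImageCount;  // 0 means "no limit"
    return desired;
}
#endif
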
9708static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instance_layer_data *instanceData,
9709                                                                   VkPhysicalDevice physicalDevice,
9710                                                                   VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
9711    unique_lock_t lock(global_lock);
9712    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
9713    physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
9714    physicalDeviceState->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities;
9715}
9716
9717VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
9718                                                                        const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
9719                                                                        VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
9720    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9721
9722    auto result =
9723        instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, pSurfaceInfo, pSurfaceCapabilities);
9724
9725    if (result == VK_SUCCESS) {
9726        PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instanceData, physicalDevice, pSurfaceCapabilities);
9727    }
9728
9729    return result;
9730}
9731
9732static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instance_layer_data *instanceData,
9733                                                                   VkPhysicalDevice physicalDevice,
9734                                                                   VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
9735    unique_lock_t lock(global_lock);
9736    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
9737    physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
9738    physicalDeviceState->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount;
9739    physicalDeviceState->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount;
9740    physicalDeviceState->surfaceCapabilities.currentExtent = pSurfaceCapabilities->currentExtent;
9741    physicalDeviceState->surfaceCapabilities.minImageExtent = pSurfaceCapabilities->minImageExtent;
9742    physicalDeviceState->surfaceCapabilities.maxImageExtent = pSurfaceCapabilities->maxImageExtent;
9743    physicalDeviceState->surfaceCapabilities.maxImageArrayLayers = pSurfaceCapabilities->maxImageArrayLayers;
9744    physicalDeviceState->surfaceCapabilities.supportedTransforms = pSurfaceCapabilities->supportedTransforms;
9745    physicalDeviceState->surfaceCapabilities.currentTransform = pSurfaceCapabilities->currentTransform;
9746    physicalDeviceState->surfaceCapabilities.supportedCompositeAlpha = pSurfaceCapabilities->supportedCompositeAlpha;
9747    physicalDeviceState->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags;
9748}
9749
9750VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
9751                                                                        VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
9752    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9753
9754    auto result =
9755        instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities);
9756
9757    if (result == VK_SUCCESS) {
9758        PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instanceData, physicalDevice, pSurfaceCapabilities);
9759    }
9760
9761    return result;
9762}
9763
9764VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
9765                                                                  VkSurfaceKHR surface, VkBool32 *pSupported) {
9766    bool skip = false;
9767    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9768
9769    unique_lock_t lock(global_lock);
9770    const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9771    auto surface_state = GetSurfaceState(instance_data, surface);
9772
9773    skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2ee009ea,
9774                                              "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
9775
9776    lock.unlock();
9777
9778    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
9779
9780    auto result =
9781        instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
9782
9783    if (result == VK_SUCCESS) {
9784        surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE);
9785    }
9786
9787    return result;
9788}
9789
9790VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
9791                                                                       uint32_t *pPresentModeCount,
9792                                                                       VkPresentModeKHR *pPresentModes) {
9793    bool skip = false;
9794    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9795    unique_lock_t lock(global_lock);
9796    // TODO: this isn't quite right. available modes may differ by surface AND physical device.
9797    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9798    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
9799
9800    if (pPresentModes) {
9801        // Compare the preliminary value of *pPresentModeCount with the value this time:
9802        auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
9803        switch (call_state) {
9804            case UNCALLED:
9805                skip |= log_msg(
9806                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
9807                    HandleToUint64(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
9808                    "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModeCount; but no prior positive "
9809                    "value has been seen for pPresentModeCount.");
9810                break;
9811            default:
9812                // both query count and query details
9813                if (*pPresentModeCount != prev_mode_count) {
9814                    skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9815                                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), __LINE__,
9816                                    DEVLIMITS_COUNT_MISMATCH, "DL",
9817                                    "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that "
9818                                    "differs from the value "
9819                                    "(%u) that was returned when pPresentModes was NULL.",
9820                                    *pPresentModeCount, prev_mode_count);
9821                }
9822                break;
9823        }
9824    }
9825    lock.unlock();
9826
9827    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
9828
9829    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount,
9830                                                                                        pPresentModes);
9831
9832    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
9833        lock.lock();
9834
9835        if (*pPresentModeCount) {
9836            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
9837            if (*pPresentModeCount > physical_device_state->present_modes.size())
9838                physical_device_state->present_modes.resize(*pPresentModeCount);
9839        }
9840        if (pPresentModes) {
9841            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
9842            for (uint32_t i = 0; i < *pPresentModeCount; i++) {
9843                physical_device_state->present_modes[i] = pPresentModes[i];
9844            }
9845        }
9846    }
9847
9848    return result;
9849}
9850
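// NOTE (illustrative): after the two-call query validated above, applications commonly fall back to FIFO,
// the only present mode the spec guarantees. Hypothetical sketch; documentation only, not compiled:
#if 0
VkPresentModeKHR ExamplePickPresentMode(const std::vector<VkPresentModeKHR> &modes) {
    for (auto mode : modes) {
        if (mode == VK_PRESENT_MODE_MAILBOX_KHR) return mode;  // low-latency, if available
    }
    return VK_PRESENT_MODE_FIFO_KHR;  // always supported
}
#endif
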
9851VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
9852                                                                  uint32_t *pSurfaceFormatCount,
9853                                                                  VkSurfaceFormatKHR *pSurfaceFormats) {
9854    bool skip = false;
9855    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9856    unique_lock_t lock(global_lock);
9857    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
9858    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
9859
9860    if (pSurfaceFormats) {
9861        auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();
9862
9863        switch (call_state) {
9864            case UNCALLED:
                // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, the application likely did not
                // previously call this function with a NULL value of pSurfaceFormats to query the count:
                skip |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                    "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats, but no prior call with NULL "
                    "pSurfaceFormats has been made to query pSurfaceFormatCount.");
9873                break;
9874            default:
9875                if (prev_format_count != *pSurfaceFormatCount) {
9876                    skip |= log_msg(
9877                        instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9878                        VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), __LINE__,
9879                        DEVLIMITS_COUNT_MISMATCH, "DL",
9880                        "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount, and with pSurfaceFormats "
9881                        "set "
9882                        "to "
9883                        "a value (%u) that is greater than the value (%u) that was returned when pSurfaceFormatCount was NULL.",
9884                        *pSurfaceFormatCount, prev_format_count);
9885                }
9886                break;
9887        }
9888    }
9889    lock.unlock();
9890
9891    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
9892
9893    // Call down the call chain:
9894    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
9895                                                                                   pSurfaceFormats);
9896
9897    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
9898        lock.lock();
9899
9900        if (*pSurfaceFormatCount) {
9901            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
9902            if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
9903                physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
9904        }
9905        if (pSurfaceFormats) {
9906            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
9907            for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
9908                physical_device_state->surface_formats[i] = pSurfaceFormats[i];
9909            }
9910        }
9911    }
9912    return result;
9913}
9914
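// NOTE (illustrative): the count-then-details sequence this entry point tracks, followed by a common format
// choice. Hypothetical sketch; documentation only, not compiled:
#if 0
VkSurfaceFormatKHR ExamplePickFormat(VkPhysicalDevice gpu, VkSurfaceKHR surface) {
    uint32_t count = 0;
    vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &count, nullptr);  // query count with NULL pSurfaceFormats
    std::vector<VkSurfaceFormatKHR> formats(count);
    vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &count, formats.data());
    for (const auto &f : formats) {
        if (f.format == VK_FORMAT_B8G8R8A8_UNORM) return f;
    }
    return formats[0];  // assumes count > 0
}
#endif
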
9915static void PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instance_layer_data *instanceData, VkPhysicalDevice physicalDevice,
9916                                                              uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats) {
9917    unique_lock_t lock(global_lock);
9918    auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
9919    if (*pSurfaceFormatCount) {
9920        if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) {
9921            physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT;
9922        }
9923        if (*pSurfaceFormatCount > physicalDeviceState->surface_formats.size())
9924            physicalDeviceState->surface_formats.resize(*pSurfaceFormatCount);
9925    }
9926    if (pSurfaceFormats) {
9927        if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) {
9928            physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS;
9929        }
9930        for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
9931            physicalDeviceState->surface_formats[i] = pSurfaceFormats[i].surfaceFormat;
9932        }
9933    }
9934}
9935
9936VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
9937                                                                   const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
9938                                                                   uint32_t *pSurfaceFormatCount,
9939                                                                   VkSurfaceFormat2KHR *pSurfaceFormats) {
9940    auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
9941    auto result = instanceData->dispatch_table.GetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, pSurfaceInfo,
9942                                                                                   pSurfaceFormatCount, pSurfaceFormats);
9943    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
9944        PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instanceData, physicalDevice, pSurfaceFormatCount, pSurfaceFormats);
9945    }
9946    return result;
9947}
9948
9949VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
9950                                                            const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
9951                                                            const VkAllocationCallbacks *pAllocator,
9952                                                            VkDebugReportCallbackEXT *pMsgCallback) {
9953    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9954    VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
9955    if (VK_SUCCESS == res) {
9956        lock_guard_t lock(global_lock);
9957        res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
9958    }
9959    return res;
9960}
9961
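// NOTE (illustrative): how an application typically hooks this entry point. Hypothetical callback;
// documentation only, not compiled:
#if 0
static VKAPI_ATTR VkBool32 VKAPI_CALL ExampleCallback(VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType,
                                                      uint64_t object, size_t location, int32_t code,
                                                      const char *layerPrefix, const char *msg, void *userData) {
    fprintf(stderr, "[%s] %s\n", layerPrefix, msg);
    return VK_FALSE;  // returning VK_FALSE lets the triggering call proceed
}

void ExampleRegister(VkInstance instance, VkDebugReportCallbackEXT *out) {
    VkDebugReportCallbackCreateInfoEXT ci = {};
    ci.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
    ci.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
    ci.pfnCallback = ExampleCallback;
    vkCreateDebugReportCallbackEXT(instance, &ci, nullptr, out);  // normally fetched via vkGetInstanceProcAddr
}
#endif
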
9962VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
9963                                                         const VkAllocationCallbacks *pAllocator) {
9964    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9965    instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
9966    lock_guard_t lock(global_lock);
9967    layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
9968}
9969
9970VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
9971                                                 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
9972                                                 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
9973    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
9974    instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
9975}
9976
9977VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
9978    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
9979}
9980
9981VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
9982                                                              VkLayerProperties *pProperties) {
9983    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
9984}
9985
9986VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
9987                                                                    VkExtensionProperties *pProperties) {
9988    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
9989        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
9990
9991    return VK_ERROR_LAYER_NOT_PRESENT;
9992}
9993
9994VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
9995                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
9996    if (pLayerName && !strcmp(pLayerName, global_layer.layerName)) return util_GetExtensionProperties(0, NULL, pCount, pProperties);
9997
9998    assert(physicalDevice);
9999
10000    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10001    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
10002}
10003
10004VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroupsKHX(
10005    VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHX *pPhysicalDeviceGroupProperties) {
10006    bool skip = false;
10007    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
10008
10009    if (instance_data) {
10010        // For this instance, flag when EnumeratePhysicalDeviceGroupsKHX goes to QUERY_COUNT and then QUERY_DETAILS.
10011        if (NULL == pPhysicalDeviceGroupProperties) {
10012            instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_COUNT;
10013        } else {
10014            if (UNCALLED == instance_data->vkEnumeratePhysicalDeviceGroupsState) {
10015                // Flag warning here. You can call this without having queried the count, but it may not be
10016                // robust on platforms with multiple physical devices.
10017                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
10018                                VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
10019                                "Call sequence has vkEnumeratePhysicalDeviceGroupsKHX() w/ non-NULL "
10020                                "pPhysicalDeviceGroupProperties. You should first "
10021                                "call vkEnumeratePhysicalDeviceGroupsKHX() w/ NULL pPhysicalDeviceGroupProperties to query "
10022                                "pPhysicalDeviceGroupCount.");
10023            } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
10024            else if (instance_data->physical_device_groups_count != *pPhysicalDeviceGroupCount) {
10025                // Having actual count match count from app is not a requirement, so this can be a warning
10026                skip |=
10027                    log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
10028                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
10029                            "Call to vkEnumeratePhysicalDeviceGroupsKHX() w/ pPhysicalDeviceGroupCount value %u, but actual count "
10030                            "supported by this instance is %u.",
10031                            *pPhysicalDeviceGroupCount, instance_data->physical_device_groups_count);
10032            }
10033            instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_DETAILS;
10034        }
10035        if (skip) {
10036            return VK_ERROR_VALIDATION_FAILED_EXT;
10037        }
10038        VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroupsKHX(instance, pPhysicalDeviceGroupCount,
10039            pPhysicalDeviceGroupProperties);
10040        if (NULL == pPhysicalDeviceGroupProperties) {
10041            instance_data->physical_device_groups_count = *pPhysicalDeviceGroupCount;
10042        } else if (result == VK_SUCCESS) { // Save physical devices
10043            for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) {
10044                for (uint32_t j = 0; j < pPhysicalDeviceGroupProperties[i].physicalDeviceCount; j++) {
10045                    VkPhysicalDevice cur_phys_dev = pPhysicalDeviceGroupProperties[i].physicalDevices[j];
10046                    auto &phys_device_state = instance_data->physical_device_map[cur_phys_dev];
10047                    phys_device_state.phys_device = cur_phys_dev;
10048                    // Init actual features for each physical device
10049                    instance_data->dispatch_table.GetPhysicalDeviceFeatures(cur_phys_dev, &phys_device_state.features);
10050                }
10051            }
10052        }
10053        return result;
    } else {
        // instance_data is null here, so its report_data cannot be used for logging; fall back to the console.
        LOGCONSOLE("Invalid instance (0x%" PRIxLEAST64 ") passed into vkEnumeratePhysicalDeviceGroupsKHX().",
                   HandleToUint64(instance));
    }
10060    return VK_ERROR_VALIDATION_FAILED_EXT;
10061}
10062
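// NOTE (illustrative): the same two-call idiom applies to device groups; each returned group then lists its
// member devices. Hypothetical sketch; documentation only, not compiled:
#if 0
void ExampleEnumerateGroups(VkInstance instance) {
    uint32_t group_count = 0;
    vkEnumeratePhysicalDeviceGroupsKHX(instance, &group_count, nullptr);  // query the count first
    std::vector<VkPhysicalDeviceGroupPropertiesKHX> groups(group_count);
    for (auto &g : groups) g.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHX;
    vkEnumeratePhysicalDeviceGroupsKHX(instance, &group_count, groups.data());
}
#endif
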
10063VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
10064                                                                 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
10065                                                                 const VkAllocationCallbacks *pAllocator,
10066                                                                 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
10067    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10068    VkResult result =
10069        dev_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
10070    if (VK_SUCCESS == result) {
10071        lock_guard_t lock(global_lock);
10072        // Shadow template createInfo for later updates
10073        safe_VkDescriptorUpdateTemplateCreateInfoKHR *local_create_info =
10074            new safe_VkDescriptorUpdateTemplateCreateInfoKHR(pCreateInfo);
10075        std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
10076        dev_data->desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state);
10077    }
10078    return result;
10079}
10080
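// NOTE (illustrative): what the shadowed create info is later used for: the template maps a raw pData blob
// onto descriptor writes. Hypothetical application-side sketch; documentation only, not compiled:
#if 0
void ExampleTemplateUpdate(VkDevice dev, VkDescriptorSetLayout layout, VkDescriptorSet set,
                           const VkDescriptorBufferInfo *buffer_info) {
    VkDescriptorUpdateTemplateEntryKHR entry = {};
    entry.dstBinding = 0;
    entry.descriptorCount = 1;
    entry.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    entry.offset = 0;                             // byte offset of the info inside pData
    entry.stride = sizeof(VkDescriptorBufferInfo);

    VkDescriptorUpdateTemplateCreateInfoKHR ci = {};
    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
    ci.descriptorUpdateEntryCount = 1;
    ci.pDescriptorUpdateEntries = &entry;
    ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
    ci.descriptorSetLayout = layout;

    VkDescriptorUpdateTemplateKHR tmpl = VK_NULL_HANDLE;
    vkCreateDescriptorUpdateTemplateKHR(dev, &ci, nullptr, &tmpl);
    vkUpdateDescriptorSetWithTemplateKHR(dev, set, tmpl, buffer_info);  // pData interpreted via the entries
}
#endif
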
10081VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
10082                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
10083                                                              const VkAllocationCallbacks *pAllocator) {
10084    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10085    unique_lock_t lock(global_lock);
10086    dev_data->desc_template_map.erase(descriptorUpdateTemplate);
10087    lock.unlock();
10088    dev_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
10089}
10090
// PostCallRecord* handles recording state updates following the call down the chain to UpdateDescriptorSetWithTemplateKHR()
10092static void PostCallRecordUpdateDescriptorSetWithTemplateKHR(layer_data *device_data, VkDescriptorSet descriptorSet,
10093                                                             VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
10094                                                             const void *pData) {
10095    auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
    if (template_map_entry == device_data->desc_template_map.end()) {
        assert(0);
        return;  // assert() compiles out in release builds; bail rather than dereference an end() iterator below.
    }
10099
10100    cvdescriptorset::PerformUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_map_entry->second, pData);
10101}
10102
10103VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
10104                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
10105                                                              const void *pData) {
10106    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10107    device_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData);
10108
10109    PostCallRecordUpdateDescriptorSetWithTemplateKHR(device_data, descriptorSet, descriptorUpdateTemplate, pData);
10110}
10111
10112VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
10113                                                               VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
10114                                                               VkPipelineLayout layout, uint32_t set, const void *pData) {
10115    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
10116    dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData);
10117}
10118
10119static void PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_layer_data *instanceData,
10120                                                                     VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
10121                                                                     VkDisplayPlanePropertiesKHR *pProperties) {
10122    unique_lock_t lock(global_lock);
10123    auto physical_device_state = GetPhysicalDeviceState(instanceData, physicalDevice);
10124
10125    if (*pPropertyCount) {
10126        if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) {
10127            physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT;
10128        }
10129        physical_device_state->display_plane_property_count = *pPropertyCount;
10130    }
10131    if (pProperties) {
10132        if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_DETAILS) {
10133            physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_DETAILS;
10134        }
10135    }
10136}
10137
10138VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
10139                                                                          VkDisplayPlanePropertiesKHR *pProperties) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    VkResult result =
        instance_data->dispatch_table.GetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties);
10144
10145    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
10146        PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_data, physicalDevice, pPropertyCount, pProperties);
10147    }
10148
10149    return result;
10150}
10151
10152static bool ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_layer_data *instance_data,
10153                                                                    VkPhysicalDevice physicalDevice, uint32_t planeIndex,
10154                                                                    const char *api_name) {
10155    bool skip = false;
10156    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
10157    if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) {
10158        skip |= log_msg(
10159            instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
10160            HandleToUint64(physicalDevice), __LINE__, SWAPCHAIN_GET_SUPPORTED_DISPLAYS_WITHOUT_QUERY, "DL",
10161            "Potential problem with calling %s() without first querying vkGetPhysicalDeviceDisplayPlanePropertiesKHR.", api_name);
10162    } else {
10163        if (planeIndex >= physical_device_state->display_plane_property_count) {
10164            skip |= log_msg(
10165                instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
10166                HandleToUint64(physicalDevice), __LINE__, VALIDATION_ERROR_29c009c2, "DL",
10167                "%s(): planeIndex must be in the range [0, %d] that was returned by vkGetPhysicalDeviceDisplayPlanePropertiesKHR. "
10168                "Do you have the plane index hardcoded? %s",
10169                api_name, physical_device_state->display_plane_property_count - 1, validation_error_map[VALIDATION_ERROR_29c009c2]);
10170        }
10171    }
10172    return skip;
10173}
10174
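// NOTE (illustrative): the prerequisite call sequence the check above enforces. Hypothetical sketch;
// documentation only, not compiled:
#if 0
void ExampleQueryPlanes(VkPhysicalDevice gpu) {
    uint32_t plane_count = 0;
    vkGetPhysicalDeviceDisplayPlanePropertiesKHR(gpu, &plane_count, nullptr);  // establishes the valid planeIndex range
    for (uint32_t plane = 0; plane < plane_count; ++plane) {
        uint32_t display_count = 0;
        vkGetDisplayPlaneSupportedDisplaysKHR(gpu, plane, &display_count, nullptr);  // planeIndex now known-valid
    }
}
#endif
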
10175static bool PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
10176                                                               uint32_t planeIndex) {
10177    bool skip = false;
10178    lock_guard_t lock(global_lock);
10179    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
10180                                                                    "vkGetDisplayPlaneSupportedDisplaysKHR");
10181    return skip;
10182}
10183
10184VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
10185                                                                   uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
10186    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10187    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10188    bool skip = PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_data, physicalDevice, planeIndex);
10189    if (!skip) {
10190        result =
10191            instance_data->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
10192    }
10193    return result;
10194}
10195
10196static bool PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
10197                                                          uint32_t planeIndex) {
10198    bool skip = false;
10199    lock_guard_t lock(global_lock);
10200    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
10201                                                                    "vkGetDisplayPlaneCapabilitiesKHR");
10202    return skip;
10203}
10204
10205VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
10206                                                              uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
10207    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10208    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10209    bool skip = PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_data, physicalDevice, planeIndex);
10210
10211    if (!skip) {
10212        result = instance_data->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
10213    }
10214
10215    return result;
10216}

VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
    unique_lock_t lock(global_lock);
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (pNameInfo->pObjectName) {
        // Record (or overwrite) the user-provided name for this handle so it can be
        // shown in subsequent validation messages.
        (*device_data->report_data->debugObjectNameMap)[pNameInfo->object] = pNameInfo->pObjectName;
    } else {
        // A null name clears any previously set name.
        device_data->report_data->debugObjectNameMap->erase(pNameInfo->object);
    }
    lock.unlock();
    VkResult result = device_data->dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo);
    return result;
}
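// Illustration (hypothetical application-side code): how a name set through the entry
// point above ends up in debugObjectNameMap. The handle and name are assumptions for
// the example; my_image stands in for any VkImage the application owns.
//
//     VkDebugMarkerObjectNameInfoEXT name_info = {};
//     name_info.sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT;
//     name_info.objectType = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT;
//     name_info.object = (uint64_t)my_image;
//     name_info.pObjectName = "BackbufferImage";
//     vkDebugMarkerSetObjectNameEXT(device, &name_info);  // name now appears in reports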

VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(VkDevice device, VkDebugMarkerObjectTagInfoEXT *pTagInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = device_data->dispatch_table.DebugMarkerSetObjectTagEXT(device, pTagInfo);
    return result;
}

VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    device_data->dispatch_table.CmdDebugMarkerBeginEXT(commandBuffer, pMarkerInfo);
}

VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    device_data->dispatch_table.CmdDebugMarkerEndEXT(commandBuffer);
}

VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerInsertEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    device_data->dispatch_table.CmdDebugMarkerInsertEXT(commandBuffer, pMarkerInfo);
}
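// Illustration (hypothetical application-side code): the begin/end pair above is meant
// to bracket a stretch of commands in a named region that debugging tools can group.
// cmd_buffer is an assumed, already-recording VkCommandBuffer.
//
//     VkDebugMarkerMarkerInfoEXT marker = {};
//     marker.sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT;
//     marker.pMarkerName = "ShadowPass";
//     vkCmdDebugMarkerBeginEXT(cmd_buffer, &marker);
//     // ... record the shadow-pass commands ...
//     vkCmdDebugMarkerEndEXT(cmd_buffer);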

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName);
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName);

// Map of all APIs to be intercepted by this layer
static const std::unordered_map<std::string, void*> name_to_funcptr_map = {
    {"vkGetInstanceProcAddr", (void*)GetInstanceProcAddr},
    {"vk_layerGetPhysicalDeviceProcAddr", (void*)GetPhysicalDeviceProcAddr},
    {"vkGetDeviceProcAddr", (void*)GetDeviceProcAddr},
    {"vkCreateInstance", (void*)CreateInstance},
    {"vkCreateDevice", (void*)CreateDevice},
    {"vkEnumeratePhysicalDevices", (void*)EnumeratePhysicalDevices},
    {"vkGetPhysicalDeviceQueueFamilyProperties", (void*)GetPhysicalDeviceQueueFamilyProperties},
    {"vkDestroyInstance", (void*)DestroyInstance},
    {"vkEnumerateInstanceLayerProperties", (void*)EnumerateInstanceLayerProperties},
    {"vkEnumerateDeviceLayerProperties", (void*)EnumerateDeviceLayerProperties},
    {"vkEnumerateInstanceExtensionProperties", (void*)EnumerateInstanceExtensionProperties},
    {"vkEnumerateDeviceExtensionProperties", (void*)EnumerateDeviceExtensionProperties},
    {"vkCreateDescriptorUpdateTemplateKHR", (void*)CreateDescriptorUpdateTemplateKHR},
    {"vkDestroyDescriptorUpdateTemplateKHR", (void*)DestroyDescriptorUpdateTemplateKHR},
    {"vkUpdateDescriptorSetWithTemplateKHR", (void*)UpdateDescriptorSetWithTemplateKHR},
    {"vkCmdPushDescriptorSetWithTemplateKHR", (void*)CmdPushDescriptorSetWithTemplateKHR},
    {"vkCreateSwapchainKHR", (void*)CreateSwapchainKHR},
    {"vkDestroySwapchainKHR", (void*)DestroySwapchainKHR},
    {"vkGetSwapchainImagesKHR", (void*)GetSwapchainImagesKHR},
    {"vkAcquireNextImageKHR", (void*)AcquireNextImageKHR},
    {"vkQueuePresentKHR", (void*)QueuePresentKHR},
    {"vkQueueSubmit", (void*)QueueSubmit},
    {"vkWaitForFences", (void*)WaitForFences},
    {"vkGetFenceStatus", (void*)GetFenceStatus},
    {"vkQueueWaitIdle", (void*)QueueWaitIdle},
    {"vkDeviceWaitIdle", (void*)DeviceWaitIdle},
    {"vkGetDeviceQueue", (void*)GetDeviceQueue},
    {"vkDestroyDevice", (void*)DestroyDevice},
    {"vkDestroyFence", (void*)DestroyFence},
    {"vkResetFences", (void*)ResetFences},
    {"vkDestroySemaphore", (void*)DestroySemaphore},
    {"vkDestroyEvent", (void*)DestroyEvent},
    {"vkDestroyQueryPool", (void*)DestroyQueryPool},
    {"vkDestroyBuffer", (void*)DestroyBuffer},
    {"vkDestroyBufferView", (void*)DestroyBufferView},
    {"vkDestroyImage", (void*)DestroyImage},
    {"vkDestroyImageView", (void*)DestroyImageView},
    {"vkDestroyShaderModule", (void*)DestroyShaderModule},
    {"vkDestroyPipeline", (void*)DestroyPipeline},
    {"vkDestroyPipelineLayout", (void*)DestroyPipelineLayout},
    {"vkDestroySampler", (void*)DestroySampler},
    {"vkDestroyDescriptorSetLayout", (void*)DestroyDescriptorSetLayout},
    {"vkDestroyDescriptorPool", (void*)DestroyDescriptorPool},
    {"vkDestroyFramebuffer", (void*)DestroyFramebuffer},
    {"vkDestroyRenderPass", (void*)DestroyRenderPass},
    {"vkCreateBuffer", (void*)CreateBuffer},
    {"vkCreateBufferView", (void*)CreateBufferView},
    {"vkCreateImage", (void*)CreateImage},
    {"vkCreateImageView", (void*)CreateImageView},
    {"vkCreateFence", (void*)CreateFence},
    {"vkCreatePipelineCache", (void*)CreatePipelineCache},
    {"vkDestroyPipelineCache", (void*)DestroyPipelineCache},
    {"vkGetPipelineCacheData", (void*)GetPipelineCacheData},
    {"vkMergePipelineCaches", (void*)MergePipelineCaches},
    {"vkCreateGraphicsPipelines", (void*)CreateGraphicsPipelines},
    {"vkCreateComputePipelines", (void*)CreateComputePipelines},
    {"vkCreateSampler", (void*)CreateSampler},
    {"vkCreateDescriptorSetLayout", (void*)CreateDescriptorSetLayout},
    {"vkCreatePipelineLayout", (void*)CreatePipelineLayout},
    {"vkCreateDescriptorPool", (void*)CreateDescriptorPool},
    {"vkResetDescriptorPool", (void*)ResetDescriptorPool},
    {"vkAllocateDescriptorSets", (void*)AllocateDescriptorSets},
    {"vkFreeDescriptorSets", (void*)FreeDescriptorSets},
    {"vkUpdateDescriptorSets", (void*)UpdateDescriptorSets},
    {"vkCreateCommandPool", (void*)CreateCommandPool},
    {"vkDestroyCommandPool", (void*)DestroyCommandPool},
    {"vkResetCommandPool", (void*)ResetCommandPool},
    {"vkCreateQueryPool", (void*)CreateQueryPool},
    {"vkAllocateCommandBuffers", (void*)AllocateCommandBuffers},
    {"vkFreeCommandBuffers", (void*)FreeCommandBuffers},
    {"vkBeginCommandBuffer", (void*)BeginCommandBuffer},
    {"vkEndCommandBuffer", (void*)EndCommandBuffer},
    {"vkResetCommandBuffer", (void*)ResetCommandBuffer},
    {"vkCmdBindPipeline", (void*)CmdBindPipeline},
    {"vkCmdSetViewport", (void*)CmdSetViewport},
    {"vkCmdSetScissor", (void*)CmdSetScissor},
    {"vkCmdSetLineWidth", (void*)CmdSetLineWidth},
    {"vkCmdSetDepthBias", (void*)CmdSetDepthBias},
    {"vkCmdSetBlendConstants", (void*)CmdSetBlendConstants},
    {"vkCmdSetDepthBounds", (void*)CmdSetDepthBounds},
    {"vkCmdSetStencilCompareMask", (void*)CmdSetStencilCompareMask},
    {"vkCmdSetStencilWriteMask", (void*)CmdSetStencilWriteMask},
    {"vkCmdSetStencilReference", (void*)CmdSetStencilReference},
    {"vkCmdBindDescriptorSets", (void*)CmdBindDescriptorSets},
    {"vkCmdBindVertexBuffers", (void*)CmdBindVertexBuffers},
    {"vkCmdBindIndexBuffer", (void*)CmdBindIndexBuffer},
    {"vkCmdDraw", (void*)CmdDraw},
    {"vkCmdDrawIndexed", (void*)CmdDrawIndexed},
    {"vkCmdDrawIndirect", (void*)CmdDrawIndirect},
    {"vkCmdDrawIndexedIndirect", (void*)CmdDrawIndexedIndirect},
    {"vkCmdDispatch", (void*)CmdDispatch},
    {"vkCmdDispatchIndirect", (void*)CmdDispatchIndirect},
    {"vkCmdCopyBuffer", (void*)CmdCopyBuffer},
    {"vkCmdCopyImage", (void*)CmdCopyImage},
    {"vkCmdBlitImage", (void*)CmdBlitImage},
    {"vkCmdCopyBufferToImage", (void*)CmdCopyBufferToImage},
    {"vkCmdCopyImageToBuffer", (void*)CmdCopyImageToBuffer},
    {"vkCmdUpdateBuffer", (void*)CmdUpdateBuffer},
    {"vkCmdFillBuffer", (void*)CmdFillBuffer},
    {"vkCmdClearColorImage", (void*)CmdClearColorImage},
    {"vkCmdClearDepthStencilImage", (void*)CmdClearDepthStencilImage},
    {"vkCmdClearAttachments", (void*)CmdClearAttachments},
    {"vkCmdResolveImage", (void*)CmdResolveImage},
    {"vkGetImageSubresourceLayout", (void*)GetImageSubresourceLayout},
    {"vkCmdSetEvent", (void*)CmdSetEvent},
    {"vkCmdResetEvent", (void*)CmdResetEvent},
    {"vkCmdWaitEvents", (void*)CmdWaitEvents},
    {"vkCmdPipelineBarrier", (void*)CmdPipelineBarrier},
    {"vkCmdBeginQuery", (void*)CmdBeginQuery},
    {"vkCmdEndQuery", (void*)CmdEndQuery},
    {"vkCmdResetQueryPool", (void*)CmdResetQueryPool},
    {"vkCmdCopyQueryPoolResults", (void*)CmdCopyQueryPoolResults},
    {"vkCmdPushConstants", (void*)CmdPushConstants},
    {"vkCmdWriteTimestamp", (void*)CmdWriteTimestamp},
    {"vkCreateFramebuffer", (void*)CreateFramebuffer},
    {"vkCreateShaderModule", (void*)CreateShaderModule},
    {"vkCreateRenderPass", (void*)CreateRenderPass},
    {"vkCmdBeginRenderPass", (void*)CmdBeginRenderPass},
    {"vkCmdNextSubpass", (void*)CmdNextSubpass},
    {"vkCmdEndRenderPass", (void*)CmdEndRenderPass},
    {"vkCmdExecuteCommands", (void*)CmdExecuteCommands},
    {"vkCmdDebugMarkerBeginEXT", (void*)CmdDebugMarkerBeginEXT},
    {"vkCmdDebugMarkerEndEXT", (void*)CmdDebugMarkerEndEXT},
    {"vkCmdDebugMarkerInsertEXT", (void*)CmdDebugMarkerInsertEXT},
    {"vkDebugMarkerSetObjectNameEXT", (void*)DebugMarkerSetObjectNameEXT},
    {"vkDebugMarkerSetObjectTagEXT", (void*)DebugMarkerSetObjectTagEXT},
    {"vkSetEvent", (void*)SetEvent},
    {"vkMapMemory", (void*)MapMemory},
    {"vkUnmapMemory", (void*)UnmapMemory},
    {"vkFlushMappedMemoryRanges", (void*)FlushMappedMemoryRanges},
    {"vkInvalidateMappedMemoryRanges", (void*)InvalidateMappedMemoryRanges},
    {"vkAllocateMemory", (void*)AllocateMemory},
    {"vkFreeMemory", (void*)FreeMemory},
    {"vkBindBufferMemory", (void*)BindBufferMemory},
    {"vkGetBufferMemoryRequirements", (void*)GetBufferMemoryRequirements},
    {"vkGetImageMemoryRequirements", (void*)GetImageMemoryRequirements},
    {"vkGetQueryPoolResults", (void*)GetQueryPoolResults},
    {"vkBindImageMemory", (void*)BindImageMemory},
    {"vkQueueBindSparse", (void*)QueueBindSparse},
    {"vkCreateSemaphore", (void*)CreateSemaphore},
    {"vkCreateEvent", (void*)CreateEvent},
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    {"vkCreateAndroidSurfaceKHR", (void*)CreateAndroidSurfaceKHR},
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
    {"vkCreateMirSurfaceKHR", (void*)CreateMirSurfaceKHR},
    {"vkGetPhysicalDeviceMirPresentationSupportKHR", (void*)GetPhysicalDeviceMirPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
    {"vkCreateWaylandSurfaceKHR", (void*)CreateWaylandSurfaceKHR},
    {"vkGetPhysicalDeviceWaylandPresentationSupportKHR", (void*)GetPhysicalDeviceWaylandPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
    {"vkCreateWin32SurfaceKHR", (void*)CreateWin32SurfaceKHR},
    {"vkGetPhysicalDeviceWin32PresentationSupportKHR", (void*)GetPhysicalDeviceWin32PresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
    {"vkCreateXcbSurfaceKHR", (void*)CreateXcbSurfaceKHR},
    {"vkGetPhysicalDeviceXcbPresentationSupportKHR", (void*)GetPhysicalDeviceXcbPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
    {"vkCreateXlibSurfaceKHR", (void*)CreateXlibSurfaceKHR},
    {"vkGetPhysicalDeviceXlibPresentationSupportKHR", (void*)GetPhysicalDeviceXlibPresentationSupportKHR},
#endif
    {"vkCreateDisplayPlaneSurfaceKHR", (void*)CreateDisplayPlaneSurfaceKHR},
    {"vkDestroySurfaceKHR", (void*)DestroySurfaceKHR},
    {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", (void*)GetPhysicalDeviceSurfaceCapabilitiesKHR},
    {"vkGetPhysicalDeviceSurfaceCapabilities2KHR", (void*)GetPhysicalDeviceSurfaceCapabilities2KHR},
    {"vkGetPhysicalDeviceSurfaceCapabilities2EXT", (void*)GetPhysicalDeviceSurfaceCapabilities2EXT},
    {"vkGetPhysicalDeviceSurfaceSupportKHR", (void*)GetPhysicalDeviceSurfaceSupportKHR},
    {"vkGetPhysicalDeviceSurfacePresentModesKHR", (void*)GetPhysicalDeviceSurfacePresentModesKHR},
    {"vkGetPhysicalDeviceSurfaceFormatsKHR", (void*)GetPhysicalDeviceSurfaceFormatsKHR},
    {"vkGetPhysicalDeviceSurfaceFormats2KHR", (void*)GetPhysicalDeviceSurfaceFormats2KHR},
    {"vkGetPhysicalDeviceQueueFamilyProperties2KHR", (void*)GetPhysicalDeviceQueueFamilyProperties2KHR},
    {"vkEnumeratePhysicalDeviceGroupsKHX", (void*)EnumeratePhysicalDeviceGroupsKHX},
    {"vkCreateDebugReportCallbackEXT", (void*)CreateDebugReportCallbackEXT},
    {"vkDestroyDebugReportCallbackEXT", (void*)DestroyDebugReportCallbackEXT},
    {"vkDebugReportMessageEXT", (void*)DebugReportMessageEXT},
    {"vkGetPhysicalDeviceDisplayPlanePropertiesKHR", (void*)GetPhysicalDeviceDisplayPlanePropertiesKHR},
    {"vkGetDisplayPlaneSupportedDisplaysKHR", (void*)GetDisplayPlaneSupportedDisplaysKHR},
    {"vkGetDisplayPlaneCapabilitiesKHR", (void*)GetDisplayPlaneCapabilitiesKHR},
};

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
    assert(device);
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    // Is API to be intercepted by this layer?
    auto item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

    // Not intercepted here; pass the query down the chain.
    auto &table = device_data->dispatch_table;
    if (!table.GetDeviceProcAddr) return nullptr;
    return table.GetDeviceProcAddr(device, funcName);
}
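// Illustration (hypothetical caller-side code): entry points resolved through the
// function above return this layer's intercept when the name appears in
// name_to_funcptr_map, and otherwise fall through to the next layer or driver.
//
//     auto pfn_draw = reinterpret_cast<PFN_vkCmdDraw>(
//         vkGetDeviceProcAddr(device, "vkCmdDraw"));  // in the map, so intercepted here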

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    // Is API to be intercepted by this layer?
    auto item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    auto &table = instance_data->dispatch_table;
    if (!table.GetInstanceProcAddr) return nullptr;
    return table.GetInstanceProcAddr(instance, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
    assert(instance);
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);

    auto &table = instance_data->dispatch_table;
    if (!table.GetPhysicalDeviceProcAddr) return nullptr;
    return table.GetPhysicalDeviceProcAddr(instance, funcName);
}

}  // namespace core_validation

// loader-layer interface v0: these are just wrappers, since there is only a single layer in this library

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                                      VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
                                                                                  VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                                                VkLayerProperties *pProperties) {
    // The loader is expected to pass VK_NULL_HANDLE here; the layer entry point handles that value internally.
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // The loader is expected to pass VK_NULL_HANDLE here; the layer entry point handles that value internally.
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
                                                                                           const char *funcName) {
    return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
    assert(pVersionStruct != NULL);
    assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);

    // Fill in the function pointers if our version is at least capable of having the structure contain them.
    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
        pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
        pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
        pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
    }

    // Settle on the lower of the two interface versions: remember the loader's version if it
    // is older than ours, or report ours back if the loader's is newer.
    if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        core_validation::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
    } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
    }

    return VK_SUCCESS;
}
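// Illustration (hypothetical loader-side code): how the negotiation entry point above is
// expected to be driven. The loader proposes its interface version and the layer lowers
// it if necessary; the struct fields are those of VkNegotiateLayerInterface from vk_layer.h.
//
//     VkNegotiateLayerInterface negotiate = {};
//     negotiate.sType = LAYER_NEGOTIATE_INTERFACE_STRUCT;
//     negotiate.loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
//     if (vkNegotiateLoaderLayerInterfaceVersion(&negotiate) == VK_SUCCESS) {
//         // negotiate.loaderLayerInterfaceVersion now holds the agreed version and, for
//         // version >= 2, the pfn* members point at this layer's dispatch roots.
//     }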