core_validation.cpp revision 60cbbd8f316f5dfc7997bb66833a2e624f832e37
/* Copyright (c) 2015-2017 The Khronos Group Inc.
 * Copyright (c) 2015-2017 Valve Corporation
 * Copyright (c) 2015-2017 LunarG, Inc.
 * Copyright (C) 2015-2017 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: Dustin Graves <dustin@lunarg.com>
 * Author: Jeremy Hayes <jeremy@lunarg.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Karl Schultz <karl@lunarg.com>
 * Author: Mark Young <marky@lunarg.com>
 * Author: Mike Schuchardt <mikes@lunarg.com>
 * Author: Mike Weiblen <mikew@lunarg.com>
 * Author: Tony Barbour <tony@LunarG.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)                                                                                                            \
    {                                                                                                                              \
        printf(__VA_ARGS__);                                                                                                       \
        printf("\n");                                                                                                              \
    }
#endif

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

using namespace std;

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);

// A special value of (0xFFFFFFFF, 0xFFFFFFFF) indicates that the surface size will be determined
// by the extent of a swapchain targeting the surface.
static const uint32_t kSurfaceSizeFromSwapchain = 0xFFFFFFFFu;

struct devExts {
    bool wsi_enabled;
    bool wsi_display_swapchain_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
    uint32_t physical_devices_count = 0;
    CHECK_DISABLED disabled = {};

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;

    bool surfaceExtensionEnabled = false;
    bool displayExtensionEnabled = false;
    bool androidSurfaceExtensionEnabled = false;
    bool mirSurfaceExtensionEnabled = false;
    bool waylandSurfaceExtensionEnabled = false;
    bool win32SurfaceExtensionEnabled = false;
    bool xcbSurfaceExtensionEnabled = false;
    bool xlibSurfaceExtensionEnabled = false;
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;

    devExts device_extensions = {};
    unordered_set<VkQueue> queues;  // All queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_STATE> queueMap;
    unordered_map<VkEvent, EVENT_STATE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_STATE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;

    VkDevice device = VK_NULL_HANDLE;
    VkPhysicalDevice physical_device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}
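
// Illustrative sketch (not part of the layer): a layer list that the ordering check
// above accepts, with unique_objects enabled after core_validation. The names and
// ordering here are an assumed example, shown only to clarify the rule.
//
//     const char *layers[] = {
//         "VK_LAYER_LUNARG_core_validation",  // must come before unique_objects
//         "VK_LAYER_GOOGLE_unique_objects",
//     };
//     VkInstanceCreateInfo ci = {};
//     ci.enabledLayerCount = 2;
//     ci.ppEnabledLayerNames = layers;
//     ValidateLayerOrdering(ci);  // logs nothing for this ordering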

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over SPIR-V instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIR-V module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { // x++
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { // ++x;
        it += len();
        return *this;
    }

    // The iterator and the value are the same thing.
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};

struct shader_module {
    // The SPIR-V image itself
    vector<uint32_t> words;
    // A mapping of <id> to the first word of its def. This is useful because walking type
    // trees, constant expressions, etc. requires jumping all over the instruction stream.
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    // Expose begin() / end() to enable range-based for
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } // First insn
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         // Just past last insn
    // Given an offset into the module, produce an iterator there.
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    // Gets an iterator to the definition of an id
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
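
// Illustrative sketch (not part of the layer): how the pieces above compose. begin()
// skips the 5-word SPIR-V header, and each increment advances by the instruction's own
// word count, so a range-based for visits every instruction exactly once.
//
//     shader_module const *module = ...;  // assumed to hold a valid module
//     for (auto insn : *module) {
//         if (insn.opcode() == spv::OpVariable) {
//             // word(1) is the result type id; jump to its definition:
//             auto type_def = module->get_def(insn.word(1));
//             (void)type_def;
//         }
//     }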

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *getImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_STATE *getSamplerState(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
IMAGE_STATE *getImageState(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer state ptr for specified buffer or else NULL
BUFFER_STATE *getBufferState(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view state ptr for specified bufferView or else NULL
BUFFER_VIEW_STATE *getBufferViewState(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_STATE *getEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *getQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_STATE *getQueueState(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *getPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

SURFACE_STATE *getSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    auto it = instance_data->surface_map.find(surface);
    if (it == instance_data->surface_map.end()) {
        return nullptr;
    }
    return &it->second;
}
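
// Illustrative sketch (not part of the layer): the accessors above do raw map lookups,
// so callers are expected to hold global_lock while the returned pointer is in use.
// A typical call site (names assumed for illustration) looks like:
//
//     std::unique_lock<std::mutex> lock(global_lock);
//     IMAGE_STATE *image_state = getImageState(dev_data, image);
//     if (image_state) {
//         // ... inspect or update tracked state ...
//     }
//     lock.unlock();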

// Return ptr to memory binding for given handle of specified type
static BINDABLE *GetObjectMemBinding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return getImageState(my_data, VkImage(handle));
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return getBufferState(my_data, VkBuffer(handle));
    default:
        break;
    }
    return nullptr;
}
// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict, uint64_t obj_handle,
                                 VkDebugReportObjectTypeEXT obj_type, int32_t const msgCode, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skip_call = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        if (msgCode == -1) {
            // TODO: Fix callers with msgCode == -1 to use correct validation checks.
            skip_call =
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                        MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                                            " used by %s. In this case, %s should have %s set during creation.",
                        ty_str, obj_handle, func_name, ty_str, usage_str);
        } else {
            const char *valid_usage = validation_error_map[msgCode]; // msgCode != -1 in this branch
            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__, msgCode, "MEM",
                                "Invalid usage flag for %s 0x%" PRIxLEAST64
                                " used by %s. In this case, %s should have %s set during creation. %s",
                                ty_str, obj_handle, func_name, ty_str, usage_str, valid_usage);
        }
    }
    return skip_call;
}
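
// Illustrative sketch (not part of the layer): the difference between strict and
// loose matching above, using made-up buffer usage values.
//
//     VkFlags actual  = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
//     VkFlags desired = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
//     // strict == false: (actual & desired) != 0       -> passes (TRANSFER_SRC overlaps)
//     // strict == true:  (actual & desired) == desired -> fails (INDEX_BUFFER bit missing)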

// Helper function to validate usage flags for images
// For given image_state send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_STATE const *image_state, VkFlags desired, VkBool32 strict,
                                    int32_t const msgCode, char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, image_state->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                msgCode, "image", func_name, usage_string);
}

// Helper function to validate usage flags for buffers
// For given buffer_state send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateBufferUsageFlags(layer_data *dev_data, BUFFER_STATE const *buffer_state, VkFlags desired, VkBool32 strict,
                                     int32_t const msgCode, char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, buffer_state->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(buffer_state->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                msgCode, "buffer", func_name, usage_string);
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// Helper function to print lowercase string of object type
//  TODO: Unify string helper functions, this should really come out of a string helper if not there already
static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
        return "image view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
        return "buffer view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
        return "descriptor set";
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
        return "framebuffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
        return "event";
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
        return "query pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
        return "descriptor pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
        return "command pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
        return "pipeline";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
        return "sampler";
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
        return "renderpass";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
        return "device memory";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
        return "semaphore";
    default:
        return "unknown";
    }
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
                                  VkDebugReportObjectTypeEXT type, const char *functionName) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(mem), object_type_to_string(type), bound_object_handle);
        }
    }
    return false;
}
// For given image_state
//  If mem is special swapchain key, then verify that image_state valid member is true
//  Else verify that the image's bound memory range is valid
static bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_state->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(image_state->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image),
                                     VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, functionName);
    }
    return false;
}
// For given buffer_state, verify that the range it's bound to is valid
static bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer),
                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_state to valid param value
//  Else set the image's bound memory range to valid param value
static void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_state->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
static void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
    SetMemoryValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer), valid);
}
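
// Illustrative sketch (not part of the layer): how the valid bit flows through the
// helpers above. A recorded write marks the bound range valid; a later read checks it
// and warns if the memory was never filled.
//
//     SetBufferMemoryValid(dev_data, dst_buffer_state, true);  // after recording a write
//     ...
//     bool skip = ValidateBufferMemoryIsValid(dev_data, src_buffer_state, "vkCmdCopyBuffer()");
//     // skip reflects log_msg's result when src's bound range was never marked valid
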
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skip_call = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, cb);
            pMemInfo->cb_bindings.insert(cb_node);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (cb_node) {
                cb_node->memObjs.insert(mem);
            }
        }
    }
    return skip_call;
}

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    sampler_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(sampler_state->sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        for (auto mem_binding : image_state->GetBoundMemory()) {
            DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem_binding);
            if (pMemInfo) {
                pMemInfo->cb_bindings.insert(cb_node);
                // Now update CBInfo's Mem reference list
                cb_node->memObjs.insert(mem_binding);
            }
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
        image_state->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT});
    auto image_state = getImageState(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_state) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
    // First update CB binding in MemObj mini CB list
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem_binding);
        if (pMemInfo) {
            pMemInfo->cb_bindings.insert(cb_node);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(mem_binding);
        }
    }
    // Now update cb binding for buffer
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buffer_state->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
    buffer_state->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT});
    auto buffer_state = getBufferState(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
    }
}

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (cb_node) {
        if (cb_node->memObjs.size() > 0) {
            for (auto mem : cb_node->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->cb_bindings.erase(cb_node);
                }
            }
            cb_node->memObjs.clear();
        }
        cb_node->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// Clear a single object binding from given memory object, or report error if binding is missing
static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory mem) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
    if (mem_info) {
        mem_info->obj_bindings.erase({handle, type});
    }
    return false;
}

// ClearMemoryObjectBindings clears the binding of objects to memory
//  For the given object it pulls the memory bindings and makes sure that the bindings
//  no longer refer to the object being cleared. This occurs when objects are destroyed.
static bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    bool skip = false;
    BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
    if (mem_binding) {
        if (!mem_binding->sparse) {
            skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
        } else { // Sparse, clear all bindings
            for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
                skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
            }
        }
    }
    return skip;
}

// For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound. Memory should be bound by calling "
                         "vkBind%sMemory(). %s",
                         api_name, type_name, handle, type_name, validation_error_map[error_code]);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound and previously bound memory was freed. "
                         "Memory must not be freed prior to this operation. %s",
                         api_name, type_name, handle, validation_error_map[error_code]);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
                                  UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem,
                                          reinterpret_cast<const uint64_t &>(image_state->image), api_name, "Image", error_code);
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
                                   UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem,
                                          reinterpret_cast<const uint64_t &>(buffer_state->buffer), api_name, "Buffer", error_code);
    }
    return result;
}

// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object
// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
static bool SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
                          const char *apiName) {
    bool skip_call = false;
    // It's an error to bind an object to NULL memory
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        // TODO : Add check here to make sure object isn't sparse
        //  VALIDATION_ERROR_00792 for buffers
        //  VALIDATION_ERROR_00804 for images
        assert(!mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = getMemObjInfo(dev_data, mem_binding->binding.mem);
            if (prev_binding) {
                // TODO: VALIDATION_ERROR_00791 and VALIDATION_ERROR_00803
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which has already been bound to mem object 0x%" PRIxLEAST64,
                            apiName, reinterpret_cast<uint64_t &>(mem), handle, reinterpret_cast<uint64_t &>(prev_binding->mem));
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
                            "Vulkan so this attempt to bind to new memory is not allowed.",
                            apiName, reinterpret_cast<uint64_t &>(mem), handle);
            } else {
                mem_info->obj_bindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_state = getImageState(dev_data, VkImage(handle));
                    if (image_state) {
                        VkImageCreateInfo ici = image_state->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                }
                mem_binding->binding.mem = mem;
            }
        }
    }
    return skip_call;
}
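
// Illustrative sketch (not part of the layer): the immutable-binding rule SetMemBinding
// enforces. The handles below are placeholders, not real objects.
//
//     SetMemBinding(dev_data, mem_a, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory()");
//     // A second bind of the same buffer to different memory is reported as MEMTRACK_REBIND_OBJECT:
//     SetMemBinding(dev_data, mem_b, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory()");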

// For NULL mem case, clear any previous binding. Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Returns skip_call; no errors are currently reported here, so this is always false
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VkDebugReportObjectTypeEXT type,
                                const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        assert(mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, binding.mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            mem_binding->sparse_bindings.insert(binding);
        }
    }
    return skip_call;
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIR-V utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        // Types
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        // Fixed constants
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        // Specialization constants
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        // Variables
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        // Functions
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            // We don't care about any other defs for now.
            break;
        }
    }
}
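
// Illustrative note (not normative): which operand holds the result <id> varies by
// opcode class, which is why build_def_index indexes word(1) for types but word(2)
// for constants, variables, and functions. For example:
//
//     %7 = OpTypeInt 32 1       ; words: [OpTypeInt, 7, 32, 1]  -> result id in word(1)
//     %8 = OpConstant %7 42     ; words: [OpConstant, 7, 8, 42] -> result id in word(2)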

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}
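
// Illustrative note (not normative): an OpEntryPoint instruction lays out as
//
//     OpEntryPoint Fragment %main "main" %in %out
//     ; word(1) = execution model (Fragment = 4), word(2) = function <id>,
//     ; word(3)... = null-terminated UTF-8 name, then interface <id>s
//
// The shift (1u << word(1)) works because VkShaderStageFlagBits assigns bit N to the
// stage whose SPIR-V execution model is N (Vertex = 0, ..., Fragment = 4, GLCompute = 5).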

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

// Get the value of an integral constant
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        // TODO: Either ensure that the specialization transform is already performed on a module we're
        //       considering here, OR -- specialize on the fly now.
        return 1;
    }

    return value.word(3);
}


static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}


static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}
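
// Illustrative note (not normative): for a GLSL declaration like
//
//     layout(location = 0) in vec4 color;
//
// describe_type on the variable's pointer type yields a string composed from the
// cases above, e.g. "ptr to input vec4 of float32".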


static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}


static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed,
                        bool b_arrayed, bool relaxed) {
    // Walk two type trees together, and complain about differences
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        // We probably just found the extra level of arrayness in b_type: compare the type inside it to a_type
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        // Match on pointee type. Storage class is expected to differ
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        // If we haven't resolved array-of-verts by here, we're not going to.
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        // Match on width, signedness
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        // Match on width
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        // Match on element type, count.
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        // Match on element type, count.
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        // Match on element type, count. These all have the same layout. We don't get here if b_arrayed. This differs from
        // vector & matrix types in that the array size is the id of a constant instruction, not a literal within OpTypeArray
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        // Match on all element types
        {
            if (a_insn.len() != b_insn.len()) {
                return false; // Structs cannot match if member counts differ
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        // Remaining types are CLisms, or may not appear in the interfaces we are interested in. Just claim no match.
        return false;
    }
}
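
// Illustrative note (not normative): under the relaxed rules above, a vector type in
// module 'a' matches a smaller vector of the same narrow component type in module 'b'
// (a_insn.word(3) >= b_insn.word(3)), so e.g. float4 in 'a' vs float3 in 'b' matches,
// while float3 in 'a' vs float4 in 'b' does not.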

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}
1237
1238static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
1239    auto insn = src->get_def(type);
1240    assert(insn != src->end());
1241
1242    switch (insn.opcode()) {
1243    case spv::OpTypePointer:
1244        // See through the ptr -- this is only ever at the toplevel for graphics shaders we're never actually passing
1245        // pointers around.
1246        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1247    case spv::OpTypeArray:
1248        if (strip_array_level) {
1249            return get_locations_consumed_by_type(src, insn.word(2), false);
1250        } else {
1251            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1252        }
1253    case spv::OpTypeMatrix:
1254        // Num locations is the dimension * element size
1255        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
1256    case spv::OpTypeVector: {
1257        auto scalar_type = src->get_def(insn.word(2));
1258        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
1259            scalar_type.word(2) : 32;
1260
1261        // Locations are 128 bits wide; 3- and 4-component vectors of 64-bit types require two locations.
1262        return (bit_width * insn.word(3) + 127) / 128;
1263    }
1264    default:
1265        // Everything else is just 1.
1266        return 1;
1267
1268        // TODO: extend to handle 64bit scalar types, whose vectors may need multiple locations.
1269    }
1270}
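// Worked example of the rules above: a vec4 (4 x 32-bit) consumes (32 * 4 + 127) / 128 = 1
// location, a dvec4 (4 x 64-bit) consumes (64 * 4 + 127) / 128 = 2, and an unstripped array
// dvec4[3] consumes 3 * 2 = 6.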
1271
1272static unsigned get_locations_consumed_by_format(VkFormat format) {
1273    switch (format) {
1274    case VK_FORMAT_R64G64B64A64_SFLOAT:
1275    case VK_FORMAT_R64G64B64A64_SINT:
1276    case VK_FORMAT_R64G64B64A64_UINT:
1277    case VK_FORMAT_R64G64B64_SFLOAT:
1278    case VK_FORMAT_R64G64B64_SINT:
1279    case VK_FORMAT_R64G64B64_UINT:
1280        return 2;
1281    default:
1282        return 1;
1283    }
1284}
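// This mirrors the shader-side arithmetic above: e.g. an attribute of format
// VK_FORMAT_R64G64B64_SFLOAT feeding a dvec3 input occupies two consecutive locations on
// both sides of the interface.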
1285
1286typedef std::pair<unsigned, unsigned> location_t;
1287typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1288
1289struct interface_var {
1290    uint32_t id;
1291    uint32_t type_id;
1292    uint32_t offset;
1293    bool is_patch;
1294    bool is_block_member;
1295    bool is_relaxed_precision;
1296    // TODO: collect the name, too? Isn't required to be present.
1297};
1298
1299struct shader_stage_attributes {
1300    char const *const name;
1301    bool arrayed_input;
1302    bool arrayed_output;
1303};
1304
1305static shader_stage_attributes shader_stage_attribs[] = {
1306    {"vertex shader", false, false},
1307    {"tessellation control shader", true, true},
1308    {"tessellation evaluation shader", true, false},
1309    {"geometry shader", true, false},
1310    {"fragment shader", false, false},
1311};
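// The arrayed_* flags mark interfaces that carry an extra per-vertex array level: e.g. a
// tessellation control shader sees both its inputs and outputs as arrays over vertices
// (GLSL gl_out[]-style blocks), while tessellation evaluation and geometry shaders only see
// arrayed inputs. Callers use these flags to strip that level before matching types.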
1312
1313static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1314    while (true) {
1315
1316        if (def.opcode() == spv::OpTypePointer) {
1317            def = src->get_def(def.word(3));
1318        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1319            def = src->get_def(def.word(2));
1320            is_array_of_verts = false;
1321        } else if (def.opcode() == spv::OpTypeStruct) {
1322            return def;
1323        } else {
1324            return src->end();
1325        }
1326    }
1327}
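// For illustration: a tessellation evaluation input block declared in GLSL as
//     in Block { vec4 v; } inp[];
// yields the chain OpTypePointer -> OpTypeArray -> OpTypeStruct, and with is_array_of_verts
// set the loop above peels both the pointer and the per-vertex array to reach the struct.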
1328
1329static void collect_interface_block_members(shader_module const *src,
1330                                            std::map<location_t, interface_var> *out,
1331                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1332                                            uint32_t id, uint32_t type_id, bool is_patch) {
1333    // Walk down the type_id presented, trying to determine whether it's actually an interface block.
1334    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1335    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1336        // This isn't an interface block.
1337        return;
1338    }
1339
1340    std::unordered_map<unsigned, unsigned> member_components;
1341    std::unordered_map<unsigned, unsigned> member_relaxed_precision;
1342
1343    // Walk all the OpMemberDecorate for type's result id -- first pass, collect components.
1344    for (auto insn : *src) {
1345        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1346            unsigned member_index = insn.word(2);
1347
1348            if (insn.word(3) == spv::DecorationComponent) {
1349                unsigned component = insn.word(4);
1350                member_components[member_index] = component;
1351            }
1352
1353            if (insn.word(3) == spv::DecorationRelaxedPrecision) {
1354                member_relaxed_precision[member_index] = 1;
1355            }
1356        }
1357    }
1358
1359    // Second pass -- produce the output, from Location decorations
1360    for (auto insn : *src) {
1361        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1362            unsigned member_index = insn.word(2);
1363            unsigned member_type_id = type.word(2 + member_index);
1364
1365            if (insn.word(3) == spv::DecorationLocation) {
1366                unsigned location = insn.word(4);
1367                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1368                auto component_it = member_components.find(member_index);
1369                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1370                bool is_relaxed_precision = member_relaxed_precision.find(member_index) != member_relaxed_precision.end();
1371
1372                for (unsigned int offset = 0; offset < num_locations; offset++) {
1373                    interface_var v = {};
1374                    v.id = id;
1375                    // TODO: member index in interface_var too?
1376                    v.type_id = member_type_id;
1377                    v.offset = offset;
1378                    v.is_patch = is_patch;
1379                    v.is_block_member = true;
1380                    v.is_relaxed_precision = is_relaxed_precision;
1381                    (*out)[std::make_pair(location + offset, component)] = v;
1382                }
1383            }
1384        }
1385    }
1386}
1387
1388static std::map<location_t, interface_var> collect_interface_by_location(
1389        shader_module const *src, spirv_inst_iter entrypoint,
1390        spv::StorageClass sinterface, bool is_array_of_verts) {
1391
1392    std::unordered_map<unsigned, unsigned> var_locations;
1393    std::unordered_map<unsigned, unsigned> var_builtins;
1394    std::unordered_map<unsigned, unsigned> var_components;
1395    std::unordered_map<unsigned, unsigned> blocks;
1396    std::unordered_map<unsigned, unsigned> var_patch;
1397    std::unordered_map<unsigned, unsigned> var_relaxed_precision;
1398
1399    for (auto insn : *src) {
1400
1401        // We consider two interface models: SSO rendezvous-by-location, and builtins. Complain about anything that
1402        // fits neither model.
1403        if (insn.opcode() == spv::OpDecorate) {
1404            if (insn.word(2) == spv::DecorationLocation) {
1405                var_locations[insn.word(1)] = insn.word(3);
1406            }
1407
1408            if (insn.word(2) == spv::DecorationBuiltIn) {
1409                var_builtins[insn.word(1)] = insn.word(3);
1410            }
1411
1412            if (insn.word(2) == spv::DecorationComponent) {
1413                var_components[insn.word(1)] = insn.word(3);
1414            }
1415
1416            if (insn.word(2) == spv::DecorationBlock) {
1417                blocks[insn.word(1)] = 1;
1418            }
1419
1420            if (insn.word(2) == spv::DecorationPatch) {
1421                var_patch[insn.word(1)] = 1;
1422            }
1423
1424            if (insn.word(2) == spv::DecorationRelaxedPrecision) {
1425                var_relaxed_precision[insn.word(1)] = 1;
1426            }
1427        }
1428    }
1429
1430    // TODO: handle grouped decorations
1431    // TODO: handle index=1 dual source outputs from FS -- two vars will have the same location, and we DON'T want to clobber.
1432
1433    // Find the end of the entrypoint's name string. Additional zero bytes follow the actual null terminator, to fill out the
1434    // rest of the word -- so we only need to look at the last byte in the word to determine which word contains the terminator.
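    // For example, the name "main" packs little-endian into two words: the first holds
    // 'm','a','i','n' (high byte 'n' != 0, keep scanning) and the second holds the null
    // terminator plus zero padding (high byte 0, stop); the interface ids follow that word.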
1435    uint32_t word = 3;
1436    while (entrypoint.word(word) & 0xff000000u) {
1437        ++word;
1438    }
1439    ++word;
1440
1441    std::map<location_t, interface_var> out;
1442
1443    for (; word < entrypoint.len(); word++) {
1444        auto insn = src->get_def(entrypoint.word(word));
1445        assert(insn != src->end());
1446        assert(insn.opcode() == spv::OpVariable);
1447
1448        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1449            unsigned id = insn.word(2);
1450            unsigned type = insn.word(1);
1451
1452            int location = value_or_default(var_locations, id, -1);
1453            int builtin = value_or_default(var_builtins, id, -1);
1454            unsigned component = value_or_default(var_components, id, 0); // Unspecified is OK, is 0
1455            bool is_patch = var_patch.find(id) != var_patch.end();
1456            bool is_relaxed_precision = var_relaxed_precision.find(id) != var_relaxed_precision.end();
1457
1458            // All variables and interface block members in the Input or Output storage classes must be decorated with either
1459            // a builtin or an explicit location.
1460            //
1461            // TODO: integrate the interface block support here. For now, don't complain -- a valid SPIRV module will only hit
1462            // this path for the interface block case, as the individual members of the type are decorated, rather than
1463            // variable declarations.
1464
1465            if (location != -1) {
1466                // A user-defined interface variable, with a location. Where a variable occupies multiple locations, emit
1467                // one result for each.
1468                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1469                for (unsigned int offset = 0; offset < num_locations; offset++) {
1470                    interface_var v = {};
1471                    v.id = id;
1472                    v.type_id = type;
1473                    v.offset = offset;
1474                    v.is_patch = is_patch;
1475                    v.is_relaxed_precision = is_relaxed_precision;
1476                    out[std::make_pair(location + offset, component)] = v;
1477                }
1478            } else if (builtin == -1) {
1479                // An interface block instance
1480                collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch);
1481            }
1482        }
1483    }
1484
1485    return out;
1486}
1487
1488static std::vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
1489        debug_report_data *report_data, shader_module const *src,
1490        std::unordered_set<uint32_t> const &accessible_ids) {
1491
1492    std::vector<std::pair<uint32_t, interface_var>> out;
1493
1494    for (auto insn : *src) {
1495        if (insn.opcode() == spv::OpDecorate) {
1496            if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
1497                auto attachment_index = insn.word(3);
1498                auto id = insn.word(1);
1499
1500                if (accessible_ids.count(id)) {
1501                    auto def = src->get_def(id);
1502                    assert(def != src->end());
1503
1504                    if (def.opcode() == spv::OpVariable && def.word(3) == spv::StorageClassUniformConstant) {
1505                        auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
1506                        for (unsigned int offset = 0; offset < num_locations; offset++) {
1507                            interface_var v = {};
1508                            v.id = id;
1509                            v.type_id = def.word(1);
1510                            v.offset = offset;
1511                            out.emplace_back(attachment_index + offset, v);
1512                        }
1513                    }
1514                }
1515            }
1516        }
1517    }
1518
1519    return out;
1520}
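// For illustration: a GLSL declaration such as
//     layout(input_attachment_index = 1, set = 0, binding = 2) uniform subpassInput u_in;
// yields an OpVariable in UniformConstant storage decorated InputAttachmentIndex 1, which
// the walk above reports as attachment slot 1.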
1521
1522static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
1523        debug_report_data *report_data, shader_module const *src,
1524        std::unordered_set<uint32_t> const &accessible_ids) {
1525
1526    std::unordered_map<unsigned, unsigned> var_sets;
1527    std::unordered_map<unsigned, unsigned> var_bindings;
1528
1529    for (auto insn : *src) {
1530        // All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1531        // DecorationDescriptorSet and DecorationBinding.
1532        if (insn.opcode() == spv::OpDecorate) {
1533            if (insn.word(2) == spv::DecorationDescriptorSet) {
1534                var_sets[insn.word(1)] = insn.word(3);
1535            }
1536
1537            if (insn.word(2) == spv::DecorationBinding) {
1538                var_bindings[insn.word(1)] = insn.word(3);
1539            }
1540        }
1541    }
1542
1543    std::vector<std::pair<descriptor_slot_t, interface_var>> out;
1544
1545    for (auto id : accessible_ids) {
1546        auto insn = src->get_def(id);
1547        assert(insn != src->end());
1548
1549        if (insn.opcode() == spv::OpVariable &&
1550            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1551            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1552            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1553
1554            interface_var v = {};
1555            v.id = insn.word(2);
1556            v.type_id = insn.word(1);
1557            out.emplace_back(std::make_pair(set, binding), v);
1558        }
1559    }
1560
1561    return out;
1562}
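// For illustration: layout(set = 1, binding = 3) uniform sampler2D tex; produces an
// OpVariable decorated DescriptorSet 1 / Binding 3, reported here as slot (1, 3). A variable
// missing either decoration is reported with 0 via value_or_default rather than rejected.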
1563
1564static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1565                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1566                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1567                                              shader_stage_attributes const *consumer_stage) {
1568    bool pass = true;
1569
1570    auto outputs = collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
1571    auto inputs = collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);
1572
1573    auto a_it = outputs.begin();
1574    auto b_it = inputs.begin();
1575
1576    // Maps sorted by key (location); walk them together to find mismatches
1577    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1578        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1579        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1580        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1581        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1582
1583        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1584            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1585                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1586                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1587                        a_first.second, consumer_stage->name)) {
1588                pass = false;
1589            }
1590            a_it++;
1591        } else if (a_at_end || a_first > b_first) {
1592            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1593                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1594                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1595                        producer_stage->name)) {
1596                pass = false;
1597            }
1598            b_it++;
1599        } else {
1600            // Subtleties of arrayed interfaces:
1601            // - if is_patch, then the member is not arrayed, even though the interface may be.
1602            // - if is_block_member, then the extra array level of an arrayed interface is not
1603            //   expressed in the member type -- it's expressed in the block type.
1604            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1605                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1606                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
1607                             true)) {
1608                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1609                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1610                            a_first.first, a_first.second,
1611                            describe_type(producer, a_it->second.type_id).c_str(),
1612                            describe_type(consumer, b_it->second.type_id).c_str())) {
1613                    pass = false;
1614                }
1615            }
1616            if (a_it->second.is_patch != b_it->second.is_patch) {
1617                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
1618                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1619                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1620                            "per-%s in %s stage", a_first.first, a_first.second,
1621                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1622                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1623                    pass = false;
1624                }
1625            }
1626            if (a_it->second.is_relaxed_precision != b_it->second.is_relaxed_precision) {
1627                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
1628                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1629                            "Decoration mismatch on location %u.%u: %s and %s stages differ in precision",
1630                            a_first.first, a_first.second,
1631                            producer_stage->name,
1632                            consumer_stage->name)) {
1633                    pass = false;
1634                }
1635            }
1636            a_it++;
1637            b_it++;
1638        }
1639    }
1640
1641    return pass;
1642}
1643
1644enum FORMAT_TYPE {
1645    FORMAT_TYPE_UNDEFINED,
1646    FORMAT_TYPE_FLOAT, // UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader
1647    FORMAT_TYPE_SINT,
1648    FORMAT_TYPE_UINT,
1649};
1650
1651static unsigned get_format_type(VkFormat fmt) {
1652    switch (fmt) {
1653    case VK_FORMAT_UNDEFINED:
1654        return FORMAT_TYPE_UNDEFINED;
1655    case VK_FORMAT_R8_SINT:
1656    case VK_FORMAT_R8G8_SINT:
1657    case VK_FORMAT_R8G8B8_SINT:
1658    case VK_FORMAT_R8G8B8A8_SINT:
1659    case VK_FORMAT_R16_SINT:
1660    case VK_FORMAT_R16G16_SINT:
1661    case VK_FORMAT_R16G16B16_SINT:
1662    case VK_FORMAT_R16G16B16A16_SINT:
1663    case VK_FORMAT_R32_SINT:
1664    case VK_FORMAT_R32G32_SINT:
1665    case VK_FORMAT_R32G32B32_SINT:
1666    case VK_FORMAT_R32G32B32A32_SINT:
1667    case VK_FORMAT_R64_SINT:
1668    case VK_FORMAT_R64G64_SINT:
1669    case VK_FORMAT_R64G64B64_SINT:
1670    case VK_FORMAT_R64G64B64A64_SINT:
1671    case VK_FORMAT_B8G8R8_SINT:
1672    case VK_FORMAT_B8G8R8A8_SINT:
1673    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1674    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1675    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1676        return FORMAT_TYPE_SINT;
1677    case VK_FORMAT_R8_UINT:
1678    case VK_FORMAT_R8G8_UINT:
1679    case VK_FORMAT_R8G8B8_UINT:
1680    case VK_FORMAT_R8G8B8A8_UINT:
1681    case VK_FORMAT_R16_UINT:
1682    case VK_FORMAT_R16G16_UINT:
1683    case VK_FORMAT_R16G16B16_UINT:
1684    case VK_FORMAT_R16G16B16A16_UINT:
1685    case VK_FORMAT_R32_UINT:
1686    case VK_FORMAT_R32G32_UINT:
1687    case VK_FORMAT_R32G32B32_UINT:
1688    case VK_FORMAT_R32G32B32A32_UINT:
1689    case VK_FORMAT_R64_UINT:
1690    case VK_FORMAT_R64G64_UINT:
1691    case VK_FORMAT_R64G64B64_UINT:
1692    case VK_FORMAT_R64G64B64A64_UINT:
1693    case VK_FORMAT_B8G8R8_UINT:
1694    case VK_FORMAT_B8G8R8A8_UINT:
1695    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1696    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1697    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1698        return FORMAT_TYPE_UINT;
1699    default:
1700        return FORMAT_TYPE_FLOAT;
1701    }
1702}
1703
1704// characterizes a SPIR-V type appearing in an interface to a FF stage, for comparison to a VkFormat's characterization above.
1705static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1706    auto insn = src->get_def(type);
1707    assert(insn != src->end());
1708
1709    switch (insn.opcode()) {
1710    case spv::OpTypeInt:
1711        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1712    case spv::OpTypeFloat:
1713        return FORMAT_TYPE_FLOAT;
1714    case spv::OpTypeVector:
1715        return get_fundamental_type(src, insn.word(2));
1716    case spv::OpTypeMatrix:
1717        return get_fundamental_type(src, insn.word(2));
1718    case spv::OpTypeArray:
1719        return get_fundamental_type(src, insn.word(2));
1720    case spv::OpTypePointer:
1721        return get_fundamental_type(src, insn.word(3));
1722    case spv::OpTypeImage:
1723        return get_fundamental_type(src, insn.word(2));
1724
1725    default:
1726        return FORMAT_TYPE_UNDEFINED;
1727    }
1728}
1729
1730static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1731    uint32_t bit_pos = u_ffs(stage);
1732    return bit_pos - 1;
1733}
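// e.g. VK_SHADER_STAGE_FRAGMENT_BIT (0x10) has its lowest set bit at position 5, so u_ffs
// returns 5 and the stage id is 4 -- a compact zero-based index suitable for array lookups.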
1734
1735static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1736    // Walk the binding descriptions, which describe the step rate and stride of each vertex buffer.  Each binding should
1737    // be specified only once.
1738    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1739    bool pass = true;
1740
1741    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1742        auto desc = &vi->pVertexBindingDescriptions[i];
1743        auto &binding = bindings[desc->binding];
1744        if (binding) {
1745            // TODO: VALIDATION_ERROR_02105 perhaps?
1746            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1747                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1748                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1749                pass = false;
1750            }
1751        } else {
1752            binding = desc;
1753        }
1754    }
1755
1756    return pass;
1757}
1758
1759static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1760                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1761    bool pass = true;
1762
1763    auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);
1764
1765    // Build index by location
1766    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1767    if (vi) {
1768        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1769            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1770            for (auto j = 0u; j < num_locations; j++) {
1771                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1772            }
1773        }
1774    }
1775
1776    auto it_a = attribs.begin();
1777    auto it_b = inputs.begin();
1778    bool used = false;
1779
1780    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1781        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1782        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1783        auto a_first = a_at_end ? 0 : it_a->first;
1784        auto b_first = b_at_end ? 0 : it_b->first.first;
1785        if (!a_at_end && (b_at_end || a_first < b_first)) {
1786            if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1787                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1788                        "Vertex attribute at location %d not consumed by vertex shader", a_first)) {
1789                pass = false;
1790            }
1791            used = false;
1792            it_a++;
1793        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1794            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
1795                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Vertex shader consumes input at location %d which is not provided",
1796                        b_first)) {
1797                pass = false;
1798            }
1799            it_b++;
1800        } else {
1801            unsigned attrib_type = get_format_type(it_a->second->format);
1802            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1803
1804            // Type checking
1805            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1806                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1807                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1808                            "Attribute type of `%s` at location %d does not match vertex shader input type of `%s`",
1809                            string_VkFormat(it_a->second->format), a_first,
1810                            describe_type(vs, it_b->second.type_id).c_str())) {
1811                    pass = false;
1812                }
1813            }
1814
1815            // OK!
1816            used = true;
1817            it_b++;
1818        }
1819    }
1820
1821    return pass;
1822}
1823
1824static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
1825                                                    spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci,
1826                                                    uint32_t subpass_index) {
1827    std::map<uint32_t, VkFormat> color_attachments;
1828    auto subpass = rpci->pSubpasses[subpass_index];
1829    for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
1830        uint32_t attachment = subpass.pColorAttachments[i].attachment;
1831        if (attachment == VK_ATTACHMENT_UNUSED)
1832            continue;
1833        if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
1834            color_attachments[i] = rpci->pAttachments[attachment].format;
1835        }
1836    }
1837
1838    bool pass = true;
1839
1840    // TODO: dual source blend index (spv::DecIndex, zero if not provided)
1841
1842    auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);
1843
1844    auto it_a = outputs.begin();
1845    auto it_b = color_attachments.begin();
1846
1847    // Walk attachment list and outputs together
1848
1849    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1850        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1851        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1852
1853        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
1854            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1855                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1856                        "fragment shader writes to output location %d with no matching attachment", it_a->first.first)) {
1857                pass = false;
1858            }
1859            it_a++;
1860        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
1861            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1862                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by fragment shader",
1863                        it_b->first)) {
1864                pass = false;
1865            }
1866            it_b++;
1867        } else {
1868            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
1869            unsigned att_type = get_format_type(it_b->second);
1870
1871            // Type checking
1872            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1873                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1874                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1875                            "Attachment %d of type `%s` does not match fragment shader output type of `%s`", it_b->first,
1876                            string_VkFormat(it_b->second),
1877                            describe_type(fs, it_a->second.type_id).c_str())) {
1878                    pass = false;
1879                }
1880            }
1881
1882            // OK!
1883            it_a++;
1884            it_b++;
1885        }
1886    }
1887
1888    return pass;
1889}
1890
1891// For some analyses, we need to know about all ids referenced by the static call tree of a particular entrypoint. This is
1892// important for identifying the set of shader resources actually used by an entrypoint, for example.
1893// Note: we only explore parts of the module which might actually contain ids we care about for the above analyses.
1894//  - NOT the shader input/output interfaces.
1895//
1896// TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1897// converting parts of this to be generated from the machine-readable spec instead.
1898static std::unordered_set<uint32_t> mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint) {
1899    std::unordered_set<uint32_t> ids;
1900    std::unordered_set<uint32_t> worklist;
1901    worklist.insert(entrypoint.word(2));
1902
1903    while (!worklist.empty()) {
1904        auto id_iter = worklist.begin();
1905        auto id = *id_iter;
1906        worklist.erase(id_iter);
1907
1908        auto insn = src->get_def(id);
1909        if (insn == src->end()) {
1910            // ID is something we didn't collect in build_def_index. That's OK -- we'll stumble across all kinds of things here
1911            // that we may not care about.
1912            continue;
1913        }
1914
1915        // Try to add to the output set
1916        if (!ids.insert(id).second) {
1917            continue; // If we already saw this id, we don't want to walk it again.
1918        }
1919
1920        switch (insn.opcode()) {
1921        case spv::OpFunction:
1922            // Scan whole body of the function, enlisting anything interesting
1923            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1924                switch (insn.opcode()) {
1925                case spv::OpLoad:
1926                case spv::OpAtomicLoad:
1927                case spv::OpAtomicExchange:
1928                case spv::OpAtomicCompareExchange:
1929                case spv::OpAtomicCompareExchangeWeak:
1930                case spv::OpAtomicIIncrement:
1931                case spv::OpAtomicIDecrement:
1932                case spv::OpAtomicIAdd:
1933                case spv::OpAtomicISub:
1934                case spv::OpAtomicSMin:
1935                case spv::OpAtomicUMin:
1936                case spv::OpAtomicSMax:
1937                case spv::OpAtomicUMax:
1938                case spv::OpAtomicAnd:
1939                case spv::OpAtomicOr:
1940                case spv::OpAtomicXor:
1941                    worklist.insert(insn.word(3)); // ptr
1942                    break;
1943                case spv::OpStore:
1944                case spv::OpAtomicStore:
1945                    worklist.insert(insn.word(1)); // ptr
1946                    break;
1947                case spv::OpAccessChain:
1948                case spv::OpInBoundsAccessChain:
1949                    worklist.insert(insn.word(3)); // base ptr
1950                    break;
1951                case spv::OpSampledImage:
1952                case spv::OpImageSampleImplicitLod:
1953                case spv::OpImageSampleExplicitLod:
1954                case spv::OpImageSampleDrefImplicitLod:
1955                case spv::OpImageSampleDrefExplicitLod:
1956                case spv::OpImageSampleProjImplicitLod:
1957                case spv::OpImageSampleProjExplicitLod:
1958                case spv::OpImageSampleProjDrefImplicitLod:
1959                case spv::OpImageSampleProjDrefExplicitLod:
1960                case spv::OpImageFetch:
1961                case spv::OpImageGather:
1962                case spv::OpImageDrefGather:
1963                case spv::OpImageRead:
1964                case spv::OpImage:
1965                case spv::OpImageQueryFormat:
1966                case spv::OpImageQueryOrder:
1967                case spv::OpImageQuerySizeLod:
1968                case spv::OpImageQuerySize:
1969                case spv::OpImageQueryLod:
1970                case spv::OpImageQueryLevels:
1971                case spv::OpImageQuerySamples:
1972                case spv::OpImageSparseSampleImplicitLod:
1973                case spv::OpImageSparseSampleExplicitLod:
1974                case spv::OpImageSparseSampleDrefImplicitLod:
1975                case spv::OpImageSparseSampleDrefExplicitLod:
1976                case spv::OpImageSparseSampleProjImplicitLod:
1977                case spv::OpImageSparseSampleProjExplicitLod:
1978                case spv::OpImageSparseSampleProjDrefImplicitLod:
1979                case spv::OpImageSparseSampleProjDrefExplicitLod:
1980                case spv::OpImageSparseFetch:
1981                case spv::OpImageSparseGather:
1982                case spv::OpImageSparseDrefGather:
1983                case spv::OpImageTexelPointer:
1984                    worklist.insert(insn.word(3)); // Image or sampled image
1985                    break;
1986                case spv::OpImageWrite:
1987                    worklist.insert(insn.word(1)); // Image -- different operand order to above
1988                    break;
1989                case spv::OpFunctionCall:
1990                    for (uint32_t i = 3; i < insn.len(); i++) {
1991                        worklist.insert(insn.word(i)); // fn itself, and all args
1992                    }
1993                    break;
1994
1995                case spv::OpExtInst:
1996                    for (uint32_t i = 5; i < insn.len(); i++) {
1997                        worklist.insert(insn.word(i)); // Operands to ext inst
1998                    }
1999                    break;
2000                }
2001            }
2002            break;
2003        }
2004    }
2005
2006    return ids;
2007}
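// For illustration: for a fragment shader whose body is just `color = texture(tex, uv);`,
// the walk starts at the entrypoint's function id, scans the function body, and the OpLoad
// feeding the image sample inserts the id of the `tex` variable -- so only descriptors
// actually reachable from this entrypoint end up in the returned set.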
2008
2009static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
2010                                                          std::vector<VkPushConstantRange> const *push_constant_ranges,
2011                                                          shader_module const *src, spirv_inst_iter type,
2012                                                          VkShaderStageFlagBits stage) {
2013    bool pass = true;
2014
2015    // Strip off ptrs etc
2016    type = get_struct_type(src, type, false);
2017    assert(type != src->end());
2018
2019    // Validate directly off the offsets. This isn't quite correct for arrays and matrices, but is a good first step.
2020    // TODO: arrays, matrices, weird sizes
2021    for (auto insn : *src) {
2022        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
2023
2024            if (insn.word(3) == spv::DecorationOffset) {
2025                unsigned offset = insn.word(4);
2026                auto size = 4; // Bytes; TODO: calculate this based on the type
2027
2028                bool found_range = false;
2029                for (auto const &range : *push_constant_ranges) {
2030                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
2031                        found_range = true;
2032
2033                        if ((range.stageFlags & stage) == 0) {
2034                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2035                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2036                                        "Push constant range covering variable starting at "
2037                                        "offset %u not accessible from stage %s",
2038                                        offset, string_VkShaderStageFlagBits(stage))) {
2039                                pass = false;
2040                            }
2041                        }
2042
2043                        break;
2044                    }
2045                }
2046
2047                if (!found_range) {
2048                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2049                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
2050                                "Push constant range covering variable starting at "
2051                                "offset %u not declared in layout",
2052                                offset)) {
2053                        pass = false;
2054                    }
2055                }
2056            }
2057        }
2058    }
2059
2060    return pass;
2061}
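// Worked example of the range check above: a block member decorated Offset 16 needs some
// VkPushConstantRange with offset <= 16 and offset + size >= 20 (given the assumed 4-byte
// member size), and that range's stageFlags must include the shader's stage.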
2062
2063static bool validate_push_constant_usage(debug_report_data *report_data,
2064                                         std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
2065                                         std::unordered_set<uint32_t> const &accessible_ids, VkShaderStageFlagBits stage) {
2066    bool pass = true;
2067
2068    for (auto id : accessible_ids) {
2069        auto def_insn = src->get_def(id);
2070        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
2071            pass &= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
2072                                                                  src->get_def(def_insn.word(1)), stage);
2073        }
2074    }
2075
2076    return pass;
2077}
2078
2079// For given pipelineLayout verify that the set_layout_node at slot.first
2080//  has the requested binding at slot.second and return ptr to that binding
2081static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {
2082
2083    if (!pipelineLayout)
2084        return nullptr;
2085
2086    if (slot.first >= pipelineLayout->set_layouts.size())
2087        return nullptr;
2088
2089    return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
2090}
2091
2092// Block of code at start here for managing/tracking Pipeline state that this layer cares about
2093
2094// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
2095//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
2096//   to that same cmd buffer by separate thread are not changing state from underneath us
2097// Track the last cmd buffer touched by this thread
2098
2099static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2100    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2101        if (pCB->drawCount[i])
2102            return true;
2103    }
2104    return false;
2105}
2106
2107// Check object status for selected flag state
2108static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2109                            const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
2110    if (!(pNode->status & status_mask)) {
2111        char const *const message = validation_error_map[msg_code];
2112        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2113                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, msg_code, "DS",
2114                       "command buffer object 0x%p: %s. %s.", pNode->commandBuffer, fail_msg, message);
2115    }
2116    return false;
2117}
2118
2119// Retrieve pipeline node ptr for given pipeline object
2120static PIPELINE_STATE *getPipelineState(layer_data const *my_data, VkPipeline pipeline) {
2121    auto it = my_data->pipelineMap.find(pipeline);
2122    if (it == my_data->pipelineMap.end()) {
2123        return nullptr;
2124    }
2125    return it->second;
2126}
2127
2128static RENDER_PASS_STATE *getRenderPassState(layer_data const *my_data, VkRenderPass renderpass) {
2129    auto it = my_data->renderPassMap.find(renderpass);
2130    if (it == my_data->renderPassMap.end()) {
2131        return nullptr;
2132    }
2133    return it->second.get();
2134}
2135
2136static FRAMEBUFFER_STATE *getFramebufferState(const layer_data *my_data, VkFramebuffer framebuffer) {
2137    auto it = my_data->frameBufferMap.find(framebuffer);
2138    if (it == my_data->frameBufferMap.end()) {
2139        return nullptr;
2140    }
2141    return it->second.get();
2142}
2143
2144cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
2145    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
2146    if (it == my_data->descriptorSetLayoutMap.end()) {
2147        return nullptr;
2148    }
2149    return it->second;
2150}
2151
2152static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
2153    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
2154    if (it == my_data->pipelineLayoutMap.end()) {
2155        return nullptr;
2156    }
2157    return &it->second;
2158}
2159
2160// Return true if for a given PSO, the given state enum is dynamic, else return false
2161static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
2162    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2163        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2164            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2165                return true;
2166        }
2167    }
2168    return false;
2169}
2170
2171// Validate state stored as flags at time of draw call
2172static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
2173                                      UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
2174    bool result = false;
2175    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2176        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2177         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
2178        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2179                                  "Dynamic line width state not set for this command buffer", msg_code);
2180    }
2181    if (pPipe->graphicsPipelineCI.pRasterizationState &&
2182        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2183        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2184                                  "Dynamic depth bias state not set for this command buffer", msg_code);
2185    }
2186    if (pPipe->blendConstantsEnabled) {
2187        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2188                                  "Dynamic blend constants state not set for this command buffer", msg_code);
2189    }
2190    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2191        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2192        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2193                                  "Dynamic depth bounds state not set for this command buffer", msg_code);
2194    }
2195    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2196        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2197        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2198                                  "Dynamic stencil read mask state not set for this command buffer", msg_code);
2199        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2200                                  "Dynamic stencil write mask state not set for this command buffer", msg_code);
2201        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2202                                  "Dynamic stencil reference state not set for this command buffer", msg_code);
2203    }
2204    if (indexed) {
2205        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2206                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
2207    }
2208
2209    return result;
2210}
2211
2212// Verify attachment reference compatibility according to spec
2213//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED & the other array must match this
2214//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
2215//   to make sure that format and sample counts match.
2216//  If not, they are not compatible.
2217static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2218                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2219                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2220                                             const VkAttachmentDescription *pSecondaryAttachments) {
2221    // Check potential NULL cases first to avoid nullptr issues later
2222    if (pPrimary == nullptr) {
2223        if (pSecondary == nullptr) {
2224            return true;
2225        }
2226        return false;
2227    } else if (pSecondary == nullptr) {
2228        return false;
2229    }
2230    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2231        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2232            return true;
2233    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2234        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2235            return true;
2236    } else { // Format and sample count must match
2237        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2238            return true;
2239        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2240            return false;
2241        }
2242        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2243             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2244            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2245             pSecondaryAttachments[pSecondary[index].attachment].samples))
2246            return true;
2247    }
2248    // Format and sample counts didn't match
2249    return false;
2250}
2251// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
2252// For given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
2253static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPassCreateInfo *primaryRPCI,
2254                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
2255    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2256        stringstream errorStr;
2257        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2258                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2259        errorMsg = errorStr.str();
2260        return false;
2261    }
2262    uint32_t spIndex = 0;
2263    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2264        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2265        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2266        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2267        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2268        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2269            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2270                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2271                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2272                stringstream errorStr;
2273                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2274                errorMsg = errorStr.str();
2275                return false;
2276            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2277                                                         primaryColorCount, primaryRPCI->pAttachments,
2278                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2279                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2280                stringstream errorStr;
2281                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2282                errorMsg = errorStr.str();
2283                return false;
2284            }
2285        }
2286
2287        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2288                                              1, primaryRPCI->pAttachments,
2289                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2290                                              1, secondaryRPCI->pAttachments)) {
2291            stringstream errorStr;
2292            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2293            errorMsg = errorStr.str();
2294            return false;
2295        }
2296
2297        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2298        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2299        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2300        for (uint32_t i = 0; i < inputMax; ++i) {
2301            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2302                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2303                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2304                stringstream errorStr;
2305                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2306                errorMsg = errorStr.str();
2307                return false;
2308            }
2309        }
2310    }
2311    return true;
2312}
2313
2314// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2315// pipelineLayout[layoutIndex]
2316static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *descriptor_set,
2317                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
2318                                            string &errorMsg) {
2319    auto num_sets = pipeline_layout->set_layouts.size();
2320    if (layoutIndex >= num_sets) {
2321        stringstream errorStr;
2322        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
2323                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
2324                 << layoutIndex;
2325        errorMsg = errorStr.str();
2326        return false;
2327    }
2328    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
2329    return descriptor_set->IsCompatible(layout_node, &errorMsg);
2330}
2331
2332// Validate that data for each specialization entry is fully contained within the buffer.
2333static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2334    bool pass = true;
2335
2336    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2337
2338    if (spec) {
2339        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2340            // TODO: This is a good place for VALIDATION_ERROR_00589.
2341            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2342                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
2343                            VALIDATION_ERROR_00590, "SC",
2344                            "Specialization entry %u (for constant id %u) references memory outside provided "
2345                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2346                            " bytes provided). %s.",
2347                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2348                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize,
2349                            validation_error_map[VALIDATION_ERROR_00590])) {
2350
2351                    pass = false;
2352                }
2353            }
2354        }
2355    }
2356
2357    return pass;
2358}
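// For illustration: a VkSpecializationMapEntry { constantID = 7, offset = 4, size = 8 }
// against dataSize == 8 fails the check above (4 + 8 > 8) and is reported as referencing
// bytes 4..11 of an 8-byte buffer.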
2359
2360static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
2361                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2362    auto type = module->get_def(type_id);
2363
2364    descriptor_count = 1;
2365
2366    // Strip off any array or ptrs. Where we remove array levels, adjust the descriptor count for each dimension.
2367    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2368        if (type.opcode() == spv::OpTypeArray) {
2369            descriptor_count *= get_constant_value(module, type.word(3));
2370            type = module->get_def(type.word(2));
2371        } else {
2373            type = module->get_def(type.word(3));
2374        }
2375    }
2376
2377    switch (type.opcode()) {
2378    case spv::OpTypeStruct: {
2379        for (auto insn : *module) {
2380            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2381                if (insn.word(2) == spv::DecorationBlock) {
2382                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2383                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2384                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2385                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2386                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2387                }
2388            }
2389        }
2390
2391        // Invalid
2392        return false;
2393    }
2394
2395    case spv::OpTypeSampler:
2396        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
2397            descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2398
2399    case spv::OpTypeSampledImage:
2400        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2401            // Slight relaxation for some GLSL historical madness: samplerBuffer doesn't really have a sampler, and a texel
2402            // buffer descriptor doesn't really provide one. Allow this slight mismatch.
2403            auto image_type = module->get_def(type.word(2));
2404            auto dim = image_type.word(3);
2405            auto sampled = image_type.word(7);
2406            return dim == spv::DimBuffer && sampled == 1;
2407        }
2408        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2409
2410    case spv::OpTypeImage: {
2411        // Many descriptor types can back an image type -- which one depends on the dimension and on whether the image will
2412        // be used with a sampler. SPIR-V for Vulkan requires that Sampled be 1 or 2 -- leaving the decision to runtime (0) is not allowed.
2413        auto dim = type.word(3);
2414        auto sampled = type.word(7);
2415
2416        if (dim == spv::DimSubpassData) {
2417            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2418        } else if (dim == spv::DimBuffer) {
2419            if (sampled == 1) {
2420                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2421            } else {
2422                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2423            }
2424        } else if (sampled == 1) {
2425            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
2426                descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2427        } else {
2428            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2429        }
2430    }
2431
2432    // We shouldn't really see any other junk types -- but if we do, they're a mismatch.
2433    default:
2434        return false; // Mismatch
2435    }
2436}
2437
2438static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2439    if (!feature) {
2440        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2441                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2442                    "Shader requires VkPhysicalDeviceFeatures::%s but the feature is not "
2443                    "enabled on the device",
2444                    feature_name)) {
2445            return false;
2446        }
2447    }
2448
2449    return true;
2450}
2451
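// Every OpCapability declared by the module must either be core in Vulkan 1.0 or be
// backed by the VkPhysicalDeviceFeatures member that the app enabled at vkCreateDevice
// time. For example, a GLSL shader that uses a 'double' causes the compiler to emit
// 'OpCapability Float64', which maps to the shaderFloat64 feature checked below.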
2452static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2453                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
2454    bool pass = true;
2455
2456
2457    for (auto insn : *src) {
2458        if (insn.opcode() == spv::OpCapability) {
2459            switch (insn.word(1)) {
2460            case spv::CapabilityMatrix:
2461            case spv::CapabilityShader:
2462            case spv::CapabilityInputAttachment:
2463            case spv::CapabilitySampled1D:
2464            case spv::CapabilityImage1D:
2465            case spv::CapabilitySampledBuffer:
2466            case spv::CapabilityImageBuffer:
2467            case spv::CapabilityImageQuery:
2468            case spv::CapabilityDerivativeControl:
2469                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2470                break;
2471
2472            case spv::CapabilityGeometry:
2473                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
2474                break;
2475
2476            case spv::CapabilityTessellation:
2477                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
2478                break;
2479
2480            case spv::CapabilityFloat64:
2481                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2482                break;
2483
2484            case spv::CapabilityInt64:
2485                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
2486                break;
2487
2488            case spv::CapabilityTessellationPointSize:
2489            case spv::CapabilityGeometryPointSize:
2490                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2491                                        "shaderTessellationAndGeometryPointSize");
2492                break;
2493
2494            case spv::CapabilityImageGatherExtended:
2495                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2496                break;
2497
2498            case spv::CapabilityStorageImageMultisample:
2499                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2500                break;
2501
2502            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2503                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2504                                        "shaderUniformBufferArrayDynamicIndexing");
2505                break;
2506
2507            case spv::CapabilitySampledImageArrayDynamicIndexing:
2508                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2509                                        "shaderSampledImageArrayDynamicIndexing");
2510                break;
2511
2512            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2513                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2514                                        "shaderStorageBufferArrayDynamicIndexing");
2515                break;
2516
2517            case spv::CapabilityStorageImageArrayDynamicIndexing:
2518                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2519                                        "shaderStorageImageArrayDynamicIndexing");
2520                break;
2521
2522            case spv::CapabilityClipDistance:
2523                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2524                break;
2525
2526            case spv::CapabilityCullDistance:
2527                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2528                break;
2529
2530            case spv::CapabilityImageCubeArray:
2531                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2532                break;
2533
2534            case spv::CapabilitySampleRateShading:
2535                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2536                break;
2537
2538            case spv::CapabilitySparseResidency:
2539                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2540                break;
2541
2542            case spv::CapabilityMinLod:
2543                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2544                break;
2545
2546            case spv::CapabilitySampledCubeArray:
2547                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2548                break;
2549
2550            case spv::CapabilityImageMSArray:
2551                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2552                break;
2553
2554            case spv::CapabilityStorageImageExtendedFormats:
2555                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
2556                                        "shaderStorageImageExtendedFormats");
2557                break;
2558
2559            case spv::CapabilityInterpolationFunction:
2560                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2561                break;
2562
2563            case spv::CapabilityStorageImageReadWithoutFormat:
2564                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2565                                        "shaderStorageImageReadWithoutFormat");
2566                break;
2567
2568            case spv::CapabilityStorageImageWriteWithoutFormat:
2569                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2570                                        "shaderStorageImageWriteWithoutFormat");
2571                break;
2572
2573            case spv::CapabilityMultiViewport:
2574                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
2575                break;
2576
2577            default:
2578                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2579                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2580                            "Shader declares capability %u, which is not supported in Vulkan.",
2581                            insn.word(1)))
2582                    pass = false;
2583                break;
2584            }
2585        }
2586    }
2587
2588    return pass;
2589}
2590
2591
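// Fold the image dimensionality implied by a SPIR-V type into the descriptor_req bits
// that the bound image view must satisfy at draw time. For example, a GLSL
// 'uniform sampler2DArray' is an OpTypeSampledImage of an OpTypeImage with
// Dim2D/arrayed=1/MS=0, which yields DESCRIPTOR_REQ_SINGLE_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY.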
2592static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
2593    auto type = module->get_def(type_id);
2594
2595    while (true) {
2596        switch (type.opcode()) {
2597        case spv::OpTypeArray:
2598        case spv::OpTypeSampledImage:
2599            type = module->get_def(type.word(2));
2600            break;
2601        case spv::OpTypePointer:
2602            type = module->get_def(type.word(3));
2603            break;
2604        case spv::OpTypeImage: {
2605            auto dim = type.word(3);
2606            auto arrayed = type.word(5);
2607            auto msaa = type.word(6);
2608
2609            switch (dim) {
2610            case spv::Dim1D:
2611                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
2612            case spv::Dim2D:
2613                return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
2614                    (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
2615            case spv::Dim3D:
2616                return DESCRIPTOR_REQ_VIEW_TYPE_3D;
2617            case spv::DimCube:
2618                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
2619            case spv::DimSubpassData:
2620                return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
2621            default:  // buffer, etc.
2622                return 0;
2623            }
2624        }
2625        default:
2626            return 0;
2627        }
2628    }
2629}
2630
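// Per-stage checks run at pipeline creation time: entrypoint lookup, capability/feature
// checks, specialization bounds, push constant usage, and a walk over every
// (set, binding) slot the entrypoint can reach, verifying that the pipeline layout
// declares a compatible binding there. As a side effect this populates
// pipeline->active_slots, which the draw-time checks in ValidateDrawState consume.
// (pStage->module is assumed to have been validated already, so the map lookup below
// is not guarded against a missing entry.)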
2631static bool
2632validate_pipeline_shader_stage(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *pStage,
2633                               PIPELINE_STATE *pipeline, shader_module **out_module, spirv_inst_iter *out_entrypoint,
2634                               VkPhysicalDeviceFeatures const *enabledFeatures,
2635                               std::unordered_map<VkShaderModule, std::unique_ptr<shader_module>> const &shaderModuleMap) {
2636    bool pass = true;
2637    auto module_it = shaderModuleMap.find(pStage->module);
2638    auto module = *out_module = module_it->second.get();
2639
2640    // Find the entrypoint
2641    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2642    if (entrypoint == module->end()) {
2643        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__, VALIDATION_ERROR_00510,
2644                    "SC", "No entrypoint found named `%s` for stage %s. %s.", pStage->pName,
2645                    string_VkShaderStageFlagBits(pStage->stage), validation_error_map[VALIDATION_ERROR_00510])) {
2646            return false;   // no point continuing beyond here, any analysis is just going to be garbage.
2647        }
2648    }
2649
2650    // Validate shader capabilities against enabled device features
2651    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
2652
2653    // Mark accessible ids
2654    auto accessible_ids = mark_accessible_ids(module, entrypoint);
2655
2656    // Validate descriptor set layout against what the entrypoint actually uses
2657    auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);
2658
2659    auto pipelineLayout = pipeline->pipeline_layout;
2660
2661    pass &= validate_specialization_offsets(report_data, pStage);
2662    pass &= validate_push_constant_usage(report_data, &pipelineLayout.push_constant_ranges, module, accessible_ids, pStage->stage);
2663
2664    // Validate descriptor use
2665    for (auto use : descriptor_uses) {
2666        // While validating shaders, capture which slots are used by the pipeline
2667        auto & reqs = pipeline->active_slots[use.first.first][use.first.second];
2668        reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));
2669
2670        // Verify given pipelineLayout has requested setLayout with requested binding
2671        const auto &binding = get_descriptor_binding(&pipelineLayout, use.first);
2672        unsigned required_descriptor_count;
2673
2674        if (!binding) {
2675            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2676                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2677                        "Shader uses descriptor slot %u.%u (used as type `%s`) but it is not declared in the pipeline layout",
2678                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2679                pass = false;
2680            }
2681        } else if (~binding->stageFlags & pStage->stage) {
2682            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2683                        0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2684                        "Shader uses descriptor slot %u.%u (used "
2685                        "as type `%s`) but the descriptor is not "
2686                        "accessible from stage %s",
2687                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2688                        string_VkShaderStageFlagBits(pStage->stage))) {
2689                pass = false;
2690            }
2691        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
2692                                          required_descriptor_count)) {
2693            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2694                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
2695                                                                       "%u.%u (used as type `%s`) but "
2696                                                                       "bound descriptor is of type %s",
2697                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2698                        string_VkDescriptorType(binding->descriptorType))) {
2699                pass = false;
2700            }
2701        } else if (binding->descriptorCount < required_descriptor_count) {
2702            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2703                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2704                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2705                        required_descriptor_count, use.first.first, use.first.second,
2706                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
2707                pass = false;
2708            }
2709        }
2710    }
2711
2712    // Validate use of input attachments against subpass structure
2713    if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
2714        auto input_attachment_uses = collect_interface_by_input_attachment_index(report_data, module, accessible_ids);
2715
2716        auto rpci = pipeline->render_pass_ci.ptr();
2717        auto subpass = pipeline->graphicsPipelineCI.subpass;
2718
2719        for (auto use : input_attachment_uses) {
2720            auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
2721            auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount) ?
2722                    input_attachments[use.first].attachment : VK_ATTACHMENT_UNUSED;
2723
2724            if (index == VK_ATTACHMENT_UNUSED) {
2725                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2726                            SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
2727                            "Shader consumes input attachment index %d but it is not provided in the subpass",
2728                            use.first)) {
2729                    pass = false;
2730                }
2731            }
2732            else if (get_format_type(rpci->pAttachments[index].format) !=
2733                    get_fundamental_type(module, use.second.type_id)) {
2734                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2735                            SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
2736                            "Subpass input attachment %u format of %s does not match type used in shader `%s`",
2737                            use.first, string_VkFormat(rpci->pAttachments[index].format),
2738                            describe_type(module, use.second.type_id).c_str())) {
2739                    pass = false;
2740                }
2741            }
2742        }
2743    }
2744
2745    return pass;
2746}
2747
2748
2749// Validate the shaders used by the given pipeline, and record into pPipeline->active_slots
2750//  the descriptor slots that are actually used by the pipeline
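// Note on the producer/consumer walk below: each present stage is paired with the next
// present stage in pipeline order (VS -> TCS -> TES -> GS -> FS), skipping absent ones.
// E.g. a pipeline with only VS and FS validates the VS outputs directly against the FS inputs.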
2751static bool
2752validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
2753                                           VkPhysicalDeviceFeatures const *enabledFeatures,
2754                                           std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
2755    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
2756    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2757    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2758
2759    shader_module *shaders[5];
2760    memset(shaders, 0, sizeof(shaders));
2761    spirv_inst_iter entrypoints[5];
2762    memset(entrypoints, 0, sizeof(entrypoints));
2763    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2764    bool pass = true;
2765
2766    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2767        auto pStage = &pCreateInfo->pStages[i];
2768        auto stage_id = get_shader_stage_id(pStage->stage);
2769        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
2770                                               &shaders[stage_id], &entrypoints[stage_id],
2771                                               enabledFeatures, shaderModuleMap);
2772    }
2773
2774    // if the shader stages are no good individually, cross-stage validation is pointless.
2775    if (!pass)
2776        return false;
2777
2778    vi = pCreateInfo->pVertexInputState;
2779
2780    if (vi) {
2781        pass &= validate_vi_consistency(report_data, vi);
2782    }
2783
2784    if (shaders[vertex_stage]) {
2785        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2786    }
2787
2788    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2789    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2790
2791    while (!shaders[producer] && producer != fragment_stage) {
2792        producer++;
2793        consumer++;
2794    }
2795
2796    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2797        assert(shaders[producer]);
2798        if (shaders[consumer]) {
2799            pass &= validate_interface_between_stages(report_data,
2800                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2801                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
2802
2803            producer = consumer;
2804        }
2805    }
2806
2807    if (shaders[fragment_stage]) {
2808        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
2809                                                        pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass);
2810    }
2811
2812    return pass;
2813}
2814
2815static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
2816                                      VkPhysicalDeviceFeatures const *enabledFeatures,
2817                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
2818    auto pCreateInfo = pPipeline->computePipelineCI.ptr();
2819
2820    shader_module *module;
2821    spirv_inst_iter entrypoint;
2822
2823    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
2824                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
2825}
2826// Return Set node ptr for specified set or else NULL
2827cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
2828    auto set_it = my_data->setMap.find(set);
2829    if (set_it == my_data->setMap.end()) {
2830        return NULL;
2831    }
2832    return set_it->second;
2833}
2834
2835// For the given pipeline, return its number of MSAA samples, or VK_SAMPLE_COUNT_1_BIT if MSAA is disabled
2836static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
2837    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
2838        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
2839        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
2840    }
2841    return VK_SAMPLE_COUNT_1_BIT;
2842}
2843
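// Stream the indices of the set bits in 'bits' as a comma-separated list, e.g. bits == 0x5
// produces "0,2". Used below to report which dynamic viewports/scissors were never set.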
2844static void list_bits(std::ostream& s, uint32_t bits) {
2845    for (int i = 0; i < 32 && bits; i++) {
2846        if (bits & (1 << i)) {
2847            s << i;
2848            bits &= ~(1 << i);
2849            if (bits) {
2850                s << ",";
2851            }
2852        }
2853    }
2854}
2855
2856// Validate draw-time state related to the PSO
2857static bool ValidatePipelineDrawtimeState(layer_data const *my_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
2858                                          PIPELINE_STATE const *pPipeline) {
2859    bool skip_call = false;
2860
2861    // Verify vertex binding
2862    if (pPipeline->vertexBindingDescriptions.size() > 0) {
2863        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
2864            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
2865            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
2866                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
2867                skip_call |= log_msg(
2868                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2869                    DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2870                    "The Pipeline State Object (0x%" PRIxLEAST64 ") expects this Command Buffer's vertex binding index %u "
2871                    "to be set via vkCmdBindVertexBuffers. This is because the VkVertexInputBindingDescription struct "
2872                    "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
2873                    (uint64_t)state.pipeline_state->pipeline, vertex_binding, i, vertex_binding);
2874            }
2875        }
2876    } else {
2877        if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
2878            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
2879                                 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2880                                 "Vertex buffers are bound to command buffer (0x%p"
2881                                 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
2882                                 pCB->commandBuffer, (uint64_t)state.pipeline_state->pipeline);
2883        }
2884    }
2885    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
2886    // Skip check if rasterization is disabled or there is no viewport.
2887    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
2888         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
2889        pPipeline->graphicsPipelineCI.pViewportState) {
2890        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
2891        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
2892
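        // The PSO's viewportCount/scissorCount determine which dynamic state indices must
        // have been set on this command buffer. E.g. (illustrative) viewportCount == 3
        // requires viewport bits 0b111; if the app only set viewport 0, missingViewportMask
        // is 0b110 and viewports 1 and 2 are reported below.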
2893        if (dynViewport) {
2894            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
2895            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
2896            if (missingViewportMask) {
2897                std::stringstream ss;
2898                ss << "Dynamic viewport(s) ";
2899                list_bits(ss, missingViewportMask);
2900                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
2901                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2902                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2903                                     "%s", ss.str().c_str());
2904            }
2905        }
2906
2907        if (dynScissor) {
2908            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
2909            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
2910            if (missingScissorMask) {
2911                std::stringstream ss;
2912                ss << "Dynamic scissor(s) ";
2913                list_bits(ss, missingScissorMask);
2914                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
2915                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2916                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2917                                     "%s", ss.str().c_str());
2918            }
2919        }
2920    }
2921
2922    // Verify that any MSAA request in PSO matches sample# in bound FB
2923    // Skip the check if rasterization is disabled.
2924    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
2925        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
2926        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
2927        if (pCB->activeRenderPass) {
2928            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
2929            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
2930            uint32_t i;
2931
2932            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
2933            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
2934                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
2935                skip_call |=
2936                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2937                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
2938                                "Render pass subpass %u: blend state attachment count %u does not match the subpass "
2939                                "color attachment count %u in Pipeline (0x%" PRIxLEAST64 ")!  These counts "
2940                                "must be the same at draw-time.",
2941                                pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
2942                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2943            }
2944
2945            unsigned subpass_num_samples = 0;
2946
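            // Each used color and depth/stencil attachment contributes its sample-count bit.
            // All of them are required to match the PSO's rasterizationSamples, so after
            // OR-ing, a single matching bit is expected; anything else is reported below.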
2947            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
2948                auto attachment = subpass_desc->pColorAttachments[i].attachment;
2949                if (attachment != VK_ATTACHMENT_UNUSED)
2950                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
2951            }
2952
2953            if (subpass_desc->pDepthStencilAttachment &&
2954                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
2955                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
2956                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
2957            }
2958
2959            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
2960                skip_call |=
2961                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2962                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2963                                "Num samples mismatch! At draw-time, Pipeline (0x%" PRIxLEAST64
2964                                ") uses %u samples while the current RenderPass (0x%" PRIxLEAST64 ") uses %u samples!",
2965                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
2966                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
2967            }
2968        } else {
2969            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2970                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2971                                 "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
2972                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2973        }
2974    }
2975    // Verify that PSO creation renderPass is compatible with active renderPass
2976    if (pCB->activeRenderPass) {
2977        std::string err_string;
2978        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
2979            !verify_renderpass_compatibility(my_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
2980                                             err_string)) {
2981            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
2982            skip_call |=
2983                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2984                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
2985                        "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline "
2986                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
2987                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass),
2988                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
2989                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
2990        }
2991
2992        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
2993            skip_call |=
2994                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2995                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
2996                        "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
2997                        pCB->activeSubpass);
2998        }
2999    }
3000    // TODO : Add more checks here
3001
3002    return skip_call;
3003}
3004
3005// Validate overall state at the time of a draw call
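// Check order: a pipeline must be bound; then the draw-state flags (graphics only); then,
// for each descriptor set the pipeline actually uses: the set must be bound, be
// layout-compatible with the PSO's pipeline layout, have been updated, and hold
// descriptors valid for this draw; finally, ValidatePipelineDrawtimeState covers the
// PSO-vs-dynamic-state and render pass checks.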
3006static bool ValidateDrawState(layer_data *my_data, GLOBAL_CB_NODE *cb_node, const bool indexed,
3007                              const VkPipelineBindPoint bind_point, const char *function,
3008                              UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
3009    bool result = false;
3010    auto const &state = cb_node->lastBound[bind_point];
3011    PIPELINE_STATE *pPipe = state.pipeline_state;
3012    if (nullptr == pPipe) {
3013        result |= log_msg(
3014            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
3015            DRAWSTATE_INVALID_PIPELINE, "DS",
3016            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
3017        // Early return as any further checks below will be busted w/o a pipeline
3018        // Return unconditionally: any further checks below would dereference a null pipeline,
3019        // even when the message above was filtered out and log_msg returned false.
3020        return result;
3021    // First check flag states
3022    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
3023        result = validate_draw_state_flags(my_data, cb_node, pPipe, indexed, msg_code);
3024
3025    // Now complete other state checks
3026    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
3027        string errorString;
3028        auto pipeline_layout = pPipe->pipeline_layout;
3029
3030        for (const auto &set_binding_pair : pPipe->active_slots) {
3031            uint32_t setIndex = set_binding_pair.first;
3032            // If valid set is not bound throw an error
3033            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
3034                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3035                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
3036                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
3037                                  setIndex);
3038            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
3039                                                        errorString)) {
3040                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
3041                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
3042                result |=
3043                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3044                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
3045                            "VkDescriptorSet (0x%" PRIxLEAST64
3046                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
3047                            reinterpret_cast<uint64_t &>(setHandle), setIndex, reinterpret_cast<uint64_t &>(pipeline_layout.layout),
3048                            errorString.c_str());
3049            } else { // Valid set is bound and layout compatible, validate that it's updated
3050                // Pull the set node
3051                cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
3052                // Gather active bindings
3053                std::unordered_set<uint32_t> active_bindings;
3054                for (auto binding : set_binding_pair.second) {
3055                    active_bindings.insert(binding.first);
3056                }
3057                // Make sure set has been updated if it has no immutable samplers
3058                //  If it has immutable samplers, we'll flag error later as needed depending on binding
3059                if (!descriptor_set->IsUpdated()) {
3060                    for (auto binding : active_bindings) {
3061                        if (!descriptor_set->GetImmutableSamplerPtrFromBinding(binding)) {
3062                            result |= log_msg(
3063                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3064                                (uint64_t)descriptor_set->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
3065                                "Descriptor Set 0x%" PRIxLEAST64 " bound but was never updated. It is now being used to draw so "
3066                                "this will result in undefined behavior.",
3067                                (uint64_t)descriptor_set->GetSet());
3068                        }
3069                    }
3070                }
3071                // Validate the draw-time state for this descriptor set
3072                std::string err_str;
3073                if (!descriptor_set->ValidateDrawState(set_binding_pair.second, state.dynamicOffsets[setIndex], &err_str)) {
3074                    auto set = descriptor_set->GetSet();
3075                    result |=
3076                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3077                                reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
3078                                "Descriptor set 0x%" PRIxLEAST64 " encountered the following validation error at %s() time: %s",
3079                                reinterpret_cast<const uint64_t &>(set), function, err_str.c_str());
3080                }
3081            }
3082        }
3083    }
3084
3085    // Check general pipeline state that needs to be validated at drawtime
3086    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
3087        result |= ValidatePipelineDrawtimeState(my_data, state, cb_node, pPipe);
3088
3089    return result;
3090}
3091
3092static void UpdateDrawState(layer_data *my_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
3093    auto const &state = cb_state->lastBound[bind_point];
3094    PIPELINE_STATE *pPipe = state.pipeline_state;
3095    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
3096        for (const auto &set_binding_pair : pPipe->active_slots) {
3097            uint32_t setIndex = set_binding_pair.first;
3098            // Pull the set node
3099            cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
3100            // Bind this set and its active descriptor resources to the command buffer
3101            descriptor_set->BindCommandBuffer(cb_state, set_binding_pair.second);
3102            // For given active slots record updated images & buffers
3103            descriptor_set->GetStorageUpdates(set_binding_pair.second, &cb_state->updateBuffers, &cb_state->updateImages);
3104        }
3105    }
3106    if (pPipe->vertexBindingDescriptions.size() > 0) {
3107        cb_state->vertex_buffer_used = true;
3108    }
3109}
3110
3111// Validate HW line width capabilities prior to setting requested line width.
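// E.g. with the wideLines feature disabled, only lineWidth == 1.0f is legal; with it
// enabled, the width must fall within VkPhysicalDeviceLimits::lineWidthRange -- a
// hypothetical device reporting [1.0, 8.0] would reject lineWidth == 16.0f either way.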
3112static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
3113    bool skip_call = false;
3114
3115    // First check to see if the physical device supports wide lines.
3116    if ((VK_FALSE == my_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
3117        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
3118                             dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
3119                                            "not supported/enabled so lineWidth must be 1.0f!",
3120                             lineWidth);
3121    } else {
3122        // Otherwise, make sure the width falls in the valid range.
3123        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
3124            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
3125            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
3126                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
3127                                                          "to between [%f, %f]!",
3128                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
3129                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
3130        }
3131    }
3132
3133    return skip_call;
3134}
3135
3136// Verify that create state for a pipeline is valid
3137static bool verifyPipelineCreateState(layer_data *my_data, std::vector<PIPELINE_STATE *> pPipelines, int pipelineIndex) {
3138    bool skip_call = false;
3139
3140    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];
3141
3142    // If create derivative bit is set, check that we've specified a base
3143    // pipeline correctly, and that the base pipeline was created to allow
3144    // derivatives.
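    // Exactly one of the two base-pipeline fields may be used: basePipelineHandle names an
    // existing pipeline, while basePipelineIndex names an earlier entry in the same
    // pCreateInfos array. E.g. (illustrative) basePipelineHandle == VK_NULL_HANDLE with
    // basePipelineIndex == 0 is well-formed for any pipelineIndex >= 1.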
3145    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3146        PIPELINE_STATE *pBasePipeline = nullptr;
3147        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3148              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3149            // This check is a superset of VALIDATION_ERROR_00526 and VALIDATION_ERROR_00528
3150            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3151                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3152                                 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3153        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3154            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3155                skip_call |=
3156                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3157                            VALIDATION_ERROR_00518, "DS",
3158                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline. %s",
3159                            validation_error_map[VALIDATION_ERROR_00518]);
3160            } else {
3161                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3162            }
3163        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3164            pBasePipeline = getPipelineState(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3165        }
3166
3167        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3168            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3169                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3170                                 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3171        }
3172    }
3173
3174    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3175        if (!my_data->enabled_features.independentBlend) {
3176            if (pPipeline->attachments.size() > 1) {
3177                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3178                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3179                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
3180                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
3181                    // only attachment state, so memcmp is best suited for the comparison
3182                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
3183                               sizeof(pAttachments[0]))) {
3184                        skip_call |=
3185                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3186                                    VALIDATION_ERROR_01532, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
3187                                                                  "enabled, all elements of pAttachments must be identical. %s",
3188                                    validation_error_map[VALIDATION_ERROR_01532]);
3189                        break;
3190                    }
3191                }
3192            }
3193        }
3194        if (!my_data->enabled_features.logicOp &&
3195            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3196            skip_call |=
3197                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3198                        VALIDATION_ERROR_01533, "DS",
3199                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE. %s",
3200                        validation_error_map[VALIDATION_ERROR_01533]);
3201        }
3202    }
3203
3204    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3205    // produces nonsense errors that confuse users. Other layers should already
3206    // emit errors for renderpass being invalid.
3207    auto renderPass = getRenderPassState(my_data, pPipeline->graphicsPipelineCI.renderPass);
3208    if (renderPass && pPipeline->graphicsPipelineCI.subpass >= renderPass->createInfo.subpassCount) {
3209        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3210                             VALIDATION_ERROR_02122, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3211                                                           "is out of range for this renderpass (0..%u). %s",
3212                             pPipeline->graphicsPipelineCI.subpass, renderPass->createInfo.subpassCount - 1,
3213                             validation_error_map[VALIDATION_ERROR_02122]);
3214    }
3215
3216    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->enabled_features,
3217                                                    my_data->shaderModuleMap)) {
3218        skip_call = true;
3219    }
3220    // Each shader's stage must be unique
3221    if (pPipeline->duplicate_shaders) {
3222        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
3223            if (pPipeline->duplicate_shaders & stage) {
3224                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3225                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3226                                     "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
3227                                     string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
3228            }
3229        }
3230    }
3231    // VS is required
3232    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3233        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3234                             VALIDATION_ERROR_00532, "DS", "Invalid Pipeline CreateInfo State: Vertex Shader required. %s",
3235                             validation_error_map[VALIDATION_ERROR_00532]);
3236    }
3237    // Either both or neither TC/TE shaders should be defined
3238    if ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) &&
3239        !(pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
3240        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3241                             VALIDATION_ERROR_00534, "DS",
3242                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
3243                             validation_error_map[VALIDATION_ERROR_00534]);
3244    }
3245    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) &&
3246        (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
3247        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3248                             VALIDATION_ERROR_00535, "DS",
3249                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
3250                             validation_error_map[VALIDATION_ERROR_00535]);
3251    }
3252    // A compute shader must not be specified in a graphics pipeline
3253    if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
3254        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3255                             VALIDATION_ERROR_00533, "DS",
3256                             "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline. %s",
3257                             validation_error_map[VALIDATION_ERROR_00533]);
3258    }
3259    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3260    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3261    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3262        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
3263         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_02099, "DS", "Invalid Pipeline CreateInfo State: "
                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
                                                           "topology for tessellation pipelines. %s",
                             validation_error_map[VALIDATION_ERROR_02099]);
    }
    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_02100, "DS", "Invalid Pipeline CreateInfo State: "
                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                               "topology is only valid for tessellation pipelines. %s",
                                 validation_error_map[VALIDATION_ERROR_02100]);
        }
    }

    if (pPipeline->graphicsPipelineCI.pTessellationState &&
        ((pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints == 0) ||
         (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints >
          my_data->phys_dev_properties.properties.limits.maxTessellationPatchSize))) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_01426, "DS", "Invalid Pipeline CreateInfo State: "
                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                           "topology used with patchControlPoints value %u."
                                                           " patchControlPoints should be >0 and <=%u. %s",
                             pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints,
                             my_data->phys_dev_properties.properties.limits.maxTessellationPatchSize,
                             validation_error_map[VALIDATION_ERROR_01426]);
    }

    // If a rasterization state is provided, make sure that the line width conforms to the HW.
    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
                                         reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
                                         pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
        }
    }

    // If rasterization is not disabled and subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a
    // valid structure
    if (pPipeline->graphicsPipelineCI.pRasterizationState &&
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        auto subpass_desc = renderPass ? &renderPass->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;
        if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                     0, __LINE__, VALIDATION_ERROR_02115, "DS",
                                     "Invalid Pipeline CreateInfo State: "
                                     "pDepthStencilState is NULL when rasterization is enabled and subpass uses a "
                                     "depth/stencil attachment. %s",
                                     validation_error_map[VALIDATION_ERROR_02115]);
            }
        }
    }
    return skip_call;
}

// Free the Pipeline nodes
static void deletePipelines(layer_data *my_data) {
    if (my_data->pipelineMap.empty())
        return;
    for (auto &pipe_map_pair : my_data->pipelineMap) {
        delete pipe_map_pair.second;
    }
    my_data->pipelineMap.clear();
}

// The following block of code is dedicated to managing/tracking descriptor sets (DSs)

// Return Pool node ptr for specified pool or else NULL
DESCRIPTOR_POOL_STATE *getDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
    auto pool_it = dev_data->descriptorPoolMap.find(pool);
    if (pool_it == dev_data->descriptorPoolMap.end()) {
        return NULL;
    }
    return pool_it->second;
}

// Return false if the update struct is of a valid type; otherwise flag an error and return the code from the callback
static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        return false;
    default:
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
}

// Return the descriptor count for the given update struct
static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        // TODO : Need to understand this case better and make sure code is correct
        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
    default:
        return 0;
    }
}

// For given layout and update, return the first overall index of the layout that is updated
static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    return binding_start_index + arrayIndex;
}
// For given layout and update, return the last overall index of the layout that is updated
static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
    return binding_start_index + arrayIndex + count - 1;
}
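// Illustrative arithmetic for the two helpers above (a sketch in comments, not compiled): for a
// binding whose descriptors begin at overall layout index 5, an update with arrayIndex 2 and a
// descriptorCount of 3 touches overall indices [5 + 2, 5 + 2 + 3 - 1] = [7, 9]:
//   uint32_t start = getUpdateStartIndex(my_data, device, 5, 2, pUpdate); // 7
//   uint32_t end   = getUpdateEndIndex(my_data, device, 5, 2, pUpdate);   // 9, given pUpdate's
//                                                                         // descriptorCount == 3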
// Verify that the descriptor type in the update struct matches what's expected by the layout
static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
    // First get actual type of update
    bool skip_call = false;
    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
        break;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        // No need to validate
        return false;
    default:
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                             "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                             string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
    if (!skip_call) {
        if (layout_type != actualType) {
            skip_call |= log_msg(
                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
        }
    }
    return skip_call;
}
// TODO: Consolidate functions
bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
    if (imgsubIt == pCB->imageLayoutMap.end()) {
        return false;
    }
    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
    }
    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
    }
    node = imgsubIt->second;
    return true;
}
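
// Illustrative use of the per-aspect query above (a sketch in comments, not compiled): callers
// seed the node with VK_IMAGE_LAYOUT_MAX_ENUM as a "not found yet" sentinel and then probe one
// aspect at a time; if two aspects of the same image disagree, the mismatch is logged:
//   IMAGE_CMD_BUF_LAYOUT_NODE node(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
//   FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);   // fills node from the depth aspect
//   FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT); // errors if stencil disagrees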

bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
    if (imgsubIt == my_data->imageLayoutMap.end()) {
        return false;
    }
    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
    }
    layout = imgsubIt->second.layout;
    return true;
}

// find layout(s) on the cmd buf level
bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    ImageSubresourcePair imgpair = {image, true, range};
    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {image, false, VkImageSubresource()};
        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
        if (imgsubIt == pCB->imageLayoutMap.end())
            return false;
        node = imgsubIt->second;
    }
    return true;
}

// find layout(s) on the global level
bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {imgpair.image, false, VkImageSubresource()};
        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
        if (imgsubIt == my_data->imageLayoutMap.end())
            return false;
        layout = imgsubIt->second.layout;
    }
    return true;
}

bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    return FindLayout(my_data, imgpair, layout);
}

bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
    auto sub_data = my_data->imageSubresourceMap.find(image);
    if (sub_data == my_data->imageSubresourceMap.end())
        return false;
    auto image_state = getImageState(my_data, image);
    if (!image_state)
        return false;
    bool ignoreGlobal = false;
    // TODO: Make this robust for >1 aspect mask. For now it simply ignores
    // potential errors in this case.
    if (sub_data->second.size() >= (image_state->createInfo.arrayLayers * image_state->createInfo.mipLevels + 1)) {
        ignoreGlobal = true;
    }
    for (auto imgsubpair : sub_data->second) {
        if (ignoreGlobal && !imgsubpair.hasSubresource)
            continue;
        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
        if (img_data != my_data->imageLayoutMap.end()) {
            layouts.push_back(img_data->second.layout);
        }
    }
    return true;
}
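
// Illustrative arithmetic for the ignoreGlobal test above (a sketch of the intent, per the TODO):
// an image with arrayLayers == 2 and mipLevels == 3 has 2 * 3 = 6 subresources. Once
// imageSubresourceMap holds at least 6 + 1 = 7 entries for it (the extra entry being the
// whole-image, hasSubresource == false one), every subresource carries an explicit layout, so the
// global whole-image entry is skipped rather than reported alongside them.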

// Set the layout on the global level
void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    VkImage &image = imgpair.image;
    // TODO (mlentine): Maybe set format if new? Not used atm.
    my_data->imageLayoutMap[imgpair].layout = layout;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
    if (subresource == my_data->imageSubresourceMap[image].end()) {
        my_data->imageSubresourceMap[image].push_back(imgpair);
    }
}

// Set the layout on the cmdbuf level
void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    pCB->imageLayoutMap[imgpair] = node;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource =
        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
    }
}

void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    // TODO (mlentine): Maybe make vector a set?
    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
        pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageLayoutMap[imgpair].layout = layout;
    } else {
        // TODO (mlentine): Could be expensive and might need to be removed.
        assert(imgpair.hasSubresource);
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
            node.initialLayout = layout;
        }
        SetLayout(pCB, imgpair, {node.initialLayout, layout});
    }
}
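
// Illustrative sequence for the helper above (a sketch in comments, not compiled): the first time
// a subresource is touched in a command buffer, no entry exists, so the incoming layout is
// recorded as both the initial layout (what the image must already be in at submit time) and the
// current layout; later calls only move the current layout:
//   SetLayout(pCB, imgpair, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);     // initial == current == TRANSFER_DST
//   SetLayout(pCB, imgpair, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); // initial stays TRANSFER_DST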

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
    if (imgpair.subresource.aspectMask & aspectMask) {
        imgpair.subresource.aspectMask = aspectMask;
        SetLayout(pObject, imgpair, layout);
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
}

template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
    SetLayout(pObject, imgpair, layout);
}
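
// Illustrative call through the templated overloads above (a sketch in comments, not compiled):
// a single request fans out across the four aspect bits, and only aspects actually present in
// range.aspectMask are written:
//   VkImageSubresource range = {VK_IMAGE_ASPECT_COLOR_BIT, 0 /*mipLevel*/, 0 /*arrayLayer*/};
//   SetLayout(pCB, image, range, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
//   // -> only the COLOR_BIT probe matches, so exactly one imageLayoutMap entry is updated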

void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
    auto view_state = getImageViewState(dev_data, imageView);
    assert(view_state);
    auto image = view_state->create_info.image;
    const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
    // TODO: Do not iterate over every possibility - consolidate where possible
    for (uint32_t j = 0; j < subRange.levelCount; j++) {
        uint32_t level = subRange.baseMipLevel + j;
        for (uint32_t k = 0; k < subRange.layerCount; k++) {
            uint32_t layer = subRange.baseArrayLayer + k;
            VkImageSubresource sub = {subRange.aspectMask, level, layer};
            // TODO: If ImageView was created with depth or stencil, transition both layouts as
            // the aspectMask is ignored and both are used. Verify that the extra implicit layout
            // is OK for descriptor set layout validation
            if (subRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (vk_format_is_depth_and_stencil(view_state->create_info.format)) {
                    sub.aspectMask |= (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
                }
            }
            SetLayout(pCB, image, sub, layout);
        }
    }
}
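
// Illustrative iteration for the loop above (a sketch in comments): a view with baseMipLevel 1,
// levelCount 2, baseArrayLayer 0, layerCount 2 visits (level, layer) = (1,0), (1,1), (2,0),
// (2,1) - one SetLayout call per subresource, 2 * 2 = 4 in total.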

// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
    if (dev_data->instance_data->disabled.idle_descriptor_set)
        return false;
    bool skip_call = false;
    auto set_node = dev_data->setMap.find(set);
    if (set_node == dev_data->setMap.end()) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
                             (uint64_t)(set));
    } else {
        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
        if (set_node->second->in_use.load()) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)(set), __LINE__, VALIDATION_ERROR_00919, "DS",
                        "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
                        func_str.c_str(), (uint64_t)(set), validation_error_map[VALIDATION_ERROR_00919]);
        }
    }
    return skip_call;
}

// Remove set from setMap and delete the set
static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
    dev_data->setMap.erase(descriptor_set->GetSet());
    delete descriptor_set;
}
// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
static void deletePools(layer_data *my_data) {
    if (my_data->descriptorPoolMap.empty())
        return;
    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
        // Remove this pool's sets from setMap and delete them
        for (auto ds : (*ii).second->sets) {
            freeDescriptorSet(my_data, ds);
        }
        (*ii).second->sets.clear();
    }
    my_data->descriptorPoolMap.clear();
}

static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
                                VkDescriptorPoolResetFlags flags) {
    DESCRIPTOR_POOL_STATE *pPool = getDescriptorPoolState(my_data, pool);
    // TODO: validate flags
    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
    for (auto ds : pPool->sets) {
        freeDescriptorSet(my_data, ds);
    }
    pPool->sets.clear();
    // Reset available count for each type and available sets for this pool
    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
    }
    pPool->availableSets = pPool->maxSets;
}
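
// Illustrative bookkeeping for the reset above (a sketch; availableDescriptorTypeCount is indexed
// by descriptor type): a pool created with maxSets == 8 and 16 uniform-buffer descriptors that
// currently has 3 live sets consuming 6 of them sits at availableSets == 5 and
// availableDescriptorTypeCount[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] == 10; after the reset every
// set is freed, so the counts snap back to 8 and 16 respectively.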

// For given CB object, fetch associated CB Node from map
static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
    auto it = my_data->commandBufferMap.find(cb);
    if (it == my_data->commandBufferMap.end()) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                "Attempt to use CommandBuffer 0x%p that doesn't exist!", cb);
        return NULL;
    }
    return it->second;
}
// Free all CB Nodes
// NOTE : Calls to this function should be wrapped in mutex
static void deleteCommandBuffers(layer_data *my_data) {
    if (my_data->commandBufferMap.empty()) {
        return;
    }
    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
        delete (*ii).second;
    }
    my_data->commandBufferMap.clear();
}

static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
}

// If a renderpass is active, verify that the given command type is appropriate for current subpass state
bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass)
        return false;
    bool skip_call = false;
    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip_call;
}
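
// Illustrative consequence of the rules above (a sketch in comments, not compiled): with
// VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS, anything other than vkCmdExecuteCommands,
// vkCmdNextSubpass, or vkCmdEndRenderPass is flagged; with VK_SUBPASS_CONTENTS_INLINE only
// vkCmdExecuteCommands is rejected:
//   vkCmdBeginRenderPass(cb, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
//   vkCmdExecuteCommands(cb, 1, &secondary_cb); // ValidateCmdSubpassState reports an error here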

static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
    return false;
}

static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_COMPUTE_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
    return false;
}

static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
    return false;
}

// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not
//  in the recording state or if there's an issue with the Cmd ordering
static bool ValidateCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
    bool skip_call = false;
    auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
    if (pPool) {
        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
        switch (cmd) {
        case CMD_BINDPIPELINE:
        case CMD_BINDPIPELINEDELTA:
        case CMD_BINDDESCRIPTORSETS:
        case CMD_FILLBUFFER:
        case CMD_CLEARCOLORIMAGE:
        case CMD_SETEVENT:
        case CMD_RESETEVENT:
        case CMD_WAITEVENTS:
        case CMD_BEGINQUERY:
        case CMD_ENDQUERY:
        case CMD_RESETQUERYPOOL:
        case CMD_COPYQUERYPOOLRESULTS:
        case CMD_WRITETIMESTAMP:
            skip_call |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_SETVIEWPORTSTATE:
        case CMD_SETSCISSORSTATE:
        case CMD_SETLINEWIDTHSTATE:
        case CMD_SETDEPTHBIASSTATE:
        case CMD_SETBLENDSTATE:
        case CMD_SETDEPTHBOUNDSSTATE:
        case CMD_SETSTENCILREADMASKSTATE:
        case CMD_SETSTENCILWRITEMASKSTATE:
        case CMD_SETSTENCILREFERENCESTATE:
        case CMD_BINDINDEXBUFFER:
        case CMD_BINDVERTEXBUFFER:
        case CMD_DRAW:
        case CMD_DRAWINDEXED:
        case CMD_DRAWINDIRECT:
        case CMD_DRAWINDEXEDINDIRECT:
        case CMD_BLITIMAGE:
        case CMD_CLEARATTACHMENTS:
        case CMD_CLEARDEPTHSTENCILIMAGE:
        case CMD_RESOLVEIMAGE:
        case CMD_BEGINRENDERPASS:
        case CMD_NEXTSUBPASS:
        case CMD_ENDRENDERPASS:
            skip_call |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_DISPATCH:
        case CMD_DISPATCHINDIRECT:
            skip_call |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_COPYBUFFER:
        case CMD_COPYIMAGE:
        case CMD_COPYBUFFERTOIMAGE:
        case CMD_COPYIMAGETOBUFFER:
        case CMD_CLONEIMAGEDATA:
        case CMD_UPDATEBUFFER:
        case CMD_PIPELINEBARRIER:
        case CMD_EXECUTECOMMANDS:
        case CMD_END:
            break;
        default:
            break;
        }
    }
    if (pCB->state != CB_RECORDING) {
        skip_call |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
    } else {
        skip_call |= ValidateCmdSubpassState(my_data, pCB, cmd);
    }
    return skip_call;
}
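
// Illustrative mapping for the switch above (a sketch in comments): a command buffer allocated
// from a pool whose queue family reports only VK_QUEUE_TRANSFER_BIT may still record
// CMD_COPYBUFFER (no capability check in that group), but CMD_DRAW trips checkGraphicsBit and
// CMD_DISPATCH trips checkComputeBit, each producing a DRAWSTATE_INVALID_COMMAND_BUFFER error.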

static void UpdateCmdBufferLastCmd(layer_data *my_data, GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd) {
    if (cb_state->state == CB_RECORDING) {
        cb_state->last_cmd = cmd;
    }
}
// For given object struct return a ptr of BASE_NODE type for its wrapping struct
BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
    BASE_NODE *base_ptr = nullptr;
    switch (object_struct.type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
        base_ptr = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
        base_ptr = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
        base_ptr = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
        base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        base_ptr = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
        base_ptr = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        base_ptr = getImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
        base_ptr = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
        base_ptr = getEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
        base_ptr = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
        base_ptr = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
        base_ptr = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
        base_ptr = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
        base_ptr = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
        break;
    }
    default:
        // TODO : Any other objects to be handled here?
        assert(0);
        break;
    }
    return base_ptr;
}

// Tie the VK_OBJECT to the cmd buffer which includes:
//  Add object_binding to cmd buffer
//  Add cb_binding to object
static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
    cb_bindings->insert(cb_node);
    cb_node->object_bindings.insert(obj);
}
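
// Illustrative note on the helper above (a sketch in comments): the binding is deliberately
// symmetric so teardown can walk either direction - resetCB() iterates pCB->object_bindings and
// calls removeCommandBufferBinding() to drop pCB from each object's cb_bindings, while an object
// being destroyed can walk its own cb_bindings to find every command buffer that recorded it.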
// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
    if (base_obj)
        base_obj->cb_bindings.erase(cb_node);
}
// Reset the command buffer state
//  Maintain the createInfo and set state to CB_NEW, but clear all other state
static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
    if (pCB) {
        pCB->in_use.store(0);
        pCB->last_cmd = CMD_NONE;
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->numCmds = 0;
        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->status = 0;
        pCB->viewportMask = 0;
        pCB->scissorMask = 0;

        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
            pCB->lastBound[i].reset();
        }

        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->broken_bindings.clear();
        pCB->waitedEvents.clear();
        pCB->events.clear();
        pCB->writeEventsBeforeWait.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageSubresourceMap.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->drawData.clear();
        pCB->currentDrawData.buffers.clear();
        pCB->vertex_buffer_used = false;
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        // Make sure any secondaryCommandBuffers are removed from globalInFlight
        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
        }
        pCB->secondaryCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        clear_cmd_buf_and_mem_references(dev_data, pCB);
        pCB->eventUpdates.clear();
        pCB->queryUpdates.clear();

        // Remove object bindings
        for (auto obj : pCB->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, pCB);
        }
        pCB->object_bindings.clear();
        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
        for (auto framebuffer : pCB->framebuffers) {
            auto fb_state = getFramebufferState(dev_data, framebuffer);
            if (fb_state)
                fb_state->cb_bindings.erase(pCB);
        }
        pCB->framebuffers.clear();
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}

// Set PSO-related status bits for CB, including dynamic state set via PSO
static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe) {
    // Account for any dynamic state not set via this PSO
    if (!pPipe->graphicsPipelineCI.pDynamicState ||
        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
        pCB->status |= CBSTATUS_ALL_STATE_SET;
    } else {
        // First consider all state on
        // Then unset any state that's noted as dynamic in PSO
        // Finally OR that into CB statemask
        CBStatusFlags psoDynStateMask = CBSTATUS_ALL_STATE_SET;
        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
            case VK_DYNAMIC_STATE_LINE_WIDTH:
                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BIAS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
                break;
            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
                break;
            default:
                // TODO : Flag error here
                break;
            }
        }
        pCB->status |= psoDynStateMask;
    }
}
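
// Illustrative mask arithmetic for the loop above (a sketch in comments): for a PSO whose only
// dynamic state is VK_DYNAMIC_STATE_LINE_WIDTH, the loop computes
//   psoDynStateMask = CBSTATUS_ALL_STATE_SET & ~CBSTATUS_LINE_WIDTH_SET;
//   pCB->status |= psoDynStateMask;
// so every status bit except line width is treated as satisfied by the PSO, and line width stays
// unset until a vkCmdSetLineWidth() call flips CBSTATUS_LINE_WIDTH_SET.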

// Flags validation error if the associated call is made inside a render pass. The apiName
// routine should ONLY be called outside a render pass.
static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName,
                             UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool inside = false;
    if (pCB->activeRenderPass) {
        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                         (uint64_t)pCB->commandBuffer, __LINE__, msgCode, "DS",
                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 "). %s", apiName,
                         (uint64_t)pCB->activeRenderPass->renderPass, validation_error_map[msgCode]);
    }
    return inside;
}

// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName,
                              UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool outside = false;
    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                          (uint64_t)pCB->commandBuffer, __LINE__, msgCode, "DS",
                          "%s: This call must be issued inside an active render pass. %s", apiName, validation_error_map[msgCode]);
    }
    return outside;
}

static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
}

static void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, instance_layer_data *instance_data) {
    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME))
            instance_data->surfaceExtensionEnabled = true;
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_EXTENSION_NAME))
            instance_data->displayExtensionEnabled = true;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME))
            instance_data->androidSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME))
            instance_data->mirSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME))
            instance_data->waylandSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME))
            instance_data->win32SurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME))
            instance_data->xcbSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME))
            instance_data->xlibSurfaceExtensionEnabled = true;
#endif
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL)
        return VK_ERROR_INITIALIZATION_FAILED;

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS)
        return result;

    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), instance_layer_data_map);
    instance_data->instance = *pInstance;
    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);

    instance_data->report_data = debug_report_create_instance(
        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
    checkInstanceRegisterExtensions(pCreateInfo, instance_data);
    init_core_validation(instance_data, pAllocator);

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

// Hook DestroyInstance to remove tableInstanceMap entry
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(instance);
    // TBD: Need any locking this early, in case this function is called at the
    // same time by more than one thread?
    instance_layer_data *instance_data = get_my_data_ptr(key, instance_layer_data_map);
    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);

    std::lock_guard<std::mutex> lock(global_lock);
    // Clean up logging callback, if any
    while (instance_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
        instance_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(instance_data->report_data);
    instance_layer_data_map.erase(key);
}

static void checkDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    uint32_t i;
    // TBD: Need any locking, in case this function is called at the same time
    // by more than one thread?
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_extensions.wsi_enabled = false;
    dev_data->device_extensions.wsi_display_swapchain_enabled = false;

    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_enabled = true;
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_display_swapchain_enabled = true;
    }
}

// Verify that queue family has been properly requested
static bool ValidateRequestedQueueFamilyProperties(instance_layer_data *instance_data, VkPhysicalDevice gpu,
                                                   const VkDeviceCreateInfo *create_info) {
    bool skip_call = false;
    auto physical_device_state = getPhysicalDeviceState(instance_data, gpu);
    // First check whether the app has actually requested queueFamilyProperties
    if (!physical_device_state) {
        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                             0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                             "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
    } else if (QUERY_DETAILS != physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
        // TODO: This is not called out as an invalid use in the spec so make more informative recommendation.
        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
                             "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
    } else {
        // Check that the requested queue properties are valid
        for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
            uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
            if (requestedIndex >= physical_device_state->queue_family_properties.size()) {
                skip_call |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
                    "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
            } else if (create_info->pQueueCreateInfos[i].queueCount >
                       physical_device_state->queue_family_properties[requestedIndex].queueCount) {
                skip_call |=
                    log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
                            "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
                            "requested queueCount is %u.",
                            requestedIndex, physical_device_state->queue_family_properties[requestedIndex].queueCount,
                            create_info->pQueueCreateInfos[i].queueCount);
            }
        }
    }
    return skip_call;
}

// Verify that features have been queried and that they are available
static bool ValidateRequestedFeatures(instance_layer_data *dev_data, VkPhysicalDevice phys, const VkPhysicalDeviceFeatures *requested_features) {
    bool skip_call = false;

    auto phys_device_state = getPhysicalDeviceState(dev_data, phys);
    const VkBool32 *actual = reinterpret_cast<VkBool32 *>(&phys_device_state->features);
    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
    // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
    //  Need to provide the struct member name with the issue. To do that seems like we'll
    //  have to loop through each struct member which should be done w/ codegen to keep in synch.
    uint32_t errors = 0;
    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
    for (uint32_t i = 0; i < total_bools; i++) {
        if (requested[i] > actual[i]) {
            // TODO: Add index to struct member name helper to be able to include a feature name
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
                "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
                "which is not available on this device.",
                i);
            errors++;
        }
    }
    if (errors && (UNCALLED == phys_device_state->vkGetPhysicalDeviceFeaturesState)) {
        // If user didn't request features, notify them that they should
        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
                             "DL", "You requested features that are unavailable on this device. You should first query feature "
                                   "availability by calling vkGetPhysicalDeviceFeatures().");
    }
    return skip_call;
}
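
// Illustrative view of the struct-as-VkBool32-array comparison above (a sketch; it relies on
// VkPhysicalDeviceFeatures being a plain struct of VkBool32 members, which the loop already
// assumes): member k of the request is compared with member k of the queried features, so
// requesting geometryShader == VK_TRUE on a device that reports VK_FALSE makes
// requested[i] > actual[i] true at geometryShader's index, and feature #i is reported as
// unavailable.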

VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    instance_layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), instance_layer_data_map);
    bool skip_call = false;

    // Check that any requested features are available
    if (pCreateInfo->pEnabledFeatures) {
        skip_call |= ValidateRequestedFeatures(my_instance_data, gpu, pCreateInfo->pEnabledFeatures);
    }
    skip_call |= ValidateRequestedQueueFamilyProperties(my_instance_data, gpu, pCreateInfo);

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    std::unique_lock<std::mutex> lock(global_lock);
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);

    my_device_data->instance_data = my_instance_data;
    // Setup device dispatch table
    layer_init_device_dispatch_table(*pDevice, &my_device_data->dispatch_table, fpGetDeviceProcAddr);
    my_device_data->device = *pDevice;
    // Save PhysicalDevice handle
    my_device_data->physical_device = gpu;

    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
    checkDeviceRegisterExtensions(pCreateInfo, *pDevice);
    // Get physical device limits for this device
    my_instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
    uint32_t count;
    my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
    my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
    // TODO: device limits should make sure these are compatible
    if (pCreateInfo->pEnabledFeatures) {
        my_device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
    } else {
        memset(&my_device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
    }
    // Store physical device mem limits into device layer_data struct
    my_instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
    lock.unlock();

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4294    // TODOSC : Shouldn't need any customization here
4295    bool skip = false;
4296    dispatch_key key = get_dispatch_key(device);
4297    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4298    // Free all the memory
4299    std::unique_lock<std::mutex> lock(global_lock);
4300    deletePipelines(dev_data);
4301    dev_data->renderPassMap.clear();
4302    deleteCommandBuffers(dev_data);
4303    // This will also delete all sets in the pool & remove them from setMap
4304    deletePools(dev_data);
4305    // All sets should be removed
4306    assert(dev_data->setMap.empty());
4307    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
4308        delete del_layout.second;
4309    }
4310    dev_data->descriptorSetLayoutMap.clear();
4311    dev_data->imageViewMap.clear();
4312    dev_data->imageMap.clear();
4313    dev_data->imageSubresourceMap.clear();
4314    dev_data->imageLayoutMap.clear();
4315    dev_data->bufferViewMap.clear();
4316    dev_data->bufferMap.clear();
4317    // Queues persist until device is destroyed
4318    dev_data->queueMap.clear();
4319    // Report any memory leaks
4320    layer_debug_report_destroy_device(device);
4321    lock.unlock();
4322
4323#if DISPATCH_MAP_DEBUG
4324    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
4325#endif
4326    if (!skip) {
4327        dev_data->dispatch_table.DestroyDevice(device, pAllocator);
4328        layer_data_map.erase(key);
4329    }
4330}
4331
4332static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4333
4334// This validates that the initial layout specified in the command buffer for
4335// the IMAGE is the same
4336// as the global IMAGE layout
4337static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4338    bool skip_call = false;
4339    for (auto cb_image_data : pCB->imageLayoutMap) {
4340        VkImageLayout imageLayout;
4341        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4342            skip_call |=
4343                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4344                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
4345                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4346        } else {
4347            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4348                // TODO: Set memory invalid which is in mem_tracker currently
4349            } else if (imageLayout != cb_image_data.second.initialLayout) {
4350                if (cb_image_data.first.hasSubresource) {
4351                    skip_call |= log_msg(
4352                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4353                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4354                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
4355                        "with layout %s when first use is %s.",
4356                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
4357                                cb_image_data.first.subresource.arrayLayer,
4358                                cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout),
4359                        string_VkImageLayout(cb_image_data.second.initialLayout));
4360                } else {
4361                    skip_call |= log_msg(
4362                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4363                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4364                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
4365                        "first use is %s.",
4366                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4367                        string_VkImageLayout(cb_image_data.second.initialLayout));
4368                }
4369            }
4370            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4371        }
4372    }
4373    return skip_call;
4374}
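
// Illustrative sketch (hypothetical handles; not part of the layer itself): the
// mismatch branch above fires when a command buffer's first recorded use of an
// image assumes a layout the image is not actually in at submit time, e.g.:
//
//     // `image` is still in VK_IMAGE_LAYOUT_UNDEFINED globally (no barrier recorded)
//     vkCmdCopyBufferToImage(cb, staging, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
//     vkEndCommandBuffer(cb);
//     vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
//     // -> "with layout VK_IMAGE_LAYOUT_UNDEFINED when first use is VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL"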

// Loop through bound objects and increment their in_use counts
//  For any unknown objects, flag an error
static bool ValidateAndIncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    bool skip = false;
    DRAW_STATE_ERROR error_code = DRAWSTATE_NONE;
    BASE_NODE *base_obj = nullptr;
    for (auto obj : cb_node->object_bindings) {
        switch (obj.type) {
        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
            base_obj = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DESCRIPTOR_SET;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
            base_obj = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(obj.handle));
            error_code = DRAWSTATE_INVALID_SAMPLER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
            base_obj = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_QUERY_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
            base_obj = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(obj.handle));
            error_code = DRAWSTATE_INVALID_PIPELINE;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
            base_obj = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
            error_code = DRAWSTATE_INVALID_BUFFER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
            base_obj = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(obj.handle));
            error_code = DRAWSTATE_INVALID_BUFFER_VIEW;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
            base_obj = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
            error_code = DRAWSTATE_INVALID_IMAGE;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
            base_obj = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(obj.handle));
            error_code = DRAWSTATE_INVALID_IMAGE_VIEW;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
            base_obj = getEventNode(dev_data, reinterpret_cast<VkEvent &>(obj.handle));
            error_code = DRAWSTATE_INVALID_EVENT;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
            base_obj = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DESCRIPTOR_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
            base_obj = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_COMMAND_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
            base_obj = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(obj.handle));
            error_code = DRAWSTATE_INVALID_FRAMEBUFFER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
            base_obj = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(obj.handle));
            error_code = DRAWSTATE_INVALID_RENDERPASS;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
            base_obj = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DEVICE_MEMORY;
            break;
        }
        default:
            // TODO : Merge handling of other object types into this code
            break;
        }
        if (!base_obj) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj.type, obj.handle, __LINE__, error_code, "DS",
                        "Cannot submit cmd buffer using deleted %s 0x%" PRIx64 ".", object_type_to_string(obj.type), obj.handle);
        } else {
            base_obj->in_use.fetch_add(1);
        }
    }
    return skip;
}
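
// Worked example (hypothetical handles): destroying a sampler and then submitting
// a command buffer that still has it bound makes the lookup above return null, so
// the "deleted sampler" error is emitted:
//
//     vkDestroySampler(device, sampler, nullptr);            // state node removed
//     vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); // cmd buffer still binds `sampler`
//
// Otherwise each bound object's in_use count is bumped here and released again by
// DecrementBoundResources() when the submission retires.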

// Track which resources are in-flight by atomically incrementing their "in_use" count
static bool validateAndIncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    bool skip_call = false;

    cb_node->in_use.fetch_add(1);
    dev_data->globalInFlightCmdBuffers.insert(cb_node->commandBuffer);

    // First increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
    skip_call |= ValidateAndIncrementBoundObjects(dev_data, cb_node);
    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
    //  should then be flagged prior to calling this function
    for (auto drawDataElement : cb_node->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_state = getBufferState(dev_data, buffer);
            if (!buffer_state) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
            } else {
                buffer_state->in_use.fetch_add(1);
            }
        }
    }
    for (auto event : cb_node->writeEventsBeforeWait) {
        auto event_state = getEventNode(dev_data, event);
        if (event_state)
            event_state->write_in_use++;
    }
    return skip_call;
}

// Note: This function assumes that the global lock is held by the calling thread.
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is that if there are events to be waited on prior to
//  a QueryReset, all such events have in fact been signaled.
static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *queue, uint64_t seq) {
    bool skip = false;
    auto queue_seq = queue->seq;
    std::unordered_map<VkQueue, uint64_t> other_queue_seqs;
    auto sub_it = queue->submissions.begin();
    while (queue_seq < seq) {
        for (auto &wait : sub_it->waitSemaphores) {
            auto &last_seq = other_queue_seqs[wait.queue];
            last_seq = std::max(last_seq, wait.seq);
        }
        for (auto cb : sub_it->cbs) {
            auto cb_node = getCBNode(dev_data, cb);
            if (cb_node) {
                for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
                    for (auto event : queryEventsPair.second) {
                        if (dev_data->eventMap[event].needsSignaled) {
                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                            "Cannot get query results on queryPool 0x%" PRIx64
                                            " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
                                            (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
                        }
                    }
                }
            }
        }
        sub_it++;
        queue_seq++;
    }
    for (auto qs : other_queue_seqs) {
        skip |= VerifyQueueStateToSeq(dev_data, getQueueState(dev_data, qs.first), qs.second);
    }
    return skip;
}
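
// Worked example (illustrative): queue->seq counts retired submissions, so
// submissions.begin() corresponds to sequence number queue->seq. With
// queue->seq == 5 and three submissions pending, VerifyQueueStateToSeq(dev_data, q, 8)
// walks all three; a wait recorded against (other_queue, seq 7) then recurses to
// verify other_queue up through its 7th retirement as well.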

// When the given fence is retired, verify outstanding queue operations through the point of the fence
static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
    auto fence_state = getFenceNode(dev_data, fence);
    if (fence_state && VK_NULL_HANDLE != fence_state->signaler.first) {
        return VerifyQueueStateToSeq(dev_data, getQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
    }
    return false;
}

// TODO: nuke this completely.
// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
    // Pull it off of the global list initially, but if we find it in any other queue list, add it back in
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
    pCB->in_use.fetch_sub(1);
    if (!pCB->in_use.load()) {
        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    }
}

// Decrement in-use count for objects bound to command buffer
static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    BASE_NODE *base_obj = nullptr;
    for (auto obj : cb_node->object_bindings) {
        base_obj = GetStateStructPtrFromObject(dev_data, obj);
        if (base_obj) {
            base_obj->in_use.fetch_sub(1);
        }
    }
}

static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;

    // Roll this queue forward, one submission at a time.
    while (pQueue->seq < seq) {
        auto &submission = pQueue->submissions.front();

        for (auto &wait : submission.waitSemaphores) {
            auto pSemaphore = getSemaphoreNode(dev_data, wait.semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
            auto &lastSeq = otherQueueSeqs[wait.queue];
            lastSeq = std::max(lastSeq, wait.seq);
        }

        for (auto &semaphore : submission.signalSemaphores) {
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
        }

        for (auto cb : submission.cbs) {
            auto cb_node = getCBNode(dev_data, cb);
            if (!cb_node) {
                continue;
            }
            // First perform decrement on general case bound objects
            DecrementBoundResources(dev_data, cb_node);
            for (auto drawDataElement : cb_node->drawData) {
                for (auto buffer : drawDataElement.buffers) {
                    auto buffer_state = getBufferState(dev_data, buffer);
                    if (buffer_state) {
                        buffer_state->in_use.fetch_sub(1);
                    }
                }
            }
            for (auto event : cb_node->writeEventsBeforeWait) {
                auto eventNode = dev_data->eventMap.find(event);
                if (eventNode != dev_data->eventMap.end()) {
                    eventNode->second.write_in_use--;
                }
            }
            for (auto queryStatePair : cb_node->queryToStateMap) {
                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
            }
            for (auto eventStagePair : cb_node->eventToStageMap) {
                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
            }

            removeInFlightCmdBuffer(dev_data, cb);
        }

        auto pFence = getFenceNode(dev_data, submission.fence);
        if (pFence) {
            pFence->state = FENCE_RETIRED;
        }

        pQueue->submissions.pop_front();
        pQueue->seq++;
    }

    // Roll other queues forward to the highest seq we saw a wait for
    for (auto qs : otherQueueSeqs) {
        RetireWorkOnQueue(dev_data, getQueueState(dev_data, qs.first), qs.second);
    }
}

// Submit a fence to a queue, delimiting previous fences and previous untracked
// work by it.
static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
    pFence->state = FENCE_INFLIGHT;
    pFence->signaler.first = pQueue->queue;
    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
}
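
// Worked example (illustrative): with pQueue->seq == 10, two submissions already
// pending, and submitCount == 3, the fence's signaler becomes (queue, 15), i.e.
// 10 + 2 + 3. Retiring the queue through seq 15 via RetireWorkOnQueue() above is
// then proof that everything up to and including this batch has completed.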

static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             0, __LINE__, VALIDATION_ERROR_00133, "DS",
                             "Command buffer 0x%p is already in use and is not marked for simultaneous use. %s", pCB->commandBuffer,
                             validation_error_map[VALIDATION_ERROR_00133]);
    }
    return skip_call;
}

static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *call_source) {
    bool skip = false;
    if (dev_data->instance_data->disabled.command_buffer_state)
        return skip;
    // Validate that a ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                        "Command buffer 0x%p was begun with VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
                        "set, but has been submitted 0x%" PRIxLEAST64 " times.",
                        pCB->commandBuffer, pCB->submitCount);
    }
    // Validate that cmd buffers have been updated
    if (CB_RECORDED != pCB->state) {
        if (CB_INVALID == pCB->state) {
            // Inform app of the reason the CB is invalid
            for (auto obj : pCB->broken_bindings) {
                const char *type_str = object_type_to_string(obj.type);
                // Descriptor sets are a special case in that they can be either destroyed or updated to invalidate a CB
                const char *cause_str =
                    (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ? "destroyed or updated" : "destroyed";

                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer 0x%p that is invalid because bound %s 0x%" PRIxLEAST64 " was %s.",
                            pCB->commandBuffer, type_str, obj.handle, cause_str);
            }
        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
                            "You must call vkEndCommandBuffer() on command buffer 0x%p before this call to %s!", pCB->commandBuffer,
                            call_source);
        }
    }
    return skip;
}

// Validate that the queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
    bool skip_call = false;
    auto pPool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
    auto queue_state = getQueueState(dev_data, queue);

    if (pPool && queue_state && (pPool->queueFamilyIndex != queue_state->queueFamilyIndex)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_00139, "DS",
                             "vkQueueSubmit: Primary command buffer 0x%p created in queue family %d is being submitted on queue "
                             "0x%p from queue family %d. %s",
                             pCB->commandBuffer, pPool->queueFamilyIndex, queue, queue_state->queueFamilyIndex,
                             validation_error_map[VALIDATION_ERROR_00139]);
    }

    return skip_call;
}
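
// Illustrative trigger (hypothetical handles): allocating a command buffer from a
// pool created for queue family 0 and submitting it on a family-1 queue raises
// VALIDATION_ERROR_00139:
//
//     pool_info.queueFamilyIndex = 0;           // pool and its cmd buffers -> family 0
//     vkGetDeviceQueue(device, 1, 0, &queue1);  // a queue from family 1
//     vkQueueSubmit(queue1, 1, &submit_info, VK_NULL_HANDLE);  // mismatch reported above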

static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    // Track in-use for resources off of primary and any secondary CBs
    bool skip_call = false;

    // If USAGE_SIMULTANEOUS_USE_BIT is not set, then the CB cannot already be executing on the device
    skip_call |= validateCommandBufferSimultaneousUse(dev_data, pCB);

    skip_call |= validateAndIncrementResources(dev_data, pCB);

    if (!pCB->secondaryCommandBuffers.empty()) {
        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
            skip_call |= validateAndIncrementResources(dev_data, pSubCB);
            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                skip_call |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, VALIDATION_ERROR_00135, "DS",
                    "Command buffer 0x%p was submitted with secondary buffer 0x%p but that buffer has subsequently been bound to "
                    "primary cmd buffer 0x%p and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set. %s",
                    pCB->commandBuffer, secondaryCmdBuffer, pSubCB->primaryCommandBuffer,
                    validation_error_map[VALIDATION_ERROR_00135]);
            }
        }
    }

    skip_call |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()");

    return skip_call;
}

static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
    bool skip_call = false;

    if (pFence) {
        if (pFence->state == FENCE_INFLIGHT) {
            // TODO: opportunities for VALIDATION_ERROR_00127, VALIDATION_ERROR_01647, VALIDATION_ERROR_01953
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                 "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
        } else if (pFence->state == FENCE_RETIRED) {
            // TODO: opportunities for VALIDATION_ERROR_00126, VALIDATION_ERROR_01646, VALIDATION_ERROR_01953
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                        "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted.",
                        reinterpret_cast<uint64_t &>(pFence->fence));
        }
    }

    return skip_call;
}
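
// Illustrative misuse (hypothetical handles): re-submitting a fence without
// resetting it trips the FENCE_RETIRED branch above:
//
//     vkQueueSubmit(queue, 1, &submit_info, fence);
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);  // fence now retired
//     vkQueueSubmit(queue, 1, &submit_info, fence);             // error: vkResetFences() first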

VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);

    auto pQueue = getQueueState(dev_data, queue);
    auto pFence = getFenceNode(dev_data, fence);
    skip_call |= ValidateFenceForSubmit(dev_data, pFence);

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    // Mark the fence in-use.
    if (pFence) {
        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
    }

    // Now verify each individual submit
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        vector<SEMAPHORE_WAIT> semaphore_waits;
        vector<VkSemaphore> semaphore_signals;
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pWaitSemaphores[i];
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->signaled) {
                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
                        pSemaphore->in_use.fetch_add(1);
                    }
                    pSemaphore->signaler.first = VK_NULL_HANDLE;
                    pSemaphore->signaled = false;
                } else {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
                                reinterpret_cast<const uint64_t &>(semaphore));
                }
            }
        }
        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pSignalSemaphores[i];
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->signaled) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue 0x%p is signaling semaphore 0x%" PRIx64
                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
                                queue, reinterpret_cast<const uint64_t &>(semaphore),
                                reinterpret_cast<uint64_t &>(pSemaphore->signaler.first));
                } else {
                    pSemaphore->signaler.first = queue;
                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
                    pSemaphore->signaled = true;
                    pSemaphore->in_use.fetch_add(1);
                    semaphore_signals.push_back(semaphore);
                }
            }
        }

        std::vector<VkCommandBuffer> cbs;

        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            auto cb_node = getCBNode(dev_data, submit->pCommandBuffers[i]);
            if (cb_node) {
                skip_call |= ValidateCmdBufImageLayouts(dev_data, cb_node);
                cbs.push_back(submit->pCommandBuffers[i]);
                for (auto secondaryCmdBuffer : cb_node->secondaryCommandBuffers) {
                    cbs.push_back(secondaryCmdBuffer);
                }

                cb_node->submitCount++; // increment submit count
                skip_call |= validatePrimaryCommandBufferState(dev_data, cb_node);
                skip_call |= validateQueueFamilyIndices(dev_data, cb_node, queue);
                // Potential early exit here as bad object state may crash in delayed function calls
                if (skip_call)
                    return result;
                // Call submit-time functions to validate/update state
                for (auto &function : cb_node->validate_functions) {
                    skip_call |= function();
                }
                for (auto &function : cb_node->eventUpdates) {
                    skip_call |= function(queue);
                }
                for (auto &function : cb_node->queryUpdates) {
                    skip_call |= function(queue);
                }
            }
        }

        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
    }

    if (pFence && !submitCount) {
        // If there are no submissions, but just a fence being dropped on the end of the queue,
        // record an empty submission with just the fence, so we can determine
        // its completion.
        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
                                         std::vector<SEMAPHORE_WAIT>(),
                                         std::vector<VkSemaphore>(),
                                         fence);
    }

    lock.unlock();
    if (!skip_call)
        result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);

    return result;
}
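
// For reference (illustrative; assumes `cb` has been ended, i.e. is CB_RECORDED,
// and `fence` is unsignaled and not in flight), a minimal submission that passes
// the checks above:
//
//     VkSubmitInfo si = {};
//     si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
//     si.commandBufferCount = 1;
//     si.pCommandBuffers = &cb;
//     vkQueueSubmit(queue, 1, &si, fence);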

static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
    bool skip = false;
    if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        reinterpret_cast<const uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_00611, "MEM",
                        "Number of currently valid memory objects is not less than the maximum allowed (%u). %s",
                        dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount,
                        validation_error_map[VALIDATION_ERROR_00611]);
    }
    return skip;
}

static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
    add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
}

VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateAllocateMemory(dev_data);
    if (!skip) {
        lock.unlock();
        result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
        lock.lock();
        if (VK_SUCCESS == result) {
            PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
        }
    }
    return result;
}

// For the given object node, if it is in use, flag a validation error and return the callback result, else return false
bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
                            UNIQUE_VALIDATION_ERROR_CODE error_code) {
    if (dev_data->instance_data->disabled.object_in_use)
        return false;
    bool skip = false;
    if (obj_node->in_use.load()) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_struct.type, obj_struct.handle, __LINE__,
                        error_code, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer. %s",
                        object_type_to_string(obj_struct.type), obj_struct.handle, validation_error_map[error_code]);
    }
    return skip;
}
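
// Illustrative trigger (hypothetical handles): attempting to free memory while a
// submitted command buffer still reads a buffer bound to it leaves in_use > 0 and
// fails the check above (see PreCallValidateFreeMemory below):
//
//     vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);  // cmd buffer uses `mem`
//     vkFreeMemory(device, mem, nullptr);  // "currently in use by a command buffer"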

static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
    *mem_info = getMemObjInfo(dev_data, mem);
    *obj_struct = {reinterpret_cast<uint64_t &>(mem), VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT};
    if (dev_data->instance_data->disabled.free_memory)
        return false;
    bool skip = false;
    if (*mem_info) {
        skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, VALIDATION_ERROR_00620);
    }
    return skip;
}

static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
    // Clear mem binding for any bound objects
    for (auto obj : mem_info->obj_bindings) {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__, MEMTRACK_FREED_MEM_REF,
                "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64, obj.handle,
                (uint64_t)mem_info->mem);
        switch (obj.type) {
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
            auto image_state = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
            assert(image_state); // Any destroyed images should already be removed from bindings
            image_state->binding.mem = MEMORY_UNBOUND;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
            auto buffer_state = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
            assert(buffer_state); // Any destroyed buffers should already be removed from bindings
            buffer_state->binding.mem = MEMORY_UNBOUND;
            break;
        }
        default:
            // Should only have buffer or image objects bound to memory
            assert(0);
        }
    }
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
    dev_data->memObjMap.erase(mem);
}

VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    DEVICE_MEM_INFO *mem_info = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
        lock.lock();
        PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
    }
}

// Validate that the given map memory range is valid. The memory must not already be mapped,
//  and the size of the map range must be:
//  1. Not zero
//  2. Within the size of the memory allocation
static bool ValidateMapMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    bool skip_call = false;

    if (size == 0) {
        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "VkMapMemory: Attempting to map memory range of size zero");
    }

    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        auto mem_info = mem_element->second.get();
        // It is an application error to call vkMapMemory on an object that is already mapped
        if (mem_info->mem_range.size != 0) {
            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
        }

        // Validate that offset + size is within the object's allocationSize
        if (size == VK_WHOLE_SIZE) {
            if (offset >= mem_info->alloc_info.allocationSize) {
                skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                    "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
                                           " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
                                    offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
            }
        } else {
            if ((offset + size) > mem_info->alloc_info.allocationSize) {
                skip_call = log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                    (uint64_t)mem, __LINE__, VALIDATION_ERROR_00628, "MEM",
                    "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ". %s", offset,
                    size + offset, mem_info->alloc_info.allocationSize, validation_error_map[VALIDATION_ERROR_00628]);
            }
        }
    }
    return skip_call;
}
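
// Worked bounds (illustrative, hypothetical handles): for an allocation with
// allocationSize == 4096, the checks above resolve vkMapMemory calls as follows:
//
//     vkMapMemory(device, mem, 0, 4096, 0, &p);              // OK
//     vkMapMemory(device, mem, 1024, 3072, 0, &p);           // OK: 1024 + 3072 == 4096
//     vkMapMemory(device, mem, 0, 0, 0, &p);                 // error: size of zero
//     vkMapMemory(device, mem, 4096, VK_WHOLE_SIZE, 0, &p);  // error: offset >= allocationSize
//     vkMapMemory(device, mem, 1024, 3073, 0, &p);           // error: oversteps by one byte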

static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    auto mem_info = getMemObjInfo(my_data, mem);
    if (mem_info) {
        mem_info->mem_range.offset = offset;
        mem_info->mem_range.size = size;
    }
}

static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
    bool skip_call = false;
    auto mem_info = getMemObjInfo(my_data, mem);
    if (mem_info) {
        if (!mem_info->mem_range.size) {
            // Valid Usage: memory must currently be mapped
            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, VALIDATION_ERROR_00649, "MEM",
                                "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64 ". %s", (uint64_t)mem,
                                validation_error_map[VALIDATION_ERROR_00649]);
        }
        mem_info->mem_range.size = 0;
        if (mem_info->shadow_copy) {
            free(mem_info->shadow_copy_base);
            mem_info->shadow_copy_base = 0;
            mem_info->shadow_copy = 0;
        }
    }
    return skip_call;
}

// Guard value for pad data
static char NoncoherentMemoryFillValue = 0xb;

static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
                                     void **ppData) {
    auto mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->p_driver_data = *ppData;
        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
            mem_info->shadow_copy = 0;
        } else {
            if (size == VK_WHOLE_SIZE) {
                size = mem_info->alloc_info.allocationSize - offset;
            }
            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
            assert(vk_safe_modulo(mem_info->shadow_pad_size,
                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
            // Ensure start of mapped region reflects hardware alignment constraints
            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;

            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
            uint64_t start_offset = offset % map_alignment;
            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
            mem_info->shadow_copy_base = malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));

            mem_info->shadow_copy =
                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
                                         ~(map_alignment - 1)) + start_offset;
            assert(vk_safe_modulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
                                  map_alignment) == 0);

            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
        }
    }
}
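
// Layout of the shadow allocation above (illustrative sketch): for non-coherent
// memory the pointer handed back to the application is offset into a padded block
// so both under- and over-writes land in guard bytes filled with
// NoncoherentMemoryFillValue:
//
//   shadow_copy_base
//   |- align slack -|- pad (shadow_pad_size) -|-- user data (size) --|- pad -|
//                   ^ shadow_copy             ^ *ppData returned to the app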

// Verify that the state of a fence being waited on is appropriate. That is,
//  a fence being waited on should not already be signaled and
//  it should have been submitted on a queue or during acquire next image
static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
    bool skip_call = false;

    auto pFence = getFenceNode(dev_data, fence);
    if (pFence) {
        if (pFence->state == FENCE_UNSIGNALED) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                 "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
                                 "acquire next image.",
                                 apiCall, reinterpret_cast<uint64_t &>(fence));
        }
    }
    return skip_call;
}
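
// Illustrative trigger (hypothetical handles): waiting on a fence that was created
// unsignaled and never submitted produces the warning above and would block forever:
//
//     VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO};  // no SIGNALED flag
//     vkCreateFence(device, &fci, nullptr, &fence);
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);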

static void RetireFence(layer_data *dev_data, VkFence fence) {
    auto pFence = getFenceNode(dev_data, fence);
    if (pFence->signaler.first != VK_NULL_HANDLE) {
        // Fence signaler is a queue -- use this as proof that prior operations on that queue have completed.
        RetireWorkOnQueue(dev_data, getQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
    } else {
        // Fence signaler is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
        // the fence as retired.
        pFence->state = FENCE_RETIRED;
    }
}

static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
    if (dev_data->instance_data->disabled.wait_for_fences)
        return false;
    bool skip = false;
    for (uint32_t i = 0; i < fence_count; i++) {
        skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
        skip |= VerifyQueueStateToFence(dev_data, fences[i]);
    }
    return skip;
}

static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
    // When we know that all fences are complete we can clean/remove their CBs
    if ((VK_TRUE == wait_all) || (1 == fence_count)) {
        for (uint32_t i = 0; i < fence_count; i++) {
            RetireFence(dev_data, fences[i]);
        }
    }
    // NOTE : The alternate case, where only some fences have completed, is not handled here. In
    //  that case, for the app to guarantee which fences completed, it will have to call
    //  vkGetFenceStatus(), at which point we'll clean/remove their CBs if complete.
}

VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
                                             uint64_t timeout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Verify fence status of submitted fences
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    if (result == VK_SUCCESS) {
        lock.lock();
        PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
        lock.unlock();
    }
    return result;
}

static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
    if (dev_data->instance_data->disabled.get_fence_state)
        return false;
    return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
}

static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }

VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
    if (result == VK_SUCCESS) {
        lock.lock();
        PostCallRecordGetFenceStatus(dev_data, fence);
        lock.unlock();
    }
    return result;
}

static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
    // Add queue to tracking set only if it is new
    auto result = dev_data->queues.emplace(queue);
    if (result.second) {
        QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
        queue_state->queue = queue;
        queue_state->queueFamilyIndex = q_family_index;
        queue_state->seq = 0;
    }
}

VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
    std::lock_guard<std::mutex> lock(global_lock);

    PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
}

static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
    *queue_state = getQueueState(dev_data, queue);
    if (dev_data->instance_data->disabled.queue_wait_idle)
        return false;
    return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
}

static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
    RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
}

VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    QUEUE_STATE *queue_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordQueueWaitIdle(dev_data, queue_state);
        lock.unlock();
    }
    return result;
}

static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
    if (dev_data->instance_data->disabled.device_wait_idle)
        return false;
    bool skip = false;
    for (auto &queue : dev_data->queueMap) {
        skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
    }
    return skip;
}

static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
    for (auto &queue : dev_data->queueMap) {
        RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
    }
}

VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDeviceWaitIdle(dev_data);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordDeviceWaitIdle(dev_data);
        lock.unlock();
    }
    return result;
}

static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
    *fence_node = getFenceNode(dev_data, fence);
    *obj_struct = {reinterpret_cast<uint64_t &>(fence), VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT};
    if (dev_data->instance_data->disabled.destroy_fence)
        return false;
    bool skip = false;
    if (*fence_node) {
        if ((*fence_node)->state == FENCE_INFLIGHT) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Fence 0x%" PRIx64 " is in use.",
                            (uint64_t)(fence));
        }
    }
    return skip;
}

static void PostCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }

VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    FENCE_NODE *fence_node = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);

    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
        lock.lock();
        PostCallRecordDestroyFence(dev_data, fence);
    }
}

static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
                                            VK_OBJECT *obj_struct) {
    *sema_node = getSemaphoreNode(dev_data, semaphore);
    *obj_struct = {reinterpret_cast<uint64_t &>(semaphore), VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT};
    if (dev_data->instance_data->disabled.destroy_semaphore)
        return false;
    bool skip = false;
    if (*sema_node) {
        skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, VALIDATION_ERROR_00199);
    }
    return skip;
}

static void PostCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }

VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    SEMAPHORE_NODE *sema_node = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
        lock.lock();
        PostCallRecordDestroySemaphore(dev_data, semaphore);
    }
}

static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
    *event_state = getEventNode(dev_data, event);
    *obj_struct = {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT};
    if (dev_data->instance_data->disabled.destroy_event)
        return false;
    bool skip = false;
    if (*event_state) {
        skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, VALIDATION_ERROR_00213);
    }
    return skip;
}

static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
    dev_data->eventMap.erase(event);
}

VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    EVENT_STATE *event_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
        lock.lock();
        PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
    }
}

static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
                                            VK_OBJECT *obj_struct) {
    *qp_state = getQueryPoolNode(dev_data, query_pool);
    *obj_struct = {reinterpret_cast<uint64_t &>(query_pool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT};
    if (dev_data->instance_data->disabled.destroy_query_pool)
        return false;
    bool skip = false;
    if (*qp_state) {
        skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, VALIDATION_ERROR_01012);
    }
    return skip;
}

static void PostCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
                                           VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
    dev_data->queryPoolMap.erase(query_pool);
}

VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    QUERY_POOL_NODE *qp_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
        lock.lock();
        PostCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
    }
}
static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
                                               uint32_t query_count, VkQueryResultFlags flags,
                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
    for (auto cmd_buffer : dev_data->globalInFlightCmdBuffers) {
        auto cb = getCBNode(dev_data, cmd_buffer);
        for (auto query_state_pair : cb->queryToStateMap) {
            (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer);
        }
    }
    if (dev_data->instance_data->disabled.get_query_pool_results)
        return false;
    bool skip = false;
    for (uint32_t i = 0; i < query_count; ++i) {
        QueryObject query = {query_pool, first_query + i};
        auto qif_pair = queries_in_flight->find(query);
        auto query_state_pair = dev_data->queryToStateMap.find(query);
        if (query_state_pair != dev_data->queryToStateMap.end()) {
            // Available and in flight
            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = getCBNode(dev_data, cmd_buffer);
                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
                    if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                        "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
                                        (uint64_t)(query_pool), first_query + i);
                    }
                }
                // Unavailable and in flight
            } else if (qif_pair != queries_in_flight->end() && !query_state_pair->second) {
                // TODO : Can there be the same query in use by multiple command buffers in flight?
                bool make_available = false;
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = getCBNode(dev_data, cmd_buffer);
                    make_available |= cb->queryToStateMap[query];
                }
                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                    "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                    (uint64_t)(query_pool), first_query + i);
                }
                // Unavailable and not in flight
            } else if (!query_state_pair->second) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                (uint64_t)(query_pool), first_query + i);
            }
        } else {
            // Uninitialized
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                            "Cannot get query results on queryPool 0x%" PRIx64
                            " with index %d as data has not been collected for this index.",
                            (uint64_t)(query_pool), first_query + i);
        }
    }
    return skip;
}

static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
                                              uint32_t query_count,
                                              unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
    for (uint32_t i = 0; i < query_count; ++i) {
        QueryObject query = {query_pool, first_query + i};
        auto qif_pair = queries_in_flight->find(query);
        auto query_state_pair = dev_data->queryToStateMap.find(query);
        if (query_state_pair != dev_data->queryToStateMap.end()) {
            // Available and in flight
            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = getCBNode(dev_data, cmd_buffer);
                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
                    if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
                        for (auto event : query_event_pair->second) {
                            dev_data->eventMap[event].needsSignaled = true;
                        }
                    }
                }
            }
        }
    }
}

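/* What a correct app-side call looks like for the checks above (illustrative sketch only;
 * QUERY_COUNT and the handles are hypothetical, not part of this layer):
 *
 *     uint64_t results[QUERY_COUNT];
 *     // WAIT_BIT blocks until the results land, making in-flight queries a legal request
 *     VkResult res = vkGetQueryPoolResults(device, query_pool, 0, QUERY_COUNT,
 *                                          sizeof(results), results, sizeof(uint64_t),
 *                                          VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
 *
 * Without WAIT_BIT or PARTIAL_BIT, requesting results for a query that is still in flight or
 * whose availability has not been signaled can trigger the DRAWSTATE_INVALID_QUERY cases above.
 */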
VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                                                   size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result =
        dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
    lock.lock();
    PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
    lock.unlock();
    return result;
}

static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
    bool skip_call = false;
    auto buffer_state = getBufferState(my_data, buffer);
    if (!buffer_state) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
    } else {
        if (buffer_state->in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 (uint64_t)(buffer), __LINE__, VALIDATION_ERROR_00676, "DS",
                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer. %s", (uint64_t)(buffer),
                                 validation_error_map[VALIDATION_ERROR_00676]);
        }
    }
    return skip_call;
}

// Return true if the given ranges intersect, else false.
// Prerequisite: for both ranges, range->end - range->start > 0. That case should already have
//  resulted in an error, so it is not re-checked here.
// When one range is linear and the other is non-linear, the comparison is padded out to
//  bufferImageGranularity. If an alias is encountered in that padded case, a validation error is
//  reported and the callback may request a skip, so callers that can hit the padded case should
//  merge the returned *skip_call value into their own skip state.
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip_call) {
    *skip_call = false;
    auto r1_start = range1->start;
    auto r1_end = range1->end;
    auto r2_start = range2->start;
    auto r2_end = range2->end;
    VkDeviceSize pad_align = 1;
    if (range1->linear != range2->linear) {
        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
    }
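    // Example of the masking below (illustrative numbers): with bufferImageGranularity = 0x1000,
    // a linear range ending at 0x1FFF and a non-linear range starting at 0x2001 round down to
    // pages 0x1000 and 0x2000 respectively, so they do NOT intersect; a non-linear start of
    // 0x1FFF rounds down to 0x1000, the same page as the linear end, and counts as an overlap.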
    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1)))
        return false;
    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1)))
        return false;

    if (range1->linear != range2->linear) {
        // In linear vs. non-linear case, warn of aliasing
        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
        const char *r1_type_str = range1->image ? "image" : "buffer";
        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
        const char *r2_type_str = range2->image ? "image" : "buffer";
        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
        *skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, 0,
                              MEMTRACK_INVALID_ALIASING, "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
                                                                " which may indicate a bug. For further info refer to the "
                                                                "Buffer-Image Granularity section of the Vulkan specification. "
                                                                "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/"
                                                                "xhtml/vkspec.html#resources-bufferimagegranularity)",
                              r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
    }
    // Ranges intersect
    return true;
}
// Simplified rangesIntersect that calls the above function to check range1 for intersection with [offset, end]
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
    // Create a local MEMORY_RANGE struct to wrap offset/end
    MEMORY_RANGE range_wrap;
    // Sync linear with range1 to avoid padding and the potential validation-error case
    range_wrap.linear = range1->linear;
    range_wrap.start = offset;
    range_wrap.end = end;
    bool tmp_bool;
    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool);
}
// For given mem_info, mark valid all ranges that intersect the [offset, end] range
// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
    bool tmp_bool = false;
    MEMORY_RANGE map_range = {};
    map_range.linear = true;
    map_range.start = offset;
    map_range.end = end;
    for (auto &handle_range_pair : mem_info->bound_ranges) {
        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool)) {
            // TODO : WARN here if tmp_bool true?
            handle_range_pair.second.valid = true;
        }
    }
}
// Object with given handle is being bound to memory w/ given mem_info struct.
//  Track the newly bound memory range with given memoryOffset
//  Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
//  and non-linear range incorrectly overlap.
// Return true if an error is flagged and the user callback returns "true", otherwise false
// is_image indicates an image object, otherwise handle is for a buffer
// is_linear indicates a buffer or linear image
static bool InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
    bool skip_call = false;
    MEMORY_RANGE range;

    range.image = is_image;
    range.handle = handle;
    range.linear = is_linear;
    range.valid = mem_info->global_valid;
    range.memory = mem_info->mem;
    range.start = memoryOffset;
    range.size = memRequirements.size;
    range.end = memoryOffset + memRequirements.size - 1;
    range.aliases.clear();
    // Update memory aliasing. Save the aliased ranges in a temporary set so they can be copied into
    // the final map entry below. This can't be done inside the loop: the final pointer doesn't exist
    // yet, and inserting into the map before the loop would make the new range compare against itself.
    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
    for (auto &obj_range_pair : mem_info->bound_ranges) {
        auto check_range = &obj_range_pair.second;
        bool intersection_error = false;
        if (rangesIntersect(dev_data, &range, check_range, &intersection_error)) {
            skip_call |= intersection_error;
            range.aliases.insert(check_range);
            tmp_alias_ranges.insert(check_range);
        }
    }
    mem_info->bound_ranges[handle] = std::move(range);
    for (auto tmp_range : tmp_alias_ranges) {
        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
    }
    if (is_image)
        mem_info->bound_images.insert(handle);
    else
        mem_info->bound_buffers.insert(handle);

    return skip_call;
}

static bool InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                   VkMemoryRequirements mem_reqs, bool is_linear) {
    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear);
}

static bool InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                    VkMemoryRequirements mem_reqs) {
    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true);
}

// Remove MEMORY_RANGE struct for given handle from bound_ranges of mem_info
//  is_image indicates if handle is for image or buffer
//  This function will also remove the handle-to-index mapping from the appropriate
//  map and clean up any aliases for the range being removed.
static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
    auto erase_range = &mem_info->bound_ranges[handle];
    for (auto alias_range : erase_range->aliases) {
        alias_range->aliases.erase(erase_range);
    }
    erase_range->aliases.clear();
    mem_info->bound_ranges.erase(handle);
    if (is_image) {
        mem_info->bound_images.erase(handle);
    } else {
        mem_info->bound_buffers.erase(handle);
    }
}

static void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }

static void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }

static bool PreCallValidateDestroyBuffer(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE **buffer_state,
                                         VK_OBJECT *obj_struct) {
    *buffer_state = getBufferState(dev_data, buffer);
    *obj_struct = {reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT};
    if (dev_data->instance_data->disabled.destroy_buffer)
        return false;
    bool skip = false;
    if (*buffer_state) {
        skip |= validateIdleBuffer(dev_data, buffer);
    }
    return skip;
}

static void PostCallRecordDestroyBuffer(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, buffer_state->cb_bindings, obj_struct);
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        auto mem_info = getMemObjInfo(dev_data, mem_binding);
        if (mem_info) {
            RemoveBufferMemoryRange(reinterpret_cast<uint64_t &>(buffer), mem_info);
        }
    }
    ClearMemoryObjectBindings(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
    dev_data->bufferMap.erase(buffer_state->buffer);
}

VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
                                         const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    BUFFER_STATE *buffer_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
        lock.lock();
        PostCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
    }
}

static bool PreCallValidateDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE **buffer_view_state,
                                             VK_OBJECT *obj_struct) {
    *buffer_view_state = getBufferViewState(dev_data, buffer_view);
    *obj_struct = {reinterpret_cast<uint64_t &>(buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT};
    if (dev_data->instance_data->disabled.destroy_buffer_view)
        return false;
    bool skip = false;
    if (*buffer_view_state) {
        skip |= ValidateObjectNotInUse(dev_data, *buffer_view_state, *obj_struct, VALIDATION_ERROR_00701);
    }
    return skip;
}

static void PostCallRecordDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE *buffer_view_state,
                                            VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, buffer_view_state->cb_bindings, obj_struct);
    dev_data->bufferViewMap.erase(buffer_view);
}

VKAPI_ATTR void VKAPI_CALL
DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    // Validate state before calling down chain, update common data if we'll be calling down chain
    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
        lock.lock();
        PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
    }
}

static bool PreCallValidateDestroyImage(layer_data *dev_data, VkImage image, IMAGE_STATE **image_state, VK_OBJECT *obj_struct) {
    *image_state = getImageState(dev_data, image);
    *obj_struct = {reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT};
    if (dev_data->instance_data->disabled.destroy_image)
        return false;
    bool skip = false;
    if (*image_state) {
        skip |= ValidateObjectNotInUse(dev_data, *image_state, *obj_struct, VALIDATION_ERROR_00743);
    }
    return skip;
}

static void PostCallRecordDestroyImage(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, image_state->cb_bindings, obj_struct);
    // Clean up memory mapping, bindings and range references for image
    for (auto mem_binding : image_state->GetBoundMemory()) {
        auto mem_info = getMemObjInfo(dev_data, mem_binding);
        if (mem_info) {
            RemoveImageMemoryRange(obj_struct.handle, mem_info);
        }
    }
    ClearMemoryObjectBindings(dev_data, obj_struct.handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    // Remove image from imageMap
    dev_data->imageMap.erase(image);

    const auto &sub_entry = dev_data->imageSubresourceMap.find(image);
    if (sub_entry != dev_data->imageSubresourceMap.end()) {
        for (const auto &pair : sub_entry->second) {
            dev_data->imageLayoutMap.erase(pair);
        }
        dev_data->imageSubresourceMap.erase(sub_entry);
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    IMAGE_STATE *image_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
        lock.lock();
        PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
    }
}

static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
                                const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool skip_call = false;
    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
        skip_call =
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                    reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, msgCode, "MT",
                    "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
                    "type (0x%X) of this memory object 0x%" PRIx64 ". %s",
                    funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex,
                    reinterpret_cast<const uint64_t &>(mem_info->mem), validation_error_map[msgCode]);
    }
    return skip_call;
}

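/* The intercept below checks the app-side contract for binding buffer memory. A conforming
 * sequence looks roughly like this (illustrative sketch; the 'mem_type_index' selection from
 * reqs.memoryTypeBits is elided):
 *
 *     VkMemoryRequirements reqs;
 *     vkGetBufferMemoryRequirements(device, buffer, &reqs);   // must precede the bind
 *     VkMemoryAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO};
 *     alloc_info.allocationSize = reqs.size;
 *     alloc_info.memoryTypeIndex = mem_type_index;            // chosen from reqs.memoryTypeBits
 *     VkDeviceMemory mem;
 *     vkAllocateMemory(device, &alloc_info, nullptr, &mem);
 *     vkBindBufferMemory(device, buffer, mem, 0);             // offset must honor reqs.alignment
 *
 * Skipping the vkGetBufferMemoryRequirements() call, picking a memory type outside
 * reqs.memoryTypeBits, or using a misaligned offset each trigger a report below.
 */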
VKAPI_ATTR VkResult VKAPI_CALL
BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    // Track objects tied to memory
    uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
    bool skip_call = SetMemBinding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
    auto buffer_state = getBufferState(dev_data, buffer);
    if (buffer_state) {
        if (!buffer_state->memory_requirements_checked) {
            // The spec doesn't explicitly require calling vkGetBufferMemoryRequirements() before
            //  vkBindBufferMemory, but it is implied: the memory being bound must conform to the
            //  VkMemoryRequirements that vkGetBufferMemoryRequirements() would return
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 buffer_handle, __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                 "vkBindBufferMemory(): Binding memory to buffer 0x%" PRIxLEAST64
                                 " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
                                 buffer_handle);
            // Make the call for them so we can verify the state
            lock.unlock();
            dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, &buffer_state->requirements);
            lock.lock();
        }
        buffer_state->binding.mem = mem;
        buffer_state->binding.offset = memoryOffset;
        buffer_state->binding.size = buffer_state->requirements.size;

        // Track and validate bound memory range information
        auto mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            skip_call |= InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
            skip_call |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, "vkBindBufferMemory()",
                                             VALIDATION_ERROR_00797);
        }

        // Validate memory requirements alignment
        if (vk_safe_modulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_02174, "DS",
                                 "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
                                 "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                                 ", returned from a call to vkGetBufferMemoryRequirements with this buffer. %s",
                                 memoryOffset, buffer_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_02174]);
        }

        // Validate device limits alignments
        static const VkBufferUsageFlagBits usage_list[3] = {
            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
            VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
        static const char *memory_type[3] = {"texel", "uniform", "storage"};
        static const char *offset_name[3] = {
            "minTexelBufferOffsetAlignment",
            "minUniformBufferOffsetAlignment",
            "minStorageBufferOffsetAlignment"
        };
        static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = {
            VALIDATION_ERROR_00794,
            VALIDATION_ERROR_00795,
            VALIDATION_ERROR_00796
        };

        // Not static: read from the current device's limits on each call
        const VkDeviceSize offset_requirement[3] = {
            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment
        };
        VkBufferUsageFlags usage = dev_data->bufferMap[buffer].get()->createInfo.usage;

        for (int i = 0; i < 3; i++) {
            if (usage & usage_list[i]) {
                if (vk_safe_modulo(memoryOffset, offset_requirement[i]) != 0) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, msgCode[i], "DS",
                                         "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
                                         "device limit %s 0x%" PRIxLEAST64 ". %s",
                                         memory_type[i], memoryOffset, offset_name[i], offset_requirement[i],
                                         validation_error_map[msgCode[i]]);
                }
            }
        }
    }
    lock.unlock();
    if (!skip_call) {
        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
    }
    return result;
}

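// The two Get*MemoryRequirements intercepts below exist so the layer can cache each resource's
// VkMemoryRequirements and remember that the app queried them; the Bind*Memory intercepts use
// that cached state to validate size, alignment, and memory-type compatibility.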
VKAPI_ATTR void VKAPI_CALL
GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
    auto buffer_state = getBufferState(dev_data, buffer);
    if (buffer_state) {
        buffer_state->requirements = *pMemoryRequirements;
        buffer_state->memory_requirements_checked = true;
    }
}

VKAPI_ATTR void VKAPI_CALL
GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
    auto image_state = getImageState(dev_data, image);
    if (image_state) {
        image_state->requirements = *pMemoryRequirements;
        image_state->memory_requirements_checked = true;
    }
}

static bool PreCallValidateDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE **image_view_state,
                                            VK_OBJECT *obj_struct) {
    *image_view_state = getImageViewState(dev_data, image_view);
    *obj_struct = {reinterpret_cast<uint64_t &>(image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT};
    if (dev_data->instance_data->disabled.destroy_image_view)
        return false;
    bool skip = false;
    if (*image_view_state) {
        skip |= ValidateObjectNotInUse(dev_data, *image_view_state, *obj_struct, VALIDATION_ERROR_00776);
    }
    return skip;
}

static void PostCallRecordDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE *image_view_state,
                                           VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, image_view_state->cb_bindings, obj_struct);
    dev_data->imageViewMap.erase(image_view);
}

VKAPI_ATTR void VKAPI_CALL
DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    IMAGE_VIEW_STATE *image_view_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
        lock.lock();
        PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    my_data->shaderModuleMap.erase(shaderModule);
    lock.unlock();

    my_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
}

static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
                                           VK_OBJECT *obj_struct) {
    *pipeline_state = getPipelineState(dev_data, pipeline);
    *obj_struct = {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT};
    if (dev_data->instance_data->disabled.destroy_pipeline)
        return false;
    bool skip = false;
    if (*pipeline_state) {
        skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, VALIDATION_ERROR_00555);
    }
    return skip;
}

static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
                                          VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
    dev_data->pipelineMap.erase(pipeline);
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    PIPELINE_STATE *pipeline_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
        lock.lock();
        PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    dev_data->pipelineLayoutMap.erase(pipelineLayout);
    lock.unlock();

    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
}

static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
                                          VK_OBJECT *obj_struct) {
    *sampler_state = getSamplerState(dev_data, sampler);
    *obj_struct = {reinterpret_cast<uint64_t &>(sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT};
    if (dev_data->instance_data->disabled.destroy_sampler)
        return false;
    bool skip = false;
    if (*sampler_state) {
        skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, VALIDATION_ERROR_00837);
    }
    return skip;
}

static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
                                         VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    if (sampler_state)
        invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
    dev_data->samplerMap.erase(sampler);
}

VKAPI_ATTR void VKAPI_CALL
DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    SAMPLER_STATE *sampler_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
        lock.lock();
        PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
    }
}

static void PostCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
    dev_data->descriptorSetLayoutMap.erase(ds_layout);
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
    std::unique_lock<std::mutex> lock(global_lock);
    PostCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
}

static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
                                                 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
    *desc_pool_state = getDescriptorPoolState(dev_data, pool);
    *obj_struct = {reinterpret_cast<uint64_t &>(pool), VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT};
    if (dev_data->instance_data->disabled.destroy_descriptor_pool)
        return false;
    bool skip = false;
    if (*desc_pool_state) {
        skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, VALIDATION_ERROR_00901);
    }
    return skip;
}

static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
                                                DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
    // Free sets that were in this pool
    for (auto ds : desc_pool_state->sets) {
        freeDescriptorSet(dev_data, ds);
    }
    dev_data->descriptorPoolMap.erase(descriptorPool);
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
        lock.lock();
        PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
    }
}

// Verify that the cmdBuffer in the given cb_node is not in the global in-flight set, and return the
//  skip_call result. A secondary command buffer is only treated as in use if its primary command
//  buffer is also still in flight.
// This function is only valid at a point when cmdBuffer is being reset or freed
static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
                                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
        // Primary CB or secondary where primary is also in-flight is an error
        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, error_code, "DS",
                        "Attempt to %s command buffer (0x%p) which is in use. %s", action, cb_node->commandBuffer,
                        validation_error_map[error_code]);
        }
    }
    return skip_call;
}

// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;
    for (auto cmd_buffer : pPool->commandBuffers) {
        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
            skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action, error_code);
        }
    }
    return skip_call;
}

static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
    for (auto cmd_buffer : pPool->commandBuffers) {
        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    }
}

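// FreeCommandBuffers below makes two passes over the handles: the first validates that nothing is
// still in flight (bailing out before any state is touched), and only then does the second pass
// tear down the per-command-buffer state and drop the pool's references.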
VKAPI_ATTR void VKAPI_CALL
FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);

    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        // Verify that the command buffer is not in flight before any state is freed
        if (cb_node) {
            skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_00096);
        }
    }

    if (skip_call)
        return;

    auto pPool = getCommandPoolNode(dev_data, commandPool);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        // Delete CB information structure, and remove from commandBufferMap
        if (cb_node) {
            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
            // reset prior to delete for data clean-up
            resetCB(dev_data, cb_node->commandBuffer);
            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
            delete cb_node;
        }

        // Remove commandBuffer reference from commandPoolMap
        pPool->commandBuffers.remove(pCommandBuffers[i]);
    }
    lock.unlock();

    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator,
                                                 VkCommandPool *pCommandPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
    }
    return result;
}

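/* App-side sketch of the feature dependency validated below (illustrative only):
 *
 *     VkQueryPoolCreateInfo qpci = {VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO};
 *     qpci.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS;      // requires pipelineStatisticsQuery
 *     qpci.queryCount = 1;
 *     qpci.pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
 *     vkCreateQueryPool(device, &qpci, nullptr, &query_pool);  // VALIDATION_ERROR_01006 if the
 *                                                              // feature was not enabled at
 *                                                              // vkCreateDevice time
 */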
VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
        if (!dev_data->enabled_features.pipelineStatisticsQuery) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            __LINE__, VALIDATION_ERROR_01006, "DS",
                            "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device "
                            "with VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE. %s",
                            validation_error_map[VALIDATION_ERROR_01006]);
        }
    }

    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    if (!skip) {
        result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
    }
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
        qp_node->createInfo = *pCreateInfo;
    }
    return result;
}

static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE **cp_state) {
    *cp_state = getCommandPoolNode(dev_data, pool);
    if (dev_data->instance_data->disabled.destroy_command_pool)
        return false;
    bool skip = false;
    if (*cp_state) {
        // Verify that command buffers in pool are complete (not in-flight)
        skip |= checkCommandBuffersInFlight(dev_data, *cp_state, "destroy command pool with", VALIDATION_ERROR_00077);
    }
    return skip;
}

static void PostCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE *cp_state) {
    // Remove all of the pool's command buffers from the commandBufferMap first, then remove the
    // pool itself from commandPoolMap
    clearCommandBuffersInFlight(dev_data, cp_state);
    for (auto cb : cp_state->commandBuffers) {
        clear_cmd_buf_and_mem_references(dev_data, cb);
        auto cb_node = getCBNode(dev_data, cb);
        // Remove references to this cb_node prior to delete
        // TODO : Need better solution here, resetCB?
        for (auto obj : cb_node->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, cb_node);
        }
        for (auto framebuffer : cb_node->framebuffers) {
            auto fb_state = getFramebufferState(dev_data, framebuffer);
            if (fb_state)
                fb_state->cb_bindings.erase(cb_node);
        }
        dev_data->commandBufferMap.erase(cb); // Remove this command buffer
        delete cb_node;                       // delete CB info structure
    }
    dev_data->commandPoolMap.erase(pool);
}

// Destroy commandPool along with all of the commandBuffers allocated from that pool
VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    COMMAND_POOL_NODE *cp_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool, &cp_state);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
        lock.lock();
        PostCallRecordDestroyCommandPool(dev_data, commandPool, cp_state);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    std::unique_lock<std::mutex> lock(global_lock);
    auto pPool = getCommandPoolNode(dev_data, commandPool);
    skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_00072);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result) {
        lock.lock();
        clearCommandBuffersInFlight(dev_data, pPool);
        for (auto cmdBuffer : pPool->commandBuffers) {
            resetCB(dev_data, cmdBuffer);
        }
        lock.unlock();
    }
    return result;
}

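/* Fence lifecycle enforced below (illustrative app-side sketch): a fence submitted with work must
 * have completed, e.g. via vkWaitForFences, before it may be reset.
 *
 *     vkQueueSubmit(queue, 1, &submit_info, fence);            // fence is now in flight
 *     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX); // retire it first
 *     vkResetFences(device, 1, &fence);                        // legal only once retired
 *
 * Calling vkResetFences while the fence is still in flight triggers VALIDATION_ERROR_00183.
 */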
VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto pFence = getFenceNode(dev_data, pFences[i]);
        if (pFence && pFence->state == FENCE_INFLIGHT) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, VALIDATION_ERROR_00183, "DS",
                                 "Fence 0x%" PRIx64 " is in use. %s", reinterpret_cast<const uint64_t &>(pFences[i]),
                                 validation_error_map[VALIDATION_ERROR_00183]);
        }
    }
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);

    if (result == VK_SUCCESS) {
        lock.lock();
        for (uint32_t i = 0; i < fenceCount; ++i) {
            auto pFence = getFenceNode(dev_data, pFences[i]);
            if (pFence) {
                pFence->state = FENCE_UNSIGNALED;
            }
        }
        lock.unlock();
    }

    return result;
}

// For given cb_nodes, invalidate them and track object causing invalidation
void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
    for (auto cb_node : cb_nodes) {
        if (cb_node->state == CB_RECORDING) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "Invalidating a command buffer that's currently being recorded: 0x%p.", cb_node->commandBuffer);
        }
        cb_node->state = CB_INVALID;
        cb_node->broken_bindings.push_back(obj);
    }
}

static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
                                              FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
    *framebuffer_state = getFramebufferState(dev_data, framebuffer);
    *obj_struct = {reinterpret_cast<uint64_t &>(framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT};
    if (dev_data->instance_data->disabled.destroy_framebuffer)
        return false;
    bool skip = false;
    if (*framebuffer_state) {
        skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, VALIDATION_ERROR_00422);
    }
    return skip;
}

static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
                                             VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
    dev_data->frameBufferMap.erase(framebuffer);
}

VKAPI_ATTR void VKAPI_CALL
DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    FRAMEBUFFER_STATE *framebuffer_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
        lock.lock();
        PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
    }
}

static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
                                             VK_OBJECT *obj_struct) {
    *rp_state = getRenderPassState(dev_data, render_pass);
    *obj_struct = {reinterpret_cast<uint64_t &>(render_pass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT};
    if (dev_data->instance_data->disabled.destroy_renderpass)
        return false;
    bool skip = false;
    if (*rp_state) {
        skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, VALIDATION_ERROR_00393);
    }
    return skip;
}

static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
                                            VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
    dev_data->renderPassMap.erase(render_pass);
}

VKAPI_ATTR void VKAPI_CALL
DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    RENDER_PASS_STATE *rp_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
        lock.lock();
        PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO: Add check for VALIDATION_ERROR_00658
    // TODO: Add check for VALIDATION_ERROR_00666
    // TODO: Add check for VALIDATION_ERROR_00667
    // TODO: Add check for VALIDATION_ERROR_00668
    // TODO: Add check for VALIDATION_ERROR_00669
    VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
        dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_STATE>(new BUFFER_STATE(*pBuffer, pCreateInfo))));
    }
    return result;
}

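/* The buffer-view checks below correspond to this app-side creation path (illustrative sketch):
 *
 *     // Buffer must already have memory bound, and must have been created with a texel-buffer
 *     // usage bit, e.g.:
 *     //     buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
 *     VkBufferViewCreateInfo bvci = {VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO};
 *     bvci.buffer = buffer;
 *     bvci.format = VK_FORMAT_R32_SFLOAT;
 *     bvci.range = VK_WHOLE_SIZE;
 *     vkCreateBufferView(device, &bvci, nullptr, &buffer_view);
 */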
static bool PreCallValidateCreateBufferView(layer_data *dev_data, const VkBufferViewCreateInfo *pCreateInfo) {
    bool skip_call = false;
    BUFFER_STATE *buffer_state = getBufferState(dev_data, pCreateInfo->buffer);
    // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
    if (buffer_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCreateBufferView()", VALIDATION_ERROR_02522);
        // In order to create a valid buffer view, the buffer must have been created with at least one of the
        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
        skip_call |= ValidateBufferUsageFlags(
            dev_data, buffer_state, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
            VALIDATION_ERROR_00694, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
    }
    return skip_call;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        lock.lock();
        dev_data->bufferViewMap[*pView] = unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        IMAGE_LAYOUT_NODE image_state;
        image_state.layout = pCreateInfo->initialLayout;
        image_state.format = pCreateInfo->format;
        dev_data->imageMap.insert(std::make_pair(*pImage, unique_ptr<IMAGE_STATE>(new IMAGE_STATE(*pImage, pCreateInfo))));
        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
        dev_data->imageLayoutMap[subpair] = image_state;
    }
    return result;
}

6494static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
6495    // Expects global_lock to be held by caller
6496
6497    auto image_state = getImageState(dev_data, image);
6498    if (image_state) {
6499        // If the caller used the special values VK_REMAINING_MIP_LEVELS and VK_REMAINING_ARRAY_LAYERS, resolve them now in our
6500        // internal state to the actual values.
6501        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
6502            range->levelCount = image_state->createInfo.mipLevels - range->baseMipLevel;
6503        }
6504
6505        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
6506            range->layerCount = image_state->createInfo.arrayLayers - range->baseArrayLayer;
6507        }
6508    }
6509}
6510
6511// Return the correct layer/level counts if the caller used the special
6512// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
6513static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6514                                         VkImage image) {
6515    // Expects global_lock to be held by caller
6516
6517    *levels = range.levelCount;
6518    *layers = range.layerCount;
6519    auto image_state = getImageState(dev_data, image);
6520    if (image_state) {
6521        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
6522            *levels = image_state->createInfo.mipLevels - range.baseMipLevel;
6523        }
6524        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
6525            *layers = image_state->createInfo.arrayLayers - range.baseArrayLayer;
6526        }
6527    }
6528}
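
// Worked example: for an image created with mipLevels = 10 and arrayLayers = 6, the range
// { baseMipLevel = 2, levelCount = VK_REMAINING_MIP_LEVELS, baseArrayLayer = 4, layerCount = VK_REMAINING_ARRAY_LAYERS }
// resolves to levelCount = 10 - 2 = 8 and layerCount = 6 - 4 = 2.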

// For the given format verify that the aspect masks make sense
static bool ValidateImageAspectMask(layer_data *dev_data, VkImage image, VkFormat format, VkImageAspectFlags aspect_mask,
                                    const char *func_name) {
    bool skip = false;
    if (vk_format_is_color(format)) {
        if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set. %s", func_name,
                            validation_error_map[VALIDATION_ERROR_00741]);
        } else if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != aspect_mask) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set. %s", func_name,
                            validation_error_map[VALIDATION_ERROR_00741]);
        }
    } else if (vk_format_is_depth_and_stencil(format)) {
        if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE", "%s: Depth/stencil image formats must have "
                                                                                        "at least one of VK_IMAGE_ASPECT_DEPTH_BIT "
                                                                                        "and VK_IMAGE_ASPECT_STENCIL_BIT set. %s",
                            func_name, validation_error_map[VALIDATION_ERROR_00741]);
        } else if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != aspect_mask) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Combination depth/stencil image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT and "
                            "VK_IMAGE_ASPECT_STENCIL_BIT set. %s",
                            func_name, validation_error_map[VALIDATION_ERROR_00741]);
        }
    } else if (vk_format_is_depth_only(format)) {
        if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set. %s", func_name,
                            validation_error_map[VALIDATION_ERROR_00741]);
        } else if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != aspect_mask) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set. %s", func_name,
                            validation_error_map[VALIDATION_ERROR_00741]);
        }
    } else if (vk_format_is_stencil_only(format)) {
        if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Stencil-only image formats must have the VK_IMAGE_ASPECT_STENCIL_BIT set. %s", func_name,
                            validation_error_map[VALIDATION_ERROR_00741]);
        } else if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != aspect_mask) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Stencil-only image formats can have only the VK_IMAGE_ASPECT_STENCIL_BIT set. %s", func_name,
                            validation_error_map[VALIDATION_ERROR_00741]);
        }
    }
    return skip;
}
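
// The checks above reduce to: the aspect mask must match the format's class exactly --
//   color formats:         aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT
//   depth/stencil formats: aspect_mask is a non-empty subset of (DEPTH_BIT | STENCIL_BIT)
//   depth-only formats:    aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT
//   stencil-only formats:  aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT
// For example, VK_FORMAT_D24_UNORM_S8_UINT with (DEPTH_BIT | COLOR_BIT) fails the second depth/stencil check.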

static bool PreCallValidateCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *create_info) {
    bool skip = false;
    IMAGE_STATE *image_state = getImageState(dev_data, create_info->image);
    if (image_state) {
        skip |= ValidateImageUsageFlags(
            dev_data, image_state, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
            false, -1, "vkCreateImageView()",
            "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT]_BIT");
        // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
        skip |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCreateImageView()", VALIDATION_ERROR_02524);
        // Checks imported from image layer
        if (create_info->subresourceRange.baseMipLevel >= image_state->createInfo.mipLevels) {
            std::stringstream ss;
            ss << "vkCreateImageView called with baseMipLevel " << create_info->subresourceRange.baseMipLevel << " for image "
               << create_info->image << " that only has " << image_state->createInfo.mipLevels << " mip levels.";
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        VALIDATION_ERROR_00768, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00768]);
        }
        if (create_info->subresourceRange.baseArrayLayer >= image_state->createInfo.arrayLayers) {
            std::stringstream ss;
            ss << "vkCreateImageView called with baseArrayLayer " << create_info->subresourceRange.baseArrayLayer << " for image "
               << create_info->image << " that only has " << image_state->createInfo.arrayLayers << " array layers.";
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        VALIDATION_ERROR_00769, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00769]);
        }
        // TODO: Need new valid usage language for levelCount == 0 & layerCount == 0
        if (!create_info->subresourceRange.levelCount) {
            std::stringstream ss;
            ss << "vkCreateImageView called with 0 in pCreateInfo->subresourceRange.levelCount.";
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        VALIDATION_ERROR_00768, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00768]);
        }
        if (!create_info->subresourceRange.layerCount) {
            std::stringstream ss;
            ss << "vkCreateImageView called with 0 in pCreateInfo->subresourceRange.layerCount.";
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        VALIDATION_ERROR_00769, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00769]);
        }

        VkImageCreateFlags image_flags = image_state->createInfo.flags;
        VkFormat image_format = image_state->createInfo.format;
        VkFormat view_format = create_info->format;
        VkImageAspectFlags aspect_mask = create_info->subresourceRange.aspectMask;

        // Validate VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT state
        if (image_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
            // Format MUST be compatible (in the same format compatibility class) as the format the image was created with
            if (vk_format_get_compatibility_class(image_format) != vk_format_get_compatibility_class(view_format)) {
                std::stringstream ss;
                ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
                   << " is not in the same format compatibility class as image (" << (uint64_t)create_info->image << ")  format "
                   << string_VkFormat(image_format) << ".  Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT "
                   << "can support ImageViews with differing formats but they must be in the same compatibility class.";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                VALIDATION_ERROR_02171, "IMAGE", "%s %s", ss.str().c_str(),
                                validation_error_map[VALIDATION_ERROR_02171]);
            }
        } else {
            // Format MUST be IDENTICAL to the format the image was created with
            if (image_format != view_format) {
                std::stringstream ss;
                ss << "vkCreateImageView() format " << string_VkFormat(view_format) << " differs from image "
                   << (uint64_t)create_info->image << " format " << string_VkFormat(image_format)
                   << ".  Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT was set on image creation.";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                VALIDATION_ERROR_02172, "IMAGE", "%s %s", ss.str().c_str(),
                                validation_error_map[VALIDATION_ERROR_02172]);
            }
        }

        // Validate correct image aspect bits for desired formats and format consistency
        skip |= ValidateImageAspectMask(dev_data, image_state->image, image_format, aspect_mask, "vkCreateImageView()");
    }
    return skip;
}
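
// Compatibility-class example for the mutable-format check above: VK_FORMAT_R8G8B8A8_UNORM and
// VK_FORMAT_R8G8B8A8_SRGB are both in the 32-bit class, so a VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT image of one
// may be viewed as the other; viewing it as VK_FORMAT_R16G16B16A16_SFLOAT (64-bit class) triggers
// VALIDATION_ERROR_02171.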

static inline void PostCallRecordCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *create_info, VkImageView view) {
    dev_data->imageViewMap[view] = unique_ptr<IMAGE_VIEW_STATE>(new IMAGE_VIEW_STATE(view, create_info));
    ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[view].get()->create_info.subresourceRange, create_info->image);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
        lock.unlock();
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        auto &fence_node = dev_data->fenceMap[*pFence];
        fence_node.fence = *pFence;
        fence_node.createInfo = *pCreateInfo;
        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
    }
    return result;
}

// TODO handle pipeline caches
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
    return result;
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL
GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
    return result;
}

// utility function to set collective state for pipeline
void set_pipeline_state(PIPELINE_STATE *pPipe) {
    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
    if (pPipe->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
                    pPipe->blendConstantsEnabled = true;
                }
            }
        }
    }
}
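
// Example: an attachment with blendEnable = VK_TRUE and srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR
// falls in the [CONSTANT_COLOR, ONE_MINUS_CONSTANT_ALPHA] interval tested above, so blendConstantsEnabled
// becomes true; draw-time validation can then require blend constants to be set (e.g. via
// vkCmdSetBlendConstants() when VK_DYNAMIC_STATE_BLEND_CONSTANTS is in use).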

static bool PreCallCreateGraphicsPipelines(layer_data *device_data, uint32_t count,
                                           const VkGraphicsPipelineCreateInfo *create_infos, vector<PIPELINE_STATE *> &pipe_state) {
    bool skip = false;
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);

    for (uint32_t i = 0; i < count; i++) {
        skip |= verifyPipelineCreateState(device_data, pipe_state, i);
        if (create_infos[i].pVertexInputState != NULL) {
            for (uint32_t j = 0; j < create_infos[i].pVertexInputState->vertexAttributeDescriptionCount; j++) {
                VkFormat format = create_infos[i].pVertexInputState->pVertexAttributeDescriptions[j].format;
                // Internal call to get format info.  Still goes through layers, could potentially go directly to ICD.
                VkFormatProperties properties;
                instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &properties);
                if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
                    skip |= log_msg(
                        device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        __LINE__, VALIDATION_ERROR_01413, "IMAGE",
                        "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
                        "(%s) is not a supported vertex buffer format. %s",
                        i, j, string_VkFormat(format), validation_error_map[VALIDATION_ERROR_01413]);
                }
            }
        }
    }
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                        const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                        VkPipeline *pPipelines) {
    // TODO What to do with pipelineCache?
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    bool skip = false;
    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_STATE *> pipe_state(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    std::unique_lock<std::mutex> lock(global_lock);

    for (i = 0; i < count; i++) {
        pipe_state[i] = new PIPELINE_STATE;
        pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i]);
        pipe_state[i]->render_pass_ci.initialize(getRenderPassState(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
        pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
    }
    skip |= PreCallCreateGraphicsPipelines(dev_data, count, pCreateInfos, pipe_state);

    if (skip) {
        for (i = 0; i < count; i++) {
            delete pipe_state[i];
            pPipelines[i] = VK_NULL_HANDLE;
        }
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    lock.unlock();
    auto result = dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    for (i = 0; i < count; i++) {
        if (pPipelines[i] == VK_NULL_HANDLE) {
            delete pipe_state[i];
        }
        else {
            pipe_state[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pipe_state[i]->pipeline] = pipe_state[i];
        }
    }

    return result;
}
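
// The shadow -> validate -> dispatch -> record sequence above is the general shape of create-style entry
// points in this layer; schematically (hypothetical names):
//
//     shadow(pCreateInfos, state);                                  // 1. copy create info into tracking structs
//     if (validate(state)) return VK_ERROR_VALIDATION_FAILED_EXT;   // 2. validate under global_lock
//     result = down_chain_create(pCreateInfos, pPipelines);         // 3. call down with the lock released
//     record(state, pPipelines);                                    // 4. re-lock and commit state to the map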

VKAPI_ATTR VkResult VKAPI_CALL
CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                       const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                       VkPipeline *pPipelines) {
    bool skip = false;

    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_STATE *> pPipeState(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    std::unique_lock<std::mutex> lock(global_lock);
    for (i = 0; i < count; i++) {
        // TODO: Verify compute stage bits

        // Create and initialize internal tracking data structure
        pPipeState[i] = new PIPELINE_STATE;
        pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
        pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);

        // TODO: Add Compute Pipeline Verification
        skip |= !validate_compute_pipeline(dev_data->report_data, pPipeState[i], &dev_data->enabled_features,
                                                dev_data->shaderModuleMap);
        // skip |= verifyPipelineCreateState(dev_data, pPipeState[i]);
    }

    if (skip) {
        for (i = 0; i < count; i++) {
            // Clean up any locally allocated data structures
            delete pPipeState[i];
            pPipelines[i] = VK_NULL_HANDLE;
        }
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    lock.unlock();
    auto result = dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    for (i = 0; i < count; i++) {
        if (pPipelines[i] == VK_NULL_HANDLE) {
            delete pPipeState[i];
        }
        else {
            pPipeState[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
        }
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
    }
    return result;
}

static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
    if (dev_data->instance_data->disabled.create_descriptor_set_layout)
        return false;
    return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info);
}

static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
                                                    VkDescriptorSetLayout set_layout) {
    // TODO: Convert this to unique_ptr to avoid leaks
    dev_data->descriptorSetLayoutMap[set_layout] = new cvdescriptorset::DescriptorSetLayout(create_info, set_layout);
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                          const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
    if (!skip) {
        lock.unlock();
        result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
        if (VK_SUCCESS == result) {
            lock.lock();
            PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
        }
    }
    return result;
}

// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                      const char *caller_name, uint32_t index = 0) {
    if (dev_data->instance_data->disabled.push_constant_range)
        return false;
    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
    bool skip_call = false;
    // Check that offset + size don't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (offset >= maxPushConstantsSize) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            VALIDATION_ERROR_00877, "DS", "%s call has push constants index %u with offset %u that "
                                                          "exceeds this device's maxPushConstantsSize of %u. %s",
                            caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00877]);
            }
            if (size > maxPushConstantsSize - offset) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            VALIDATION_ERROR_00880, "DS", "%s call has push constants index %u with offset %u and size %u that "
                                                          "exceeds this device's maxPushConstantsSize of %u. %s",
                            caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00880]);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (offset >= maxPushConstantsSize) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            VALIDATION_ERROR_00991, "DS", "%s call has push constants index %u with offset %u that "
                                                          "exceeds this device's maxPushConstantsSize of %u. %s",
                            caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00991]);
            }
            if (size > maxPushConstantsSize - offset) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            VALIDATION_ERROR_00992, "DS", "%s call has push constants index %u with offset %u and size %u that "
                                                          "exceeds this device's maxPushConstantsSize of %u. %s",
                            caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00992]);
            }
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // size needs to be non-zero and a multiple of 4.
    if ((size == 0) || ((size & 0x3) != 0)) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (size == 0) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_00878, "DS", "%s call has push constants index %u with "
                                                                             "size %u. Size must be greater than zero. %s",
                                     caller_name, index, size, validation_error_map[VALIDATION_ERROR_00878]);
            }
            if (size & 0x3) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_00879, "DS", "%s call has push constants index %u with "
                                                                             "size %u. Size must be a multiple of 4. %s",
                                     caller_name, index, size, validation_error_map[VALIDATION_ERROR_00879]);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (size == 0) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_01000, "DS", "%s call has push constants index %u with "
                                                                             "size %u. Size must be greater than zero. %s",
                                     caller_name, index, size, validation_error_map[VALIDATION_ERROR_01000]);
            }
            if (size & 0x3) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_00990, "DS", "%s call has push constants index %u with "
                                                                             "size %u. Size must be a multiple of 4. %s",
                                     caller_name, index, size, validation_error_map[VALIDATION_ERROR_00990]);
            }
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // offset needs to be a multiple of 4.
    if ((offset & 0x3) != 0) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_02521, "DS", "%s call has push constants index %u with "
                                                               "offset %u. Offset must be a multiple of 4. %s",
                                 caller_name, index, offset, validation_error_map[VALIDATION_ERROR_02521]);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_00989, "DS", "%s call has push constants with "
                                                               "offset %u. Offset must be a multiple of 4. %s",
                                 caller_name, offset, validation_error_map[VALIDATION_ERROR_00989]);
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    return skip_call;
}
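
// Worked example: with maxPushConstantsSize = 128, the range { offset = 120, size = 16 } is rejected because
// 16 > 128 - 120. The test is written as (size > max - offset) rather than (offset + size > max) because the
// sum can wrap in 32-bit unsigned arithmetic: offset = 0xFFFFFFF0 with size = 0x20 sums to 0x10, which would
// falsely pass; the (offset >= max) test catches that case first.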

VKAPI_ATTR VkResult VKAPI_CALL
CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : Add checks for VALIDATION_ERRORS 865-871
    // Push Constant Range checks
    uint32_t i, j;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skip_call |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
                                               pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_00882, "DS", "vkCreatePipelineLayout() call has no stageFlags set. %s",
                                 validation_error_map[VALIDATION_ERROR_00882]);
        }
    }
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    // Each range has been individually validated.  Now check for overlap between ranges.
    // There's no explicit Valid Usage language against overlap, so issue a warning instead of an error.
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
            const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
            const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
            const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
            const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
            if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
                                                                  "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
                            i, minA, maxA, j, minB, maxB);
            }
        }
    }
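
    // Example: ranges 0:[0, 16) and 1:[8, 24) overlap because minA (0) <= minB (8) and maxA (16) > minB (8).
    // The half-open comparison treats ranges that merely touch, such as [0, 16) and [16, 32), as disjoint.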

    VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
        plNode.layout = *pPipelineLayout;
        plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
            plNode.set_layouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
        }
        plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
            plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                     VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
                    (uint64_t)*pDescriptorPool))
            return VK_ERROR_VALIDATION_FAILED_EXT;
        DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            std::lock_guard<std::mutex> lock(global_lock);
            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
        }
    } else {
        // TODO: Does anything need to be done if pool creation fails?
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
    // TODO : Add checks for VALIDATION_ERROR_00928
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
    }
    return result;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills common_data with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    if (dev_data->instance_data->disabled.allocate_descriptor_sets)
        return false;
    // All state checks for AllocateDescriptorSets are done in a single function
    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
}
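
// Example of what this guards: a pool created with maxSets = 2 and one VkDescriptorPoolSize of
// { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, descriptorCount = 4 } cannot satisfy a request for 3 sets, nor for
// layouts that together require 5 uniform-buffer descriptors; such requests are flagged during validation.
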
// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                 VkDescriptorSet *pDescriptorSets,
                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // All the updates are contained in a single cvdescriptorset function
    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
                                                   &dev_data->setMap, dev_data);
}

VKAPI_ATTR VkResult VKAPI_CALL
AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
    bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
        lock.unlock();
    }
    return result;
}
// Verify state before freeing DescriptorSets
static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                              const VkDescriptorSet *descriptor_sets) {
    if (dev_data->instance_data->disabled.free_descriptor_sets)
        return false;
    bool skip_call = false;
    // First make sure sets being destroyed are not currently in-use
    for (uint32_t i = 0; i < count; ++i)
        skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");

    DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
    if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
        // Can't Free from a NON_FREE pool
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                             reinterpret_cast<uint64_t &>(pool), __LINE__, VALIDATION_ERROR_00922, "DS",
                             "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                             "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
                             validation_error_map[VALIDATION_ERROR_00922]);
    }
    return skip_call;
}
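
// Only pools created with the free-set flag may have individual sets freed; a minimal sketch
// (hypothetical pool_size variable):
//
//     VkDescriptorPoolCreateInfo pool_ci = {};
//     pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
//     pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;  // required for vkFreeDescriptorSets
//     pool_ci.maxSets = 16;
//     pool_ci.poolSizeCount = 1;
//     pool_ci.pPoolSizes = &pool_size;
//
// Without that flag the only way to recycle sets is vkResetDescriptorPool(), and the check above reports
// VALIDATION_ERROR_00922.
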
// Sets have been removed from the pool so update underlying state
static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                             const VkDescriptorSet *descriptor_sets) {
    DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
    // Update available descriptor sets in pool
    pool_state->availableSets += count;

    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
    for (uint32_t i = 0; i < count; ++i) {
        auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
        uint32_t type_index = 0, descriptor_count = 0;
        for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
            type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
            descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
            pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
        }
        freeDescriptorSet(dev_data, descriptor_set);
        pool_state->sets.erase(descriptor_set);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
        lock.unlock();
    }
    return result;
}
// TODO : This is a Proof-of-concept for core validation architecture
//  Really we'll want to break out these functions to separate files but
//  keeping it all together here to prove out design
// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    if (dev_data->instance_data->disabled.update_descriptor_sets)
        return false;
    // First thing to do is perform map look-ups.
    // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets
    //  so we can't just do a single map look-up up-front, but do them individually in functions below

    // Now make call(s) that validate state, but don't perform state updates in this function
    // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the
    //  namespace which will parse params and make calls into specific class instances
    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
                                                         descriptorCopyCount, pDescriptorCopies);
}
// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                               const VkCopyDescriptorSet *pDescriptorCopies) {
    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                 pDescriptorCopies);
}

VKAPI_ATTR void VKAPI_CALL
UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
    // Only map look-up at top level is for device-level layer_data
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                         pDescriptorCopies);
    lock.unlock();
    if (!skip_call) {
        dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                      pDescriptorCopies);
        lock.lock();
        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                           pDescriptorCopies);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        std::unique_lock<std::mutex> lock(global_lock);
        auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool);

        if (pPool) {
            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                pPool->commandBuffers.push_back(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                resetCB(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pCreateInfo;
                pCB->device = device;
            }
        }
        lock.unlock();
    }
    return result;
}

// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
    addCommandBufferBinding(&fb_state->cb_bindings,
                            {reinterpret_cast<uint64_t &>(fb_state->framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT},
                            cb_state);
    for (auto attachment : fb_state->attachments) {
        auto view_state = attachment.view_state;
        if (view_state) {
            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
        }
        auto rp_state = getRenderPassState(dev_data, fb_state->createInfo.renderPass);
        if (rp_state) {
            addCommandBufferBinding(
                &rp_state->cb_bindings,
                {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state);
        }
    }
}

7304VKAPI_ATTR VkResult VKAPI_CALL
7305BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
7306    bool skip_call = false;
7307    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7308    std::unique_lock<std::mutex> lock(global_lock);
7309    // Validate command buffer level
7310    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
7311    if (cb_node) {
7312        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
7313        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
7314            skip_call |=
7315                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7316                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00104, "MEM",
7317                        "Calling vkBeginCommandBuffer() on active command buffer 0x%p before it has completed. "
7318                        "You must check command buffer fence before this call. %s",
7319                        commandBuffer, validation_error_map[VALIDATION_ERROR_00104]);
7320        }
7321        clear_cmd_buf_and_mem_references(dev_data, cb_node);
7322        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
7323            // Secondary Command Buffer
7324            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
7325            if (!pInfo) {
7326                skip_call |=
7327                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7328                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00106, "DS",
7329                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info. %s",
7330                            commandBuffer, validation_error_map[VALIDATION_ERROR_00106]);
7331            } else {
7332                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
7333                    // Object_tracker makes sure these objects are valid
7334                    assert(pInfo->renderPass);
7335                    assert(pInfo->framebuffer);
7336                    string errorString = "";
7337                    auto framebuffer = getFramebufferState(dev_data, pInfo->framebuffer);
7338                    if (framebuffer) {
7339                        if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
7340                            !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
7341                                                             getRenderPassState(dev_data, pInfo->renderPass)->createInfo.ptr(),
7342                                                             errorString)) {
7343                            // renderPass that framebuffer was created with must be compatible with local renderPass
7344                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7345                                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7346                                                 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00112, "DS",
7347                                                 "vkBeginCommandBuffer(): Secondary Command "
7348                                                 "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
7349                                                 "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s. %s",
7350                                                 commandBuffer, reinterpret_cast<const uint64_t &>(pInfo->renderPass),
7351                                                 reinterpret_cast<const uint64_t &>(pInfo->framebuffer),
7352                                                 reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass),
7353                                                 errorString.c_str(), validation_error_map[VALIDATION_ERROR_00112]);
7354                        }
7355                        // Connect this framebuffer and its children to this cmdBuffer
7356                        AddFramebufferBinding(dev_data, cb_node, framebuffer);
7357                    }
7358                }
7359                if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
7360                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
7361                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7362                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
7363                                         __LINE__, VALIDATION_ERROR_00107, "DS",
7364                                         "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
                                         "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device does not "
                                         "support precise occlusion queries. %s",
                                         commandBuffer, validation_error_map[VALIDATION_ERROR_00107]);
                }
            }
            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
                auto renderPass = getRenderPassState(dev_data, pInfo->renderPass);
                if (renderPass) {
                    if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
                        skip_call |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
                            VALIDATION_ERROR_00111, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have a subpass index (%d) "
                            "that is less than the number of subpasses (%d). %s",
                            commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount,
                            validation_error_map[VALIDATION_ERROR_00111]);
                    }
                }
            }
        }
        if (CB_RECORDING == cb_node->state) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00103, "DS",
                        "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%p"
                        ") in the RECORDING state. Must first call vkEndCommandBuffer(). %s",
                        commandBuffer, validation_error_map[VALIDATION_ERROR_00103]);
        } else if (CB_RECORDED == cb_node->state || (CB_INVALID == cb_node->state && CMD_END == cb_node->last_cmd)) {
            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
            auto pPool = getCommandPoolNode(dev_data, cmdPool);
            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00105, "DS",
                            "Call to vkBeginCommandBuffer() on command buffer (0x%p"
                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
                            commandBuffer, (uint64_t)cmdPool, validation_error_map[VALIDATION_ERROR_00105]);
            }
            resetCB(dev_data, commandBuffer);
        }
        // Set updated state here in case implicit reset occurs above
        cb_node->state = CB_RECORDING;
        cb_node->beginInfo = *pBeginInfo;
        if (cb_node->beginInfo.pInheritanceInfo) {
            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
            // If this is a secondary command buffer that is inheriting, update the items it should inherit.
            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                cb_node->activeRenderPass = getRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
                cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
            }
        }
    }
    lock.unlock();
    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);

    return result;
}
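
// Illustrative sketch (not part of the layer): recording a secondary command buffer in a way that satisfies the checks
// above -- pInheritanceInfo is supplied, and when RENDER_PASS_CONTINUE_BIT is set the renderPass/framebuffer pair is
// compatible. The handles (render_pass, framebuffer, secondary_cb) are hypothetical placeholders.
//
//     VkCommandBufferInheritanceInfo inheritance = {};
//     inheritance.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inheritance.renderPass = render_pass;   // must be compatible with the framebuffer's renderPass
//     inheritance.subpass = 0;                // must be < render_pass's subpassCount
//     inheritance.framebuffer = framebuffer;  // VK_NULL_HANDLE is also legal here
//
//     VkCommandBufferBeginInfo begin_info = {};
//     begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin_info.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin_info.pInheritanceInfo = &inheritance;
//     vkBeginCommandBuffer(secondary_cb, &begin_info);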

VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
    bool skip_call = false;
    VkResult result = VK_SUCCESS;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
            // This needs spec clarification to update valid usage, see comments in PR:
            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
            skip_call |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_00123);
        }
        skip_call |= ValidateCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_END);
        for (auto query : pCB->activeQueries) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_00124, "DS",
                                 "Ending command buffer with in-progress query: queryPool 0x%" PRIx64 ", index %d. %s",
                                 (uint64_t)(query.pool), query.index, validation_error_map[VALIDATION_ERROR_00124]);
        }
    }
    if (!skip_call) {
        lock.unlock();
        result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
        lock.lock();
        // Guard pCB here as well -- getCBNode() may have returned null for an unknown command buffer
        if ((VK_SUCCESS == result) && pCB) {
            pCB->state = CB_RECORDED;
            // Reset CB status flags
            pCB->status = 0;
        }
    } else {
        result = VK_ERROR_VALIDATION_FAILED_EXT;
    }
    lock.unlock();
    return result;
}
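
// Illustrative sketch (not part of the layer): the VALIDATION_ERROR_00124 check above fires when a query is still
// active at vkEndCommandBuffer() time, so every vkCmdBeginQuery() must be paired before the buffer ends (cb and
// query_pool are hypothetical handles):
//
//     vkCmdBeginQuery(cb, query_pool, 0, 0);
//     // ... draw commands ...
//     vkCmdEndQuery(cb, query_pool, 0);  // must precede vkEndCommandBuffer()
//     vkEndCommandBuffer(cb);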

VKAPI_ATTR VkResult VKAPI_CALL
ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    // Guard the CB node so an invalid handle can't be dereferenced below
    if (pCB) {
        VkCommandPool cmdPool = pCB->createInfo.commandPool;
        auto pPool = getCommandPoolNode(dev_data, cmdPool);
        if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00093, "DS",
                        "Attempt to reset command buffer (0x%p) created from command pool (0x%" PRIxLEAST64
                        ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
                        commandBuffer, (uint64_t)cmdPool, validation_error_map[VALIDATION_ERROR_00093]);
        }
        skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_00092);
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
    if (VK_SUCCESS == result) {
        lock.lock();
        dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
        resetCB(dev_data, commandBuffer);
        lock.unlock();
    }
    return result;
}
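
// Illustrative sketch (not part of the layer): both the explicit reset above and the implicit reset performed by
// vkBeginCommandBuffer() require the parent pool to carry the reset flag (device, queue_family_index, and pool are
// hypothetical):
//
//     VkCommandPoolCreateInfo pool_info = {};
//     pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
//     pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
//     pool_info.queueFamilyIndex = queue_family_index;
//     vkCreateCommandPool(device, &pool_info, nullptr, &pool);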

VKAPI_ATTR void VKAPI_CALL
CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
    bool skip = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
        UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_BINDPIPELINE);
        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (cb_state->activeRenderPass)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
                        (uint64_t)pipeline, (uint64_t)cb_state->activeRenderPass->renderPass);
        }
        // TODO: VALIDATION_ERROR_00594 VALIDATION_ERROR_00596

        // Keep all pipe_state-derived bookkeeping inside the valid-pipeline branch so a null pipe_state is never dereferenced
        PIPELINE_STATE *pipe_state = getPipelineState(dev_data, pipeline);
        if (pipe_state) {
            cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
            set_cb_pso_status(cb_state, pipe_state);
            set_pipeline_state(pipe_state);
            addCommandBufferBinding(&pipe_state->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT}, cb_state);
            if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
                // Add binding for child renderpass
                auto rp_state = getRenderPassState(dev_data, pipe_state->graphicsPipelineCI.renderPass);
                if (rp_state) {
                    addCommandBufferBinding(
                        &rp_state->cb_bindings,
                        {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT},
                        cb_state);
                }
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            (uint64_t)pipeline, __LINE__, VALIDATION_ERROR_00600, "DS",
                            "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist! %s", (uint64_t)(pipeline),
                            validation_error_map[VALIDATION_ERROR_00600]);
        }
    }
    lock.unlock();
    if (!skip)
        dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE);
        pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}
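
// Worked example of the mask update above (hypothetical values): firstViewport = 2, viewportCount = 3 yields
// ((1u << 3) - 1u) << 2 == 0x1C, i.e. viewports 2..4 are marked as set for later draw-time validation.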

VKAPI_ATTR void VKAPI_CALL
CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSCISSORSTATE);
        pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}

VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE);
        pCB->status |= CBSTATUS_LINE_WIDTH_SET;

        PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, VALIDATION_ERROR_01476, "DS",
                                 "vkCmdSetLineWidth() called but the bound pipeline was created without the "
                                 "VK_DYNAMIC_STATE_LINE_WIDTH dynamic state; behavior is undefined and the new line width "
                                 "may be ignored. %s",
                                 validation_error_map[VALIDATION_ERROR_01476]);
        } else {
            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
}
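
// Illustrative sketch (not part of the layer): the VALIDATION_ERROR_01476 warning above is avoided by listing the
// state as dynamic at pipeline-creation time (hypothetical pipeline-creation snippet):
//
//     const VkDynamicState dynamic_states[] = {VK_DYNAMIC_STATE_LINE_WIDTH};
//     VkPipelineDynamicStateCreateInfo dynamic_info = {};
//     dynamic_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
//     dynamic_info.dynamicStateCount = 1;
//     dynamic_info.pDynamicStates = dynamic_states;
//     // ... then point VkGraphicsPipelineCreateInfo::pDynamicState at &dynamic_info ...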

VKAPI_ATTR void VKAPI_CALL
CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE);
        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
}

VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETBLENDSTATE);
        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE);
        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE);
        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE);
        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE);
        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
}

VKAPI_ATTR void VKAPI_CALL
CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                      const uint32_t *pDynamicOffsets) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            // Track total count of dynamic descriptor types to make sure we have an offset for each one
            uint32_t totalDynamicDescriptors = 0;
            string errorString = "";
            uint32_t lastSetIndex = firstSet + setCount - 1;
            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
            }
            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
            auto pipeline_layout = getPipelineLayout(dev_data, layout);
            for (uint32_t i = 0; i < setCount; i++) {
                cvdescriptorset::DescriptorSet *descriptor_set = getSetNode(dev_data, pDescriptorSets[i]);
                if (descriptor_set) {
                    pCB->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = descriptor_set;
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                         DRAWSTATE_NONE, "DS", "Descriptor Set 0x%" PRIxLEAST64 " bound on pipeline %s",
                                         (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
                    if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                             DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                             "Descriptor Set 0x%" PRIxLEAST64
                                             " bound but it was never updated. You may want to either update it or not bind it.",
                                             (uint64_t)pDescriptorSets[i]);
                    }
                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
                    if (!verify_set_layout_compatibility(dev_data, descriptor_set, pipeline_layout, i + firstSet, errorString)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                             VALIDATION_ERROR_00974, "DS",
                                             "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
                                             "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s. %s",
                                             i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str(),
                                             validation_error_map[VALIDATION_ERROR_00974]);
                    }

                    auto setDynamicDescriptorCount = descriptor_set->GetDynamicDescriptorCount();

                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();

                    if (setDynamicDescriptorCount) {
                        // First make sure we won't overstep bounds of pDynamicOffsets array
                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                                        "descriptorSet #%u (0x%" PRIxLEAST64
                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
                                        i, (uint64_t)pDescriptorSets[i], descriptor_set->GetDynamicDescriptorCount(),
                                        (dynamicOffsetCount - totalDynamicDescriptors));
                        } else { // Validate and store dynamic offsets with the set
                            // Validate Dynamic Offset Minimums
                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
                            for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
                                if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
                                        skip_call |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_00978,
                                            "DS", "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
                                                  "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
                                            validation_error_map[VALIDATION_ERROR_00978]);
                                    }
                                    cur_dyn_offset++;
                                } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
                                        skip_call |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_00978,
                                            "DS", "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
                                                  "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment,
                                            validation_error_map[VALIDATION_ERROR_00978]);
                                    }
                                    cur_dyn_offset++;
                                }
                            }

                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
                            // Keep running total of dynamic descriptor count to verify at the end
                            totalDynamicDescriptors += setDynamicDescriptorCount;

                        }
                    }
                } else {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                         DRAWSTATE_INVALID_SET, "DS", "Attempt to bind descriptor set 0x%" PRIxLEAST64
                                         " that doesn't exist!",
                                         (uint64_t)pDescriptorSets[i]);
                }
                skip_call |= ValidateCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
                UpdateCmdBufferLastCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS);
                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
                if (firstSet > 0) { // Check set #s below the first bound set
                    for (uint32_t i = 0; i < firstSet; ++i) {
                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
                                                             pipeline_layout, i, errorString)) {
                            skip_call |= log_msg(
                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
                                "DescriptorSet 0x%" PRIxLEAST64
                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
                        }
                    }
                }
                // Check if newly last bound set invalidates any remaining bound sets
                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
                    if (oldFinalBoundSet &&
                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, pipeline_layout, lastSetIndex, errorString)) {
                        auto old_set = oldFinalBoundSet->GetSet();
                        skip_call |=
                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
                                    lastSetIndex + 1, (uint64_t)layout);
                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                    }
                }
            }
            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
            if (totalDynamicDescriptors != dynamicOffsetCount) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00975, "DS",
                            "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
                            "is %u. It should exactly match the number of dynamic descriptors. %s",
                            setCount, totalDynamicDescriptors, dynamicOffsetCount, validation_error_map[VALIDATION_ERROR_00975]);
            }
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
                                                       pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}
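
// Illustrative sketch (not part of the layer): the checks above require exactly one entry in pDynamicOffsets per
// dynamic descriptor across all bound sets, each offset aligned to the matching device limit. Hypothetical case of
// one set holding a single UNIFORM_BUFFER_DYNAMIC descriptor (cb, pipeline_layout, descriptor_set are placeholders):
//
//     uint32_t dynamic_offset = 256;  // assumed to be a multiple of minUniformBufferOffsetAlignment
//     vkCmdBindDescriptorSets(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
//                             0 /*firstSet*/, 1, &descriptor_set, 1, &dynamic_offset);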

VKAPI_ATTR void VKAPI_CALL
CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO : Somewhere need to verify that IBs have correct usage state flagged
    std::unique_lock<std::mutex> lock(global_lock);

    auto buffer_state = getBufferState(dev_data, buffer);
    auto cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node && buffer_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_02543);
        std::function<bool()> function = [=]() {
            return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER);
        VkDeviceSize offset_align = 0;
        switch (indexType) {
        case VK_INDEX_TYPE_UINT16:
            offset_align = 2;
            break;
        case VK_INDEX_TYPE_UINT32:
            offset_align = 4;
            break;
        default:
            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
            break;
        }
        if (!offset_align || (offset % offset_align)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
                                 "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
                                 offset, string_VkIndexType(indexType));
        }
        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}
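
// Worked example of the alignment rule above: VK_INDEX_TYPE_UINT16 requires a 2-byte-aligned offset and
// VK_INDEX_TYPE_UINT32 a 4-byte-aligned one, so (cb and index_buffer are hypothetical handles):
//
//     vkCmdBindIndexBuffer(cb, index_buffer, 6, VK_INDEX_TYPE_UINT16);  // OK: 6 % 2 == 0
//     vkCmdBindIndexBuffer(cb, index_buffer, 6, VK_INDEX_TYPE_UINT32);  // flags DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR: 6 % 4 != 0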

void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
    uint32_t end = firstBinding + bindingCount;
    if (pCB->currentDrawData.buffers.size() < end) {
        pCB->currentDrawData.buffers.resize(end);
    }
    for (uint32_t i = 0; i < bindingCount; ++i) {
        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
    }
}

static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }

VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
                                                uint32_t bindingCount, const VkBuffer *pBuffers,
                                                const VkDeviceSize *pOffsets) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO : Somewhere need to verify that VBs have correct usage state flagged
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node) {
        for (uint32_t i = 0; i < bindingCount; ++i) {
            auto buffer_state = getBufferState(dev_data, pBuffers[i]);
            assert(buffer_state);
            skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_02546);
            std::function<bool()> function = [=]() {
                return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
            };
            cb_node->validate_functions.push_back(function);
        }
        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER);
        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
    } else {
        skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}

// Expects global_lock to be held by caller
static void MarkStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    for (auto imageView : pCB->updateImages) {
        auto view_state = getImageViewState(dev_data, imageView);
        if (!view_state)
            continue;

        auto image_state = getImageState(dev_data, view_state->create_info.image);
        assert(image_state);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, image_state, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    for (auto buffer : pCB->updateBuffers) {
        auto buffer_state = getBufferState(dev_data, buffer);
        assert(buffer_state);
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, buffer_state, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
}
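
// The lambdas queued above follow this file's deferred-validation pattern: instead of checking or marking memory
// contents at record time, a std::function<bool()> is pushed onto the command buffer and evaluated later when the
// buffer's validate_functions are walked (at submit time). A minimal sketch of that consumer loop, assuming a
// recorded GLOBAL_CB_NODE *pCB:
//
//     bool skip = false;
//     for (auto &validate_fn : pCB->validate_functions) {
//         skip |= validate_fn();  // each closure reports its own skip decision
//     }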
7980
7981// Generic function to handle validation for all CmdDraw* type functions
7982static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
7983                                CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller,
7984                                UNIQUE_VALIDATION_ERROR_CODE msg_code, UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
7985    bool skip = false;
7986    *cb_state = getCBNode(dev_data, cmd_buffer);
7987    if (*cb_state) {
7988        skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
7989        skip |= ValidateDrawState(dev_data, *cb_state, indexed, bind_point, caller, dynamic_state_msg_code);
7990        skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
7991                                                                : insideRenderPass(dev_data, *cb_state, caller, msg_code);
7992    }
7993    return skip;
7994}
7995
7996// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
7997static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
7998                                           CMD_TYPE cmd_type) {
7999    UpdateDrawState(dev_data, cb_state, bind_point);
8000    MarkStoreImagesAndBuffersAsWritten(dev_data, cb_state);
8001    UpdateCmdBufferLastCmd(dev_data, cb_state, cmd_type);
8002}
8003
8004// Generic function to handle state update for all CmdDraw* type functions
8005static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
8006                                   CMD_TYPE cmd_type, DRAW_TYPE draw_type) {
8007    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, cmd_type);
8008    updateResourceTrackingOnDraw(cb_state);
8009    cb_state->drawCount[draw_type]++;
8010}
8011
8012static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
8013                                   GLOBAL_CB_NODE **cb_state, const char *caller) {
8014    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VALIDATION_ERROR_01365,
8015                               VALIDATION_ERROR_02203);
8016}
8017
8018static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
8019    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAW, DRAW);
8020}
8021
8022VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
8023                                   uint32_t firstVertex, uint32_t firstInstance) {
8024    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8025    GLOBAL_CB_NODE *cb_state = nullptr;
8026    std::unique_lock<std::mutex> lock(global_lock);
8027    bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
8028    lock.unlock();
8029    if (!skip) {
8030        dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
8031        lock.lock();
8032        PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
8033        lock.unlock();
8034    }
8035}
8036
8037static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
8038                                          VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
8039    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VALIDATION_ERROR_01372,
8040                               VALIDATION_ERROR_02216);
8041}
8042
8043static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
8044    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXED, DRAW_INDEXED);
8045}
8046
8047VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
8048                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
8049                                                            uint32_t firstInstance) {
8050    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8051    GLOBAL_CB_NODE *cb_state = nullptr;
8052    std::unique_lock<std::mutex> lock(global_lock);
8053    bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
8054                                              "vkCmdDrawIndexed()");
8055    lock.unlock();
8056    if (!skip) {
8057        dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
8058        lock.lock();
8059        PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
8060        lock.unlock();
8061    }
8062}
8063
8064static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
8065                                           VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
8066                                           const char *caller) {
8067    bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller,
8068                                    VALIDATION_ERROR_01381, VALIDATION_ERROR_02234);
8069    *buffer_state = getBufferState(dev_data, buffer);
8070    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02544);
8071    return skip;
8072}
8073
8074static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
8075                                          BUFFER_STATE *buffer_state) {
8076    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDIRECT, DRAW_INDIRECT);
8077    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
8078}
8079
8080VKAPI_ATTR void VKAPI_CALL
8081CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
8082    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8083    GLOBAL_CB_NODE *cb_state = nullptr;
8084    BUFFER_STATE *buffer_state = nullptr;
8085    std::unique_lock<std::mutex> lock(global_lock);
8086    bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
8087                                               &buffer_state, "vkCmdDrawIndirect()");
8088    lock.unlock();
8089    if (!skip) {
8090        dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
8091        lock.lock();
8092        PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
8093        lock.unlock();
8094    }
8095}
8096
8097static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
8098                                                  VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
8099                                                  BUFFER_STATE **buffer_state, const char *caller) {
8100    bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
8101                                    VALIDATION_ERROR_01393, VALIDATION_ERROR_02272);
8102    *buffer_state = getBufferState(dev_data, buffer);
8103    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02545);
8104    return skip;
8105}
8106
8107static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
8108                                                 BUFFER_STATE *buffer_state) {
8109    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXEDINDIRECT, DRAW_INDEXED_INDIRECT);
8110    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
8111}
8112
8113VKAPI_ATTR void VKAPI_CALL
8114CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
8115    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8116    GLOBAL_CB_NODE *cb_state = nullptr;
8117    BUFFER_STATE *buffer_state = nullptr;
8118    std::unique_lock<std::mutex> lock(global_lock);
8119    bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
8120                                                      &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
8121    lock.unlock();
8122    if (!skip) {
8123        dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
8124        lock.lock();
8125        PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
8126        lock.unlock();
8127    }
8128}
8129
8130static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
8131                                       VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
8132    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VALIDATION_ERROR_01562,
8133                               VALIDATION_ERROR_UNDEFINED);
8134}
8135
8136static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
8137    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCH);
8138}
8139
8140VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
8141    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8142    GLOBAL_CB_NODE *cb_state = nullptr;
8143    std::unique_lock<std::mutex> lock(global_lock);
8144    bool skip =
8145        PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
8146    lock.unlock();
8147    if (!skip) {
8148        dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
8149        lock.lock();
8150        PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
8151        lock.unlock();
8152    }
8153}
8154
8155static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
8156                                               VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
8157                                               BUFFER_STATE **buffer_state, const char *caller) {
8158    bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller,
8159                                    VALIDATION_ERROR_01569, VALIDATION_ERROR_UNDEFINED);
8160    *buffer_state = getBufferState(dev_data, buffer);
8161    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02547);
8162    return skip;
8163}
8164
8165static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
8166                                              BUFFER_STATE *buffer_state) {
8167    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCHINDIRECT);
8168    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
8169}
8170
8171VKAPI_ATTR void VKAPI_CALL
8172CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
8173    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8174    GLOBAL_CB_NODE *cb_state = nullptr;
8175    BUFFER_STATE *buffer_state = nullptr;
8176    std::unique_lock<std::mutex> lock(global_lock);
8177    bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
8178                                                   &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
8179    lock.unlock();
8180    if (!skip) {
8181        dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
8182        lock.lock();
8183        PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
8184        lock.unlock();
8185    }
8186}
8187
8188VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
8189                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
8190    bool skip_call = false;
8191    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8192    std::unique_lock<std::mutex> lock(global_lock);
8193
8194    auto cb_node = getCBNode(dev_data, commandBuffer);
8195    auto src_buff_state = getBufferState(dev_data, srcBuffer);
8196    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
8197    if (cb_node && src_buff_state && dst_buff_state) {
8198        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_state, "vkCmdCopyBuffer()", VALIDATION_ERROR_02531);
8199        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyBuffer()", VALIDATION_ERROR_02532);
8200        // Update bindings between buffers and cmd buffer
8201        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_state);
8202        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
8203        // Validate that SRC & DST buffers have correct usage flags set
8204        skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
8205                                              VALIDATION_ERROR_01164, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
8206        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8207                                              VALIDATION_ERROR_01165, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8208
8209        std::function<bool()> function = [=]() {
8210            return ValidateBufferMemoryIsValid(dev_data, src_buff_state, "vkCmdCopyBuffer()");
8211        };
8212        cb_node->validate_functions.push_back(function);
8213        function = [=]() {
8214            SetBufferMemoryValid(dev_data, dst_buff_state, true);
8215            return false;
8216        };
8217        cb_node->validate_functions.push_back(function);
8218
8219        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
8220        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYBUFFER);
8221        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer()", VALIDATION_ERROR_01172);
8222    } else {
        // Param_checker will flag errors on invalid objects; just assert here as a debugging aid
8224        assert(0);
8225    }
8226    lock.unlock();
8227    if (!skip_call)
8228        dev_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
8229}
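
// Application-side sketch of the copy validated above: both buffers must have been created
// with the matching TRANSFER usage bit, and each region must lie within both buffers.
//
//     VkBufferCopy region = {src_offset, dst_offset, copy_size};  // hypothetical offsets/size
//     vkCmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, 1, &region);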
8230
8231static bool VerifySourceImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage srcImage,
8232                                    VkImageSubresourceLayers subLayers, VkImageLayout srcImageLayout,
8233                                    UNIQUE_VALIDATION_ERROR_CODE msgCode) {
8234    bool skip_call = false;
8235
8236    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
8237        uint32_t layer = i + subLayers.baseArrayLayer;
8238        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
8239        IMAGE_CMD_BUF_LAYOUT_NODE node;
8240        if (!FindLayout(cb_node, srcImage, sub, node)) {
8241            SetLayout(cb_node, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
8242            continue;
8243        }
        if (node.layout != srcImageLayout) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot copy from an image whose specified source layout (%s) does not match its current layout (%s).",
                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
8251        }
8252    }
8253    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
8254        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
8255            // TODO : Can we deal with image node from the top of call tree and avoid map look-up here?
            auto image_state = getImageState(dev_data, srcImage);
            if (image_state && (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR)) {
8258                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
8259                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8260                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8261                                     "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
8262            }
8263        } else {
8264            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8265                                 msgCode, "DS", "Layout for input image is %s but can only be TRANSFER_SRC_OPTIMAL or GENERAL. %s",
8266                                 string_VkImageLayout(srcImageLayout), validation_error_map[msgCode]);
8267        }
8268    }
8269    return skip_call;
8270}
8271
8272static bool VerifyDestImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage destImage,
8273                                  VkImageSubresourceLayers subLayers, VkImageLayout destImageLayout,
8274                                  UNIQUE_VALIDATION_ERROR_CODE msgCode) {
8275    bool skip_call = false;
8276
8277    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
8278        uint32_t layer = i + subLayers.baseArrayLayer;
8279        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
8280        IMAGE_CMD_BUF_LAYOUT_NODE node;
8281        if (!FindLayout(cb_node, destImage, sub, node)) {
8282            SetLayout(cb_node, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
8283            continue;
8284        }
        if (node.layout != destImageLayout) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot copy to an image whose specified destination layout (%s) does not match its current layout (%s).",
                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
8291        }
8292    }
8293    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
8294        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            auto image_state = getImageState(dev_data, destImage);
            if (image_state && (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR)) {
8297                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
8298                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8299                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8300                                     "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
8301            }
8302        } else {
8303            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8304                                 msgCode, "DS", "Layout for output image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL. %s",
8305                                 string_VkImageLayout(destImageLayout), validation_error_map[msgCode]);
8306        }
8307    }
8308    return skip_call;
8309}
8310
8311static bool VerifyClearImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage image, VkImageSubresourceRange range,
8312                                   VkImageLayout dest_image_layout, const char *func_name) {
8313    bool skip = false;
8314
8315    VkImageSubresourceRange resolvedRange = range;
8316    ResolveRemainingLevelsLayers(dev_data, &resolvedRange, image);
8317
8318    if (dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
8319        if (dest_image_layout == VK_IMAGE_LAYOUT_GENERAL) {
            auto image_state = getImageState(dev_data, image);
            if (image_state && (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR)) {
8322                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
8323                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8324                                0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8325                                "%s: Layout for cleared image should be TRANSFER_DST_OPTIMAL instead of GENERAL.", func_name);
8326            }
8327        } else {
8328            UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_01086;
8329            if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
8330                error_code = VALIDATION_ERROR_01101;
8331            } else {
8332                assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
8333            }
8334            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8335                            error_code, "DS", "%s: Layout for cleared image is %s but can only be "
8336                                              "TRANSFER_DST_OPTIMAL or GENERAL. %s",
8337                            func_name, string_VkImageLayout(dest_image_layout), validation_error_map[error_code]);
8338        }
8339    }
8340
8341    for (uint32_t levelIdx = 0; levelIdx < resolvedRange.levelCount; ++levelIdx) {
8342        uint32_t level = levelIdx + resolvedRange.baseMipLevel;
8343        for (uint32_t layerIdx = 0; layerIdx < resolvedRange.layerCount; ++layerIdx) {
8344            uint32_t layer = layerIdx + resolvedRange.baseArrayLayer;
8345            VkImageSubresource sub = {resolvedRange.aspectMask, level, layer};
8346            IMAGE_CMD_BUF_LAYOUT_NODE node;
8347            if (!FindLayout(cb_node, image, sub, node)) {
8348                SetLayout(cb_node, image, sub, IMAGE_CMD_BUF_LAYOUT_NODE(dest_image_layout, dest_image_layout));
8349                continue;
8350            }
8351            if (node.layout != dest_image_layout) {
8352                UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_01085;
8353                if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
8354                    error_code = VALIDATION_ERROR_01100;
8355                } else {
8356                    assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
8357                }
8358                skip |=
8359                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
8360                            __LINE__, error_code, "DS", "%s: Cannot clear an image whose layout is %s and "
8361                                                        "doesn't match the current layout %s. %s",
8362                            func_name, string_VkImageLayout(dest_image_layout), string_VkImageLayout(node.layout),
8363                            validation_error_map[error_code]);
8364            }
8365        }
8366    }
8367
8368    return skip;
8369}
8370
8371// Test if two VkExtent3D structs are equivalent
8372static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
8373    bool result = true;
8374    if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
8375        (extent->depth != other_extent->depth)) {
8376        result = false;
8377    }
8378    return result;
8379}
8380
8381// Returns the image extent of a specific subresource.
8382static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_STATE *img, const VkImageSubresourceLayers *subresource) {
8383    const uint32_t mip = subresource->mipLevel;
8384    VkExtent3D extent = img->createInfo.extent;
8385    extent.width = std::max(1U, extent.width >> mip);
8386    extent.height = std::max(1U, extent.height >> mip);
8387    extent.depth = std::max(1U, extent.depth >> mip);
8388    return extent;
8389}
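
// e.g. a 16x16x1 image queried at mipLevel 3 yields a (2, 2, 1) subresource extent.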
8390
8391// Test if the extent argument has all dimensions set to 0.
8392static inline bool IsExtentZero(const VkExtent3D *extent) {
8393    return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
8394}
8395
8396// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
8397static inline VkExtent3D GetScaledItg(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img) {
8398    // Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
8399    VkExtent3D granularity = { 0, 0, 0 };
8400    auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
8401    if (pPool) {
8402        granularity = dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
8403        if (vk_format_is_compressed(img->createInfo.format)) {
8404            auto block_size = vk_format_compressed_block_size(img->createInfo.format);
8405            granularity.width *= block_size.width;
8406            granularity.height *= block_size.height;
8407        }
8408    }
8409    return granularity;
8410}
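
// e.g. a queue family granularity of (1, 1, 1) on a BC1-compressed image (4x4 texel blocks)
// scales to an effective granularity of (4, 4, 1); depth is left unscaled because the
// compressed block size here is two-dimensional.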
8411
8412// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
8413static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
8414    bool valid = true;
8415    if ((vk_safe_modulo(extent->depth, granularity->depth) != 0) || (vk_safe_modulo(extent->width, granularity->width) != 0) ||
8416        (vk_safe_modulo(extent->height, granularity->height) != 0)) {
8417        valid = false;
8418    }
8419    return valid;
8420}
8421
8422// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
8423static inline bool CheckItgOffset(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
8424                                  const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member) {
8425    bool skip = false;
8426    VkExtent3D offset_extent = {};
8427    offset_extent.width = static_cast<uint32_t>(abs(offset->x));
8428    offset_extent.height = static_cast<uint32_t>(abs(offset->y));
8429    offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
8430    if (IsExtentZero(granularity)) {
8431        // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
        if (!IsExtentZero(&offset_extent)) {
8433            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8434                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8435                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) "
8436                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
8437                            function, i, member, offset->x, offset->y, offset->z);
8438        }
    } else {
        // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be
        // integer multiples of the image transfer granularity.
        if (!IsExtentAligned(&offset_extent, granularity)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be integer "
                            "multiples of this command buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
                            function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
                            granularity->depth);
8449        }
8450    }
8451    return skip;
8452}
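
// e.g. with a scaled granularity of (4, 4, 1), an offset of (8, 4, 0) passes this check,
// while (2, 0, 0) fails because 2 is not a multiple of the granularity width 4.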
8453
8454// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
8455static inline bool CheckItgExtent(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
8456                                  const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
8457                                  const uint32_t i, const char *function, const char *member) {
8458    bool skip = false;
8459    if (IsExtentZero(granularity)) {
8460        // If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
8461        // subresource extent.
        if (!IsExtentEqual(extent, subresource_extent)) {
8463            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8464                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8465                            "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
8466                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
8467                            function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
8468                            subresource_extent->height, subresource_extent->depth);
8469        }
    } else {
        // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be
        // integer multiples of the image transfer granularity, or the offset + extent dimensions must match the image
        // subresource extent dimensions.
        VkExtent3D offset_extent_sum = {};
        offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
        offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
        offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
        if (!IsExtentAligned(extent, granularity) && !IsExtentEqual(&offset_extent_sum, subresource_extent)) {
8479            skip |=
8480                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8481                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8482                        "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command buffer's "
8483                        "queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
8484                        "extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
8485                        function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
8486                        granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
8487                        subresource_extent->width, subresource_extent->height, subresource_extent->depth);
8488        }
8489    }
8490    return skip;
8491}
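
// e.g. with granularity (4, 4, 1) and a 100x60x1 subresource, an extent of (64, 32, 1) passes
// by alignment, and an extent of (66, 32, 1) at offset (34, 28, 0) passes because
// offset + extent equals the subresource extent (100, 60, 1).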
8492
8493// Check a uint32_t width or stride value against a queue family's Image Transfer Granularity width value
8494static inline bool CheckItgInt(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const uint32_t value,
8495                               const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
8496    bool skip = false;
8497    if (vk_safe_modulo(value, granularity) != 0) {
8498        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8499                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8500                        "%s: pRegion[%d].%s (%d) must be an even integer multiple of this command buffer's queue family image "
8501                        "transfer granularity width (%d).",
8502                        function, i, member, value, granularity);
8503    }
8504    return skip;
8505}
8506
8507// Check a VkDeviceSize value against a queue family's Image Transfer Granularity width value
8508static inline bool CheckItgSize(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkDeviceSize value,
8509                                const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
8510    bool skip = false;
8511    if (vk_safe_modulo(value, granularity) != 0) {
8512        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8513                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8514                        "%s: pRegion[%d].%s (%" PRIdLEAST64
8515                        ") must be an even integer multiple of this command buffer's queue family image transfer "
8516                        "granularity width (%d).",
8517                        function, i, member, value, granularity);
8518    }
8519    return skip;
8520}
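
// e.g. with a granularity width of 4, a bufferOffset of 256 or a bufferRowLength of 64 passes
// the two checks above, while a bufferOffset of 6 fails.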
8521
// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
8523static inline bool ValidateCopyImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
8524                                                                    const IMAGE_STATE *img, const VkImageCopy *region,
8525                                                                    const uint32_t i, const char *function) {
8526    bool skip = false;
8527    VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
8528    skip |= CheckItgOffset(dev_data, cb_node, &region->srcOffset, &granularity, i, function, "srcOffset");
8529    skip |= CheckItgOffset(dev_data, cb_node, &region->dstOffset, &granularity, i, function, "dstOffset");
8530    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->dstSubresource);
8531    skip |= CheckItgExtent(dev_data, cb_node, &region->extent, &region->dstOffset, &granularity, &subresource_extent, i, function,
8532                           "extent");
8533    return skip;
8534}
8535
// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
8537static inline bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
8538                                                                          const IMAGE_STATE *img, const VkBufferImageCopy *region,
8539                                                                          const uint32_t i, const char *function) {
8540    bool skip = false;
    if (vk_format_is_compressed(img->createInfo.format)) {
        // TODO: Add granularity checking for compressed formats, per the rules below
        // (see the worked example after this function):
8543
8544        // bufferRowLength must be a multiple of the compressed texel block width
8545        // bufferImageHeight must be a multiple of the compressed texel block height
8546        // all members of imageOffset must be a multiple of the corresponding dimensions of the compressed texel block
8547        // bufferOffset must be a multiple of the compressed texel block size in bytes
8548        // imageExtent.width must be a multiple of the compressed texel block width or (imageExtent.width + imageOffset.x)
8549        //     must equal the image subresource width
8550        // imageExtent.height must be a multiple of the compressed texel block height or (imageExtent.height + imageOffset.y)
8551        //     must equal the image subresource height
8552        // imageExtent.depth must be a multiple of the compressed texel block depth or (imageExtent.depth + imageOffset.z)
8553        //     must equal the image subresource depth
8554    } else {
8555        VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
8556        skip |= CheckItgSize(dev_data, cb_node, region->bufferOffset, granularity.width, i, function, "bufferOffset");
8557        skip |= CheckItgInt(dev_data, cb_node, region->bufferRowLength, granularity.width, i, function, "bufferRowLength");
8558        skip |= CheckItgInt(dev_data, cb_node, region->bufferImageHeight, granularity.width, i, function, "bufferImageHeight");
8559        skip |= CheckItgOffset(dev_data, cb_node, &region->imageOffset, &granularity, i, function, "imageOffset");
8560        VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource);
8561        skip |= CheckItgExtent(dev_data, cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent, i,
8562                               function, "imageExtent");
8563    }
8564    return skip;
8565}
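
// Worked example of the compressed-format rules listed above (not yet enforced here): for a
// BC1 image (4x4 texel blocks, 8 bytes per block), a region with bufferOffset = 256,
// bufferRowLength = 64, and imageOffset = (8, 4, 0) satisfies the block-multiple requirements.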
8566
8567VKAPI_ATTR void VKAPI_CALL
8568CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8569             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
8570    bool skip_call = false;
8571    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8572    std::unique_lock<std::mutex> lock(global_lock);
8573
8574    auto cb_node = getCBNode(dev_data, commandBuffer);
8575    auto src_image_state = getImageState(dev_data, srcImage);
8576    auto dst_image_state = getImageState(dev_data, dstImage);
8577    if (cb_node && src_image_state && dst_image_state) {
8578        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImage()", VALIDATION_ERROR_02533);
8579        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyImage()", VALIDATION_ERROR_02534);
8580        // Update bindings between images and cmd buffer
8581        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
8582        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
8583        // Validate that SRC & DST images have correct usage flags set
8584        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8585                                             VALIDATION_ERROR_01178, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8586        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8587                                             VALIDATION_ERROR_01181, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8588        std::function<bool()> function = [=]() {
8589            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImage()");
8590        };
8591        cb_node->validate_functions.push_back(function);
8592        function = [=]() {
8593            SetImageMemoryValid(dev_data, dst_image_state, true);
8594            return false;
8595        };
8596        cb_node->validate_functions.push_back(function);
8597
8598        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
8599        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYIMAGE);
8600        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage()", VALIDATION_ERROR_01194);
8601        for (uint32_t i = 0; i < regionCount; ++i) {
8602            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].srcSubresource, srcImageLayout,
8603                                                 VALIDATION_ERROR_01180);
8604            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].dstSubresource, dstImageLayout,
8605                                               VALIDATION_ERROR_01183);
8606            skip_call |= ValidateCopyImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i], i,
8607                                                                          "vkCmdCopyImage()");
8608        }
8609    } else {
8610        assert(0);
8611    }
8612    lock.unlock();
8613    if (!skip_call)
8614        dev_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8615                                              pRegions);
8616}
8617
8618// Validate that an image's sampleCount matches the requirement for a specific API call
8619static inline bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
8620                                            const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
8621    bool skip = false;
8622    if (image_state->createInfo.samples != sample_count) {
8623        skip =
8624            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8625                    reinterpret_cast<uint64_t &>(image_state->image), 0, msgCode, "DS",
8626                    "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s. %s", location,
8627                    reinterpret_cast<uint64_t &>(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
8628                    string_VkSampleCountFlagBits(sample_count), validation_error_map[msgCode]);
8629    }
8630    return skip;
8631}
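
// e.g. vkCmdBlitImage() below requires VK_SAMPLE_COUNT_1_BIT for both images, so an image
// created with VK_SAMPLE_COUNT_4_BIT fails this check for that call.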
8632
8633VKAPI_ATTR void VKAPI_CALL
8634CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8635             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
8636    bool skip_call = false;
8637    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8638    std::unique_lock<std::mutex> lock(global_lock);
8639
8640    auto cb_node = getCBNode(dev_data, commandBuffer);
8641    auto src_image_state = getImageState(dev_data, srcImage);
8642    auto dst_image_state = getImageState(dev_data, dstImage);
8643    if (cb_node && src_image_state && dst_image_state) {
8644        skip_call |= ValidateImageSampleCount(dev_data, src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage",
8645                                              VALIDATION_ERROR_02194);
8646        skip_call |= ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage",
8647                                              VALIDATION_ERROR_02195);
8648        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdBlitImage()", VALIDATION_ERROR_02539);
8649        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdBlitImage()", VALIDATION_ERROR_02540);
8650        // Update bindings between images and cmd buffer
8651        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
8652        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
8653        // Validate that SRC & DST images have correct usage flags set
8654        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8655                                             VALIDATION_ERROR_02182, "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8656        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8657                                             VALIDATION_ERROR_02186, "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8658        std::function<bool()> function = [=]() {
8659            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdBlitImage()");
8660        };
8661        cb_node->validate_functions.push_back(function);
8662        function = [=]() {
8663            SetImageMemoryValid(dev_data, dst_image_state, true);
8664            return false;
8665        };
8666        cb_node->validate_functions.push_back(function);
8667
8668        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
8669        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BLITIMAGE);
8670        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBlitImage()", VALIDATION_ERROR_01300);
8671    } else {
8672        assert(0);
8673    }
8674    lock.unlock();
8675    if (!skip_call)
8676        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8677                                              pRegions, filter);
8678}
8679
8680VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
8681                                                VkImage dstImage, VkImageLayout dstImageLayout,
8682                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8683    bool skip_call = false;
8684    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8685    std::unique_lock<std::mutex> lock(global_lock);
8686
8687    auto cb_node = getCBNode(dev_data, commandBuffer);
8688    auto src_buff_state = getBufferState(dev_data, srcBuffer);
8689    auto dst_image_state = getImageState(dev_data, dstImage);
8690    if (cb_node && src_buff_state && dst_image_state) {
8691        skip_call |= ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT,
8692                                              "vkCmdCopyBufferToImage(): dstImage", VALIDATION_ERROR_01232);
8693        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_state, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_02535);
8694        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_02536);
8695        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_state);
8696        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
8697        skip_call |=
8698            ValidateBufferUsageFlags(dev_data, src_buff_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, VALIDATION_ERROR_01230,
8699                                     "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
8700        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8701                                             VALIDATION_ERROR_01231, "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8702        std::function<bool()> function = [=]() {
8703            SetImageMemoryValid(dev_data, dst_image_state, true);
8704            return false;
8705        };
8706        cb_node->validate_functions.push_back(function);
8707        function = [=]() { return ValidateBufferMemoryIsValid(dev_data, src_buff_state, "vkCmdCopyBufferToImage()"); };
8708        cb_node->validate_functions.push_back(function);
8709
8710        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
8711        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE);
8712        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_01242);
8713        for (uint32_t i = 0; i < regionCount; ++i) {
8714            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].imageSubresource, dstImageLayout,
8715                                               VALIDATION_ERROR_01234);
8716            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i], i,
8717                                                                                "vkCmdCopyBufferToImage()");
8718        }
8719    } else {
8720        assert(0);
8721    }
8722    lock.unlock();
8723    if (!skip_call)
8724        dev_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
8725}
8726
8727VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
8728                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
8729                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8730    bool skip_call = false;
8731    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8732    std::unique_lock<std::mutex> lock(global_lock);
8733
8734    auto cb_node = getCBNode(dev_data, commandBuffer);
8735    auto src_image_state = getImageState(dev_data, srcImage);
8736    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
8737    if (cb_node && src_image_state && dst_buff_state) {
8738        skip_call |= ValidateImageSampleCount(dev_data, src_image_state, VK_SAMPLE_COUNT_1_BIT,
8739                                              "vkCmdCopyImageToBuffer(): srcImage", VALIDATION_ERROR_01249);
8740        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_02537);
8741        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_02538);
8742        // Update bindings between buffer/image and cmd buffer
8743        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
8744        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
8745        // Validate that SRC image & DST buffer have correct usage flags set
8746        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8747                                             VALIDATION_ERROR_01248, "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8748        skip_call |=
8749            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01252,
8750                                     "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8751        std::function<bool()> function = [=]() {
8752            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImageToBuffer()");
8753        };
8754        cb_node->validate_functions.push_back(function);
8755        function = [=]() {
8756            SetBufferMemoryValid(dev_data, dst_buff_state, true);
8757            return false;
8758        };
8759        cb_node->validate_functions.push_back(function);
8760
8761        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
8762        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER);
8763        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_01260);
8764        for (uint32_t i = 0; i < regionCount; ++i) {
8765            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].imageSubresource, srcImageLayout,
8766                                                 VALIDATION_ERROR_01251);
8767            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, src_image_state, &pRegions[i], i,
                                                                                "vkCmdCopyImageToBuffer()");
8769        }
8770    } else {
8771        assert(0);
8772    }
8773    lock.unlock();
8774    if (!skip_call)
8775        dev_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
8776}
8777
8778VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
8779                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
8780    bool skip_call = false;
8781    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8782    std::unique_lock<std::mutex> lock(global_lock);
8783
8784    auto cb_node = getCBNode(dev_data, commandBuffer);
8785    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
8786    if (cb_node && dst_buff_state) {
8787        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_02530);
8788        // Update bindings between buffer and cmd buffer
8789        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
8790        // Validate that DST buffer has correct usage flags set
8791        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8792                                              VALIDATION_ERROR_01146, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8793        std::function<bool()> function = [=]() {
8794            SetBufferMemoryValid(dev_data, dst_buff_state, true);
8795            return false;
8796        };
8797        cb_node->validate_functions.push_back(function);
8798
8799        skip_call |= ValidateCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
8800        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_UPDATEBUFFER);
8801        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()", VALIDATION_ERROR_01155);
8802    } else {
8803        assert(0);
8804    }
8805    lock.unlock();
8806    if (!skip_call)
8807        dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
8808}
8809
8810VKAPI_ATTR void VKAPI_CALL
8811CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
8812    bool skip_call = false;
8813    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8814    std::unique_lock<std::mutex> lock(global_lock);
8815
8816    auto cb_node = getCBNode(dev_data, commandBuffer);
8817    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
8818    if (cb_node && dst_buff_state) {
8819        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdFillBuffer()", VALIDATION_ERROR_02529);
8820        // Update bindings between buffer and cmd buffer
8821        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
8822        // Validate that DST buffer has correct usage flags set
8823        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8824                                              VALIDATION_ERROR_01137, "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8825        std::function<bool()> function = [=]() {
8826            SetBufferMemoryValid(dev_data, dst_buff_state, true);
8827            return false;
8828        };
8829        cb_node->validate_functions.push_back(function);
8830
8831        skip_call |= ValidateCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
8832        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_FILLBUFFER);
8833        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdFillBuffer()", VALIDATION_ERROR_01142);
8834    } else {
8835        assert(0);
8836    }
8837    lock.unlock();
8838    if (!skip_call)
8839        dev_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
8840}
8841
8842VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
8843                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
8844                                               const VkClearRect *pRects) {
8845    bool skip_call = false;
8846    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8847    std::unique_lock<std::mutex> lock(global_lock);
8848    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8849    if (pCB) {
8850        skip_call |= ValidateCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
8851        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_CLEARATTACHMENTS);
8852        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
        if (!hasDrawCmd(pCB) && (rectCount > 0) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
            // There are times when an app legitimately needs ClearAttachments (generally when reusing an attachment inside
            // a render pass). TODO: Make this warning more specific so it does not trigger for uses that must call
            // CmdClearAttachments; otherwise this is better treated as a performance warning.
8859            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8860                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t &>(commandBuffer), 0,
8861                                 DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
8862                                 "vkCmdClearAttachments() issued on command buffer object 0x%p prior to any Draw Cmds."
8863                                 " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
8864                                 commandBuffer);
8865        }
8866        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments()", VALIDATION_ERROR_01122);
8867    }
8868
    // Validate that each attachment to be cleared is in the reference list of the active subpass
    if (pCB && pCB->activeRenderPass) {
8871        const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->createInfo.ptr();
8872        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
8873        auto framebuffer = getFramebufferState(dev_data, pCB->activeFramebuffer);
8874
8875        for (uint32_t i = 0; i < attachmentCount; i++) {
8876            auto clear_desc = &pAttachments[i];
8877            VkImageView image_view = VK_NULL_HANDLE;
8878
8879            if (clear_desc->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
8880                if (clear_desc->colorAttachment >= pSD->colorAttachmentCount) {
8881                    skip_call |= log_msg(
8882                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8883                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_01114, "DS",
8884                        "vkCmdClearAttachments() color attachment index %d out of range for active subpass %d. %s",
8885                        clear_desc->colorAttachment, pCB->activeSubpass, validation_error_map[VALIDATION_ERROR_01114]);
                } else if (pSD->pColorAttachments[clear_desc->colorAttachment].attachment == VK_ATTACHMENT_UNUSED) {
8888                    skip_call |= log_msg(
8889                        dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8890                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8891                        "vkCmdClearAttachments() color attachment index %d is VK_ATTACHMENT_UNUSED; ignored.",
8892                        clear_desc->colorAttachment);
                } else {
8895                    image_view = framebuffer->createInfo.pAttachments[pSD->pColorAttachments[clear_desc->colorAttachment].attachment];
8896                }
8897            } else if (clear_desc->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
8898                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
8899                    (pSD->pDepthStencilAttachment->attachment ==
8900                     VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass
8901
8902                    skip_call |= log_msg(
8903                        dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8904                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8905                        "vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored");
                } else {
8908                    image_view = framebuffer->createInfo.pAttachments[pSD->pDepthStencilAttachment->attachment];
8909                }
8910            }
8911
8912            if (image_view) {
8913                auto image_view_state = getImageViewState(dev_data, image_view);
8914                auto aspects_present = image_view_state->create_info.subresourceRange.aspectMask;
8915                auto extra_aspects = clear_desc->aspectMask & ~aspects_present;
8916                // TODO: This is a different check than 01125. Need a new valid usage statement for this case, or should kill check.
8917                if (extra_aspects) {
8918                    skip_call |= log_msg(
8919                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
8920                            reinterpret_cast<uint64_t &>(image_view), __LINE__, VALIDATION_ERROR_01125, "DS",
8921                            "vkCmdClearAttachments() with aspects not present in image view: %s. %s",
8922                            string_VkImageAspectFlagBits((VkImageAspectFlagBits)extra_aspects),
8923                            validation_error_map[VALIDATION_ERROR_01125]);
8924                }
8925            }
8926        }
8927    }
8928    lock.unlock();
8929    if (!skip_call)
8930        dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
8931}
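
// Application-side sketch of a call validated above: clearing color attachment 0 of the
// active subpass over a caller-chosen rectangle (render_area is a hypothetical VkRect2D):
//
//     VkClearAttachment attachment = {};
//     attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
//     attachment.colorAttachment = 0;
//     attachment.clearValue.color = {{0.0f, 0.0f, 0.0f, 1.0f}};
//     VkClearRect rect = {render_area, 0 /*baseArrayLayer*/, 1 /*layerCount*/};
//     vkCmdClearAttachments(commandBuffer, 1, &attachment, 1, &rect);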
8932
8933VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
8934                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
8935                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
8936    bool skip_call = false;
8937    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8938    std::unique_lock<std::mutex> lock(global_lock);
8939    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8940
8941    auto cb_node = getCBNode(dev_data, commandBuffer);
8942    auto image_state = getImageState(dev_data, image);
8943    if (cb_node && image_state) {
8944        skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCmdClearColorImage()", VALIDATION_ERROR_02527);
8945        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
8946        std::function<bool()> function = [=]() {
8947            SetImageMemoryValid(dev_data, image_state, true);
8948            return false;
8949        };
8950        cb_node->validate_functions.push_back(function);
8951
8952        skip_call |= ValidateCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
8953        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearColorImage()", VALIDATION_ERROR_01096);
        // Only verify per-range layouts when the lookups above succeeded; cb_node is dereferenced inside
        for (uint32_t i = 0; i < rangeCount; ++i) {
            skip_call |= VerifyClearImageLayout(dev_data, cb_node, image, pRanges[i], imageLayout, "vkCmdClearColorImage()");
        }
    } else {
        assert(0);
    }
8961    lock.unlock();
8962    if (!skip_call)
8963        dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
8964}
8965
8966VKAPI_ATTR void VKAPI_CALL
8967CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
8968                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
8969                          const VkImageSubresourceRange *pRanges) {
8970    bool skip_call = false;
8971    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8972    std::unique_lock<std::mutex> lock(global_lock);
8973    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8974
8975    auto cb_node = getCBNode(dev_data, commandBuffer);
8976    auto image_state = getImageState(dev_data, image);
8977    if (cb_node && image_state) {
8978        skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCmdClearDepthStencilImage()", VALIDATION_ERROR_02528);
8979        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
8980        std::function<bool()> function = [=]() {
8981            SetImageMemoryValid(dev_data, image_state, true);
8982            return false;
8983        };
8984        cb_node->validate_functions.push_back(function);
8985
8986        skip_call |= ValidateCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
8987        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearDepthStencilImage()", VALIDATION_ERROR_01111);
        // Only verify per-range layouts when the lookups above succeeded; cb_node is dereferenced inside
        for (uint32_t i = 0; i < rangeCount; ++i) {
            skip_call |= VerifyClearImageLayout(dev_data, cb_node, image, pRanges[i], imageLayout, "vkCmdClearDepthStencilImage()");
        }
    } else {
        assert(0);
    }
8995    lock.unlock();
8996    if (!skip_call)
8997        dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
8998}
8999
9000VKAPI_ATTR void VKAPI_CALL
9001CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
9002                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
9003    bool skip_call = false;
9004    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9005    std::unique_lock<std::mutex> lock(global_lock);
9006
9007    auto cb_node = getCBNode(dev_data, commandBuffer);
9008    auto src_image_state = getImageState(dev_data, srcImage);
9009    auto dst_image_state = getImageState(dev_data, dstImage);
9010    if (cb_node && src_image_state && dst_image_state) {
9011        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdResolveImage()", VALIDATION_ERROR_02541);
9012        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdResolveImage()", VALIDATION_ERROR_02542);
9013        // Update bindings between images and cmd buffer
9014        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
9015        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
9016        std::function<bool()> function = [=]() {
9017            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdResolveImage()");
9018        };
9019        cb_node->validate_functions.push_back(function);
9020        function = [=]() {
9021            SetImageMemoryValid(dev_data, dst_image_state, true);
9022            return false;
9023        };
9024        cb_node->validate_functions.push_back(function);
9025
9026        skip_call |= ValidateCmd(dev_data, cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
9027        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_RESOLVEIMAGE);
9028        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdResolveImage()", VALIDATION_ERROR_01335);
9029    } else {
9030        assert(0);
9031    }
9032    lock.unlock();
9033    if (!skip_call)
9034        dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
9035                                                 pRegions);
9036}
9037
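// setEventStageMask() below is not called directly at record time: it is bound into the
// eventUpdates lambdas in CmdSetEvent()/CmdResetEvent() and executed at queue submit time,
// recording the stage mask an event was last signaled with for both the command buffer and
// the queue. It returns false because it never causes the call to be skipped.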
9038bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
9039    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9040    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9041    if (pCB) {
9042        pCB->eventToStageMap[event] = stageMask;
9043    }
9044    auto queue_data = dev_data->queueMap.find(queue);
9045    if (queue_data != dev_data->queueMap.end()) {
9046        queue_data->second.eventToStageMap[event] = stageMask;
9047    }
9048    return false;
9049}
9050
9051VKAPI_ATTR void VKAPI_CALL
9052CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
9053    bool skip_call = false;
9054    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9055    std::unique_lock<std::mutex> lock(global_lock);
9056    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9057    if (pCB) {
9058        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
9059        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETEVENT);
9060        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_00238);
9061        auto event_state = getEventNode(dev_data, event);
9062        if (event_state) {
9063            addCommandBufferBinding(&event_state->cb_bindings,
9064                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
9065            event_state->cb_bindings.insert(pCB);
9066        }
9067        pCB->events.push_back(event);
9068        if (!pCB->waitedEvents.count(event)) {
9069            pCB->writeEventsBeforeWait.push_back(event);
9070        }
9071        std::function<bool(VkQueue)> eventUpdate =
9072            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
9073        pCB->eventUpdates.push_back(eventUpdate);
9074    }
9075    lock.unlock();
9076    if (!skip_call)
9077        dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
9078}
9079
9080VKAPI_ATTR void VKAPI_CALL
9081CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
9082    bool skip_call = false;
9083    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9084    std::unique_lock<std::mutex> lock(global_lock);
9085    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9086    if (pCB) {
9087        skip_call |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
9088        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_RESETEVENT);
9089        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_00249);
9090        auto event_state = getEventNode(dev_data, event);
9091        if (event_state) {
9092            addCommandBufferBinding(&event_state->cb_bindings,
9093                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
9094            event_state->cb_bindings.insert(pCB);
9095        }
9096        pCB->events.push_back(event);
9097        if (!pCB->waitedEvents.count(event)) {
9098            pCB->writeEventsBeforeWait.push_back(event);
9099        }
9100        std::function<bool(VkQueue)> eventUpdate =
9101            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
9102        pCB->eventUpdates.push_back(eventUpdate);
9103    }
9104    lock.unlock();
9105    if (!skip_call)
9106        dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
9107}
9108
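// For one aspect of a single mip level / array layer, validate that the barrier's oldLayout matches the
// layout currently tracked for that subresource, then record newLayout as the current layout. Aspects not
// present in the barrier's aspectMask are skipped; untracked subresources seed the tracking and pass.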
9109static bool TransitionImageAspectLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkImageMemoryBarrier *mem_barrier,
9110                                        uint32_t level, uint32_t layer, VkImageAspectFlags aspect) {
9112    if (!(mem_barrier->subresourceRange.aspectMask & aspect)) {
9113        return false;
9114    }
9115    VkImageSubresource sub = {aspect, level, layer};
9116    IMAGE_CMD_BUF_LAYOUT_NODE node;
9117    if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
9118        SetLayout(pCB, mem_barrier->image, sub,
9119                  IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
9120        return false;
9121    }
9122    bool skip = false;
9123    if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
9124        // TODO: Set memory invalid which is in mem_tracker currently
9125    } else if (node.layout != mem_barrier->oldLayout) {
9126        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9127                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9128                        "You cannot transition the layout of aspect %d from %s when current layout is %s.",
9129                        aspect, string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
9130    }
9131    SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
9132    return skip;
9133}
9134
9135// TODO: Separate validation and layout state updates
9136static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
9137                                   const VkImageMemoryBarrier *pImgMemBarriers) {
9138    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9139    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9140    bool skip = false;
9141    uint32_t levelCount = 0;
9142    uint32_t layerCount = 0;
9143
9144    for (uint32_t i = 0; i < memBarrierCount; ++i) {
9145        auto mem_barrier = &pImgMemBarriers[i];
9146        if (!mem_barrier)
9147            continue;
9148        // TODO: Do not iterate over every possibility - consolidate where
9149        // possible
9150        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
9151
9152        for (uint32_t j = 0; j < levelCount; j++) {
9153            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
9154            for (uint32_t k = 0; k < layerCount; k++) {
9155                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
9156                skip |= TransitionImageAspectLayout(dev_data, pCB, mem_barrier, level, layer, VK_IMAGE_ASPECT_COLOR_BIT);
9157                skip |= TransitionImageAspectLayout(dev_data, pCB, mem_barrier, level, layer, VK_IMAGE_ASPECT_DEPTH_BIT);
9158                skip |= TransitionImageAspectLayout(dev_data, pCB, mem_barrier, level, layer, VK_IMAGE_ASPECT_STENCIL_BIT);
9159                skip |= TransitionImageAspectLayout(dev_data, pCB, mem_barrier, level, layer, VK_IMAGE_ASPECT_METADATA_BIT);
9160            }
9161        }
9162    }
9163    return skip;
9164}
9165
9166// Print readable FlagBits in FlagMask
9167static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
9168    std::string result;
9169    std::string separator;
9170
9171    if (accessMask == 0) {
9172        result = "[None]";
9173    } else {
9174        result = "[";
9175        for (uint32_t i = 0; i < 32; i++) {
9176            if (accessMask & (1u << i)) {
9177                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
9178                separator = " | ";
9179            }
9180        }
9181        result = result + "]";
9182    }
9183    return result;
9184}
9185
9186// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
9187// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
9188// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
9189static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
9190                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
9191                             const char *type) {
9192    bool skip_call = false;
9193
9194    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
9195        if (accessMask & ~(required_bit | optional_bits)) {
9196            // TODO: Verify against Valid Use
9197            skip_call |=
9198                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9199                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
9200                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
9201        }
9202    } else {
9203        if (!required_bit) {
9204            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9205                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask 0x%X %s must contain at least one of access bits 0x%X "
9206                                                                  "%s when layout is %s, unless the app has previously added a "
9207                                                                  "barrier for this transition.",
9208                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
9209                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
9210        } else {
9211            std::string opt_bits;
9212            if (optional_bits != 0) {
9213                std::stringstream ss;
9214                ss << optional_bits;
9215                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
9216            }
9217            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9218                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask 0x%X %s must have required access bit 0x%X %s %s when "
9219                                                                  "layout is %s, unless the app has previously added a barrier for "
9220                                                                  "this transition.",
9221                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
9222                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
9223        }
9224    }
9225    return skip_call;
9226}
9227
9228static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
9229                                        const VkImageLayout &layout, const char *type) {
9230    bool skip_call = false;
9231    switch (layout) {
9232    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
9233        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
9234                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
9235        break;
9236    }
9237    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
9238        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
9239                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
9240        break;
9241    }
9242    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
9243        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
9244        break;
9245    }
9246    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
9247        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
9248                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
9249                                      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
9250        break;
9251    }
9252    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
9253        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
9254                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
9255        break;
9256    }
9257    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
9258        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
9259        break;
9260    }
9261    case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: {
9262        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_MEMORY_READ_BIT, 0, type);
9263        break;
9264    }
9265    case VK_IMAGE_LAYOUT_UNDEFINED: {
9266        if (accessMask != 0) {
9267            // TODO: Verify against Valid Use section spec
9268            skip_call |=
9269                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9270                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
9271                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
9272        }
9273        break;
9274    }
9275    case VK_IMAGE_LAYOUT_GENERAL:
9276    default: { break; }
9277    }
9278    return skip_call;
9279}
9280
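// Validate the memory, buffer, and image barriers passed to vkCmdWaitEvents/vkCmdPipelineBarrier:
// self-dependency requirements inside a render pass, queue family ownership-transfer rules, image layout
// transitions and aspect masks, and subresource/buffer ranges. Returns true if any error was logged.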
9281static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
9282                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
9283                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
9284                             const VkImageMemoryBarrier *pImageMemBarriers) {
9285    bool skip = false;
9286    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9287    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9288    if (pCB->activeRenderPass && memBarrierCount) {
9289        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
9290            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9291                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
9292                                                             "with no self dependency specified.",
9293                            funcName, pCB->activeSubpass);
9294        }
9295    }
9296    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
9297        auto mem_barrier = &pImageMemBarriers[i];
9298        auto image_data = getImageState(dev_data, mem_barrier->image);
9299        if (image_data) {
9300            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
9301            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
9302            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
9303                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
9304                // be VK_QUEUE_FAMILY_IGNORED
9305                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
9306                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9307                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
9308                                    "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
9309                                    "VK_SHARING_MODE_CONCURRENT. Src and dst "
9310                                    "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
9311                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
9312                }
9313            } else {
9314                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
9315                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
9316                // or both be a valid queue family
9317                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
9318                    (src_q_f_index != dst_q_f_index)) {
9319                    skip |=
9320                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9321                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
9322                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
9323                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
9324                                                                     "must be.",
9325                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
9326                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
9327                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
9328                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
9329                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9330                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
9331                                    "%s: Image 0x%" PRIx64 " was created with sharingMode "
9332                                    "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
9333                                    " or dstQueueFamilyIndex %d is greater than the number of "
9334                                    "queueFamilies (" PRINTF_SIZE_T_SPECIFIER ") created for this device.",
9335                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index, dst_q_f_index,
9336                                    dev_data->phys_dev_properties.queue_family_properties.size());
9337                }
9338            }
9339        }
9340
9341        if (mem_barrier) {
9342            if (mem_barrier->oldLayout != mem_barrier->newLayout) {
9343                skip |=
9344                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
9345                skip |=
9346                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
9347            }
9348            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
9349                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9350                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
9351                                                                 "PREINITIALIZED.",
9352                                funcName);
9353            }
9354            auto image_data = getImageState(dev_data, mem_barrier->image);
9355            VkFormat format = VK_FORMAT_UNDEFINED;
9356            uint32_t arrayLayers = 0, mipLevels = 0;
9357            bool imageFound = false;
9358            if (image_data) {
9359                format = image_data->createInfo.format;
9360                arrayLayers = image_data->createInfo.arrayLayers;
9361                mipLevels = image_data->createInfo.mipLevels;
9362                imageFound = true;
9363            } else if (dev_data->device_extensions.wsi_enabled) {
9364                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
9365                if (imageswap_data) {
9366                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
9367                    if (swapchain_data) {
9368                        format = swapchain_data->createInfo.imageFormat;
9369                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
9370                        mipLevels = 1;
9371                        imageFound = true;
9372                    }
9373                }
9374            }
9375            if (imageFound) {
9376                auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
9377                skip |= ValidateImageAspectMask(dev_data, image_data->image, format, aspect_mask, funcName);
9378                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
9379                                     ? 1
9380                                     : mem_barrier->subresourceRange.layerCount;
9381                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
9382                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9383                                    __LINE__, DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the "
9384                                                                               "baseArrayLayer (%d) and layerCount (%d) be less "
9385                                                                               "than or equal to the total number of layers (%d).",
9386                                    funcName, mem_barrier->subresourceRange.baseArrayLayer,
9387                                    mem_barrier->subresourceRange.layerCount, arrayLayers);
9388                }
9389                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
9390                                     ? 1
9391                                     : mem_barrier->subresourceRange.levelCount;
9392                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
9393                    skip |= log_msg(
9394                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9395                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the baseMipLevel "
9396                                                         "(%d) and levelCount (%d) be less than or equal to "
9397                                                         "the total number of levels (%d).",
9398                        funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount, mipLevels);
9399                }
9400            }
9401        }
9402    }
9403    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
9404        auto mem_barrier = &pBufferMemBarriers[i];
9405        if (pCB->activeRenderPass) {
9406            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9407                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
9408        }
9409        if (!mem_barrier)
9410            continue;
9411
9412        // Validate buffer barrier queue family indices
9413        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
9414             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
9415            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
9416             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
9417            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9418                            DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
9419                            "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
9420                            "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
9421                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
9422                            dev_data->phys_dev_properties.queue_family_properties.size());
9423        }
9424
9425        auto buffer_state = getBufferState(dev_data, mem_barrier->buffer);
9426        if (buffer_state) {
9427            auto buffer_size = buffer_state->requirements.size;
9428            if (mem_barrier->offset >= buffer_size) {
9429                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9430                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64
9431                                                                 " which is not less than total size 0x%" PRIx64 ".",
9432                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
9433                                reinterpret_cast<const uint64_t &>(mem_barrier->offset),
9434                                reinterpret_cast<const uint64_t &>(buffer_size));
9435            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
9436                skip |= log_msg(
9437                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9438                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
9439                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
9440                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
9441                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
9442                    reinterpret_cast<const uint64_t &>(buffer_size));
9443            }
9444        }
9445    }
9446    return skip;
9447}
9448
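// Queue-submit-time callback for vkCmdWaitEvents: OR together the stage masks that the waited events were
// actually set with (checking per-queue state first, then global event state) and verify that srcStageMask
// matches, allowing an extra VK_PIPELINE_STAGE_HOST_BIT for events signaled from the host via vkSetEvent.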
9449bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
9450    bool skip_call = false;
9451    VkPipelineStageFlags stageMask = 0;
9452    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
9453    for (uint32_t i = 0; i < eventCount; ++i) {
9454        auto event = pCB->events[firstEventIndex + i];
9455        auto queue_data = dev_data->queueMap.find(queue);
9456        if (queue_data == dev_data->queueMap.end())
9457            return false;
9458        auto event_data = queue_data->second.eventToStageMap.find(event);
9459        if (event_data != queue_data->second.eventToStageMap.end()) {
9460            stageMask |= event_data->second;
9461        } else {
9462            auto global_event_data = getEventNode(dev_data, event);
9463            if (!global_event_data) {
9464                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
9465                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
9466                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
9467                                     reinterpret_cast<const uint64_t &>(event));
9468            } else {
9469                stageMask |= global_event_data->stageMask;
9470            }
9471        }
9472    }
9473    // TODO: Need to validate that host_bit is only set if set event is called
9474    // but set event can be called at any time.
9475    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
9476        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9477                             VALIDATION_ERROR_00254, "DS", "Submitting cmdbuffer with call to vkCmdWaitEvents "
9478                                                           "using srcStageMask 0x%X which must be the bitwise "
9479                                                           "OR of the stageMask parameters used in calls to "
9480                                                           "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
9481                                                           "used with vkSetEvent but instead is 0x%X. %s",
9482                             sourceStageMask, stageMask, validation_error_map[VALIDATION_ERROR_00254]);
9483    }
9484    return skip_call;
9485}
9486
9487// Note that we only check bits that HAVE required queueflags -- don't care entries are skipped
9488static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
9489    {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
9490    {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
9491    {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
9492    {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
9493    {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
9494    {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
9495    {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
9496    {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
9497    {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
9498    {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
9499    {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
9500    {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
9501    {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
9502    {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
9503
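// The individual stage bits that are checked against the queue-capability table above.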
9504static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
9505                                                            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
9506                                                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
9507                                                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
9508                                                            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
9509                                                            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
9510                                                            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
9511                                                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
9512                                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
9513                                                            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
9514                                                            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
9515                                                            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
9516                                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
9517                                                            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
9518
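// Check each stage bit set in stage_mask against the table above, flagging any stage that requires a queue
// capability (graphics/compute/transfer) not present in queue_flags.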
9519bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
9520                                      VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
9521                                      UNIQUE_VALIDATION_ERROR_CODE error_code) {
9522    bool skip = false;
9523    // Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags
9524    for (const auto &item : stage_flag_bit_array) {
9525        if (stage_mask & item) {
9526            if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
9527                skip |=
9528                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9529                            reinterpret_cast<uint64_t &>(command_buffer), __LINE__, error_code, "DL",
9530                            "%s(): %s flag %s is not compatible with the queue family properties of this "
9531                            "command buffer. %s",
9532                            function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)),
9533                            validation_error_map[error_code]);
9534            }
9535        }
9536    }
9537    return skip;
9538}
9539
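// Validate both stage masks of a synchronization command against the capabilities of the queue family that
// owns the command buffer's pool. Masks containing VK_PIPELINE_STAGE_ALL_COMMANDS_BIT are skipped, since
// that bit is valid on any queue.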
9540bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE *cb_state,
9541                                                VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
9542                                                const char *function, UNIQUE_VALIDATION_ERROR_CODE error_code) {
9543    bool skip = false;
9544    uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
9545    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
9546    auto physical_device_state = getPhysicalDeviceState(instance_data, dev_data->physical_device);
9547
9548    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
9549    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
9550    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
9551
9552    if (queue_family_index < physical_device_state->queue_family_properties.size()) {
9553        VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
9554
9555        if ((source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
9556            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
9557                                                     function, "srcStageMask", error_code);
9558        }
9559        if ((dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
9560            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
9561                                                     function, "dstStageMask", error_code);
9562        }
9563    }
9564    return skip;
9565}
9566
9567VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
9568                                         VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
9569                                         uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
9570                                         uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
9571                                         uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
9572    bool skip = false;
9573    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9574    std::unique_lock<std::mutex> lock(global_lock);
9575    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
9576    if (cb_state) {
9577        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, "vkCmdWaitEvents",
9578                                                           VALIDATION_ERROR_02510);
9579        auto first_event_index = cb_state->events.size();
9580        for (uint32_t i = 0; i < eventCount; ++i) {
9581            auto event_state = getEventNode(dev_data, pEvents[i]);
9582            if (event_state) {
9583                addCommandBufferBinding(&event_state->cb_bindings,
9584                                        {reinterpret_cast<const uint64_t &>(pEvents[i]), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT},
9585                                        cb_state);
9586                event_state->cb_bindings.insert(cb_state);
9587            }
9588            cb_state->waitedEvents.insert(pEvents[i]);
9589            cb_state->events.push_back(pEvents[i]);
9590        }
9591        std::function<bool(VkQueue)> event_update =
9592            std::bind(validateEventStageMask, std::placeholders::_1, cb_state, eventCount, first_event_index, sourceStageMask);
9593        cb_state->eventUpdates.push_back(event_update);
9594        if (cb_state->state == CB_RECORDING) {
9595            skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
9596            UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_WAITEVENTS);
9597        } else {
9598            skip |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
9599        }
9600        skip |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
9601        skip |= ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
9602                                 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
9603    }
9604    lock.unlock();
9605    if (!skip)
9606        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
9607                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
9608                                               imageMemoryBarrierCount, pImageMemoryBarriers);
9609}
9610
9611VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
9612                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
9613                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
9614                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
9615                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
9616    bool skip = false;
9617    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9618    std::unique_lock<std::mutex> lock(global_lock);
9619    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
9620    if (cb_state) {
9621        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, srcStageMask, dstStageMask, "vkCmdPipelineBarrier",
9622                                                           VALIDATION_ERROR_02513);
9623        skip |= ValidateCmd(dev_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
9624        UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_PIPELINEBARRIER);
9625        skip |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
9626        skip |= ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers,
9627                                 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
9628    }
9629    lock.unlock();
9630    if (!skip)
9631        dev_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
9632                                                    pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
9633                                                    imageMemoryBarrierCount, pImageMemoryBarriers);
9634}
9635
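// Deferred queue-submit callback: mark a query as available (true) or reset/unavailable (false) in both the
// command buffer's and the submitting queue's query-state maps.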
9636bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
9637    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9638    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9639    if (pCB) {
9640        pCB->queryToStateMap[object] = value;
9641    }
9642    auto queue_data = dev_data->queueMap.find(queue);
9643    if (queue_data != dev_data->queueMap.end()) {
9644        queue_data->second.queryToStateMap[object] = value;
9645    }
9646    return false;
9647}
9648
9649VKAPI_ATTR void VKAPI_CALL
9650CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
9651    bool skip_call = false;
9652    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9653    std::unique_lock<std::mutex> lock(global_lock);
9654    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9655    if (pCB) {
9656        QueryObject query = {queryPool, slot};
9657        pCB->activeQueries.insert(query);
9658        if (!pCB->startedQueries.count(query)) {
9659            pCB->startedQueries.insert(query);
9660        }
9661        skip_call |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
9662        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_BEGINQUERY);
9663        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9664                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9665    }
9666    lock.unlock();
9667    if (!skip_call)
9668        dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
9669}
9670
9671VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
9672    bool skip_call = false;
9673    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9674    std::unique_lock<std::mutex> lock(global_lock);
9675    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9676    if (pCB) {
9677        QueryObject query = {queryPool, slot};
9678        if (!pCB->activeQueries.count(query)) {
9679            skip_call |=
9680                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9681                        VALIDATION_ERROR_01041, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d. %s",
9682                        (uint64_t)(queryPool), slot, validation_error_map[VALIDATION_ERROR_01041]);
9683        } else {
9684            pCB->activeQueries.erase(query);
9685        }
9686        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9687        pCB->queryUpdates.push_back(queryUpdate);
9688        if (pCB->state == CB_RECORDING) {
9689            skip_call |= ValidateCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
9690            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_ENDQUERY);
9691        } else {
9692            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
9693        }
9694        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9695                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9696    }
9697    lock.unlock();
9698    if (!skip_call)
9699        dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
9700}
9701
9702VKAPI_ATTR void VKAPI_CALL
9703CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
9704    bool skip_call = false;
9705    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9706    std::unique_lock<std::mutex> lock(global_lock);
9707    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9708    if (pCB) {
9709        for (uint32_t i = 0; i < queryCount; i++) {
9710            QueryObject query = {queryPool, firstQuery + i};
9711            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
9712            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
9713            pCB->queryUpdates.push_back(queryUpdate);
9714        }
9715        if (pCB->state == CB_RECORDING) {
9716            skip_call |= ValidateCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
9717            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_RESETQUERYPOOL);
9718        } else {
9719            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
9720        }
9721        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool()", VALIDATION_ERROR_01025);
9722        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9723                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9724    }
9725    lock.unlock();
9726    if (!skip_call)
9727        dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
9728}
9729
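// Queue-submit-time callback for vkCmdCopyQueryPoolResults: verify that every query in the copied range has
// completed on this queue (or globally), logging an error for any query whose results are still unavailable.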
9730bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
9731    bool skip_call = false;
9732    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
9733    auto queue_data = dev_data->queueMap.find(queue);
9734    if (queue_data == dev_data->queueMap.end())
9735        return false;
9736    for (uint32_t i = 0; i < queryCount; i++) {
9737        QueryObject query = {queryPool, firstQuery + i};
9738        auto query_data = queue_data->second.queryToStateMap.find(query);
9739        bool fail = false;
9740        if (query_data != queue_data->second.queryToStateMap.end()) {
9741            if (!query_data->second) {
9742                fail = true;
9743            }
9744        } else {
9745            auto global_query_data = dev_data->queryToStateMap.find(query);
9746            if (global_query_data != dev_data->queryToStateMap.end()) {
9747                if (!global_query_data->second) {
9748                    fail = true;
9749                }
9750            } else {
9751                fail = true;
9752            }
9753        }
9754        if (fail) {
9755            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9756                                 DRAWSTATE_INVALID_QUERY, "DS",
9757                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
9758                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
9759        }
9760    }
9761    return skip_call;
9762}
9763
9764VKAPI_ATTR void VKAPI_CALL
9765CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
9766                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
9767    bool skip_call = false;
9768    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9769    std::unique_lock<std::mutex> lock(global_lock);
9770
9771    auto cb_node = getCBNode(dev_data, commandBuffer);
9772    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
9773    if (cb_node && dst_buff_state) {
9774        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_02526);
9775        // Update bindings between buffer and cmd buffer
9776        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
9777        // Validate that DST buffer has correct usage flags set
9778        skip_call |=
9779            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01066,
9780                                     "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
9781        std::function<bool()> function = [=]() {
9782            SetBufferMemoryValid(dev_data, dst_buff_state, true);
9783            return false;
9784        };
9785        cb_node->validate_functions.push_back(function);
9786        std::function<bool(VkQueue)> queryUpdate =
9787            std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
9788        cb_node->queryUpdates.push_back(queryUpdate);
9789        if (cb_node->state == CB_RECORDING) {
9790            skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
9791            UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS);
9792        } else {
9793            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
9794        }
9795        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_01074);
9796        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9797                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, cb_node);
9798    } else {
9799        assert(0);
9800    }
9801    lock.unlock();
9802    if (!skip_call)
9803        dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
9804                                                         stride, flags);
9805}
9806
9807VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
9808                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
9809                                            const void *pValues) {
9810    bool skip_call = false;
9811    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9812    std::unique_lock<std::mutex> lock(global_lock);
9813    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9814    if (pCB) {
9815        if (pCB->state == CB_RECORDING) {
9816            skip_call |= ValidateCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
9817            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_PUSHCONSTANTS);
9818        } else {
9819            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
9820        }
9821    }
9822    skip_call |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
9823    if (0 == stageFlags) {
9824        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9825                             VALIDATION_ERROR_00996, "DS", "vkCmdPushConstants() call has no stageFlags set. %s",
9826                             validation_error_map[VALIDATION_ERROR_00996]);
9827    }
9828
9829    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
9830    auto pipeline_layout = getPipelineLayout(dev_data, layout);
9831    // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
9832    // contained in the pipeline ranges.
9833    // Build a {start, end} span list for ranges with matching stage flags.
9834    const auto &ranges = pipeline_layout->push_constant_ranges;
9835    struct span {
9836        uint32_t start;
9837        uint32_t end;
9838    };
9839    std::vector<span> spans;
9840    spans.reserve(ranges.size());
9841    for (const auto &iter : ranges) {
9842        if (iter.stageFlags == stageFlags) {
9843            spans.push_back({iter.offset, iter.offset + iter.size});
9844        }
9845    }
9846    if (spans.size() == 0) {
9847        // There were no ranges that matched the stageFlags.
9848        skip_call |=
9849            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9850                    VALIDATION_ERROR_00988, "DS", "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
9851                                                  "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ". %s",
9852                    (uint32_t)stageFlags, (uint64_t)layout, validation_error_map[VALIDATION_ERROR_00988]);
9853    } else {
9854        // Sort span list by start value.
9855        struct comparer {
9856            bool operator()(struct span i, struct span j) { return i.start < j.start; }
9857        } my_comparer;
9858        std::sort(spans.begin(), spans.end(), my_comparer);
9859
9860        // Examine two spans at a time.
9861        std::vector<span>::iterator current = spans.begin();
9862        std::vector<span>::iterator next = current + 1;
9863        while (next != spans.end()) {
9864            if (current->end < next->start) {
9865                // There is a gap; cannot coalesce. Move to the next two spans.
9866                ++current;
9867                ++next;
9868            } else {
9869                // Coalesce the two spans.  The start of the next span
9870                // is within the current span, so pick the larger of
9871                // the end values to extend the current span.
9872                // Then delete the next span and set next to the span after it.
9873                current->end = max(current->end, next->end);
9874                next = spans.erase(next);
9875            }
9876        }
9877
9878        // Now we can check if the incoming range is within any of the spans.
9879        bool contained_in_a_range = false;
9880        for (uint32_t i = 0; i < spans.size(); ++i) {
9881            if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
9882                contained_in_a_range = true;
9883                break;
9884            }
9885        }
9886        if (!contained_in_a_range) {
9887            skip_call |= log_msg(
9888                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9889                VALIDATION_ERROR_00988, "DS", "vkCmdPushConstants() Push constant range [%d, %d) "
9890                                              "with stageFlags = 0x%" PRIx32 " "
9891                                              "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ". %s",
9892                offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout, validation_error_map[VALIDATION_ERROR_00988]);
9893        }
9894    }
9895    lock.unlock();
9896    if (!skip_call)
9897        dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
9898}
9899
9900VKAPI_ATTR void VKAPI_CALL
9901CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
9902    bool skip_call = false;
9903    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9904    std::unique_lock<std::mutex> lock(global_lock);
9905    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9906    if (pCB) {
9907        QueryObject query = {queryPool, slot};
9908        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9909        pCB->queryUpdates.push_back(queryUpdate);
9910        if (pCB->state == CB_RECORDING) {
9911            skip_call |= ValidateCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
9912            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_WRITETIMESTAMP);
9913        } else {
9914            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
9915        }
9916    }
9917    lock.unlock();
9918    if (!skip_call)
9919        dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
9920}
9921
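// Helper for framebuffer creation: for each attachment reference in a subpass, verify that the image backing
// the corresponding framebuffer attachment was created with the required usage bit set.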
9922static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
9923                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
9924                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
9925    bool skip_call = false;
9926
9927    for (uint32_t attach = 0; attach < count; attach++) {
9928        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
9929            // Attachment counts are verified elsewhere, but prevent an invalid access
9930            if (attachments[attach].attachment < fbci->attachmentCount) {
9931                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
9932                auto view_state = getImageViewState(dev_data, *image_view);
9933                if (view_state) {
9934                    auto image_state = getImageState(dev_data, view_state->create_info.image);
9935                    if (image_state != nullptr) {
9936                        if ((image_state->createInfo.usage & usage_flag) == 0) {
9937                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9938                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, error_code, "DS",
9939                                                 "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
9940                                                 "IMAGE_USAGE flags (%s). %s",
9941                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag),
9942                                                 validation_error_map[error_code]);
9943                        }
9944                    }
9945                }
9946            }
9947        }
9948    }
9949    return skip_call;
9950}
9951
9952// Validate VkFramebufferCreateInfo which includes:
9953// 1. attachmentCount equals renderPass attachmentCount
9954// 2. corresponding framebuffer and renderpass attachments have matching formats
9955// 3. corresponding framebuffer and renderpass attachments have matching sample counts
9956// 4. fb attachments only have a single mip level
9957// 5. fb attachment dimensions are each at least as large as the fb
9958// 6. fb attachments use identity swizzle
9959// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
9960// 8. fb dimensions are within physical device limits
static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    bool skip_call = false;

    auto rp_state = getRenderPassState(dev_data, pCreateInfo->renderPass);
    if (rp_state) {
        const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00404, "DS",
                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer. %s",
                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass),
                validation_error_map[VALIDATION_ERROR_00404]);
        } else {
            // attachmentCounts match, so make sure corresponding attachment details line up
            const VkImageView *image_views = pCreateInfo->pAttachments;
            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = getImageViewState(dev_data, image_views[i]);
                // Skip attachments with unknown view state rather than dereferencing a null pointer
                if (!view_state) {
                    continue;
                }
                auto &ivci = view_state->create_info;
                if (ivci.format != rpci->pAttachments[i].format) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00408, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
                        "the format of "
                        "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_00408]);
                }
                // The image state lookup can also fail, so check it before using the image's create info
                auto image_state = getImageState(dev_data, ivci.image);
                const VkImageCreateInfo *ici = image_state ? &image_state->createInfo : nullptr;
                if (ici && ici->samples != rpci->pAttachments[i].samples) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00409, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
                        "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_00409]);
                }
                // Verify that view only has a single mip level
                if (ivci.subresourceRange.levelCount != 1) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                                VALIDATION_ERROR_00411, "DS",
                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
                                "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer. %s",
                                i, ivci.subresourceRange.levelCount, validation_error_map[VALIDATION_ERROR_00411]);
                }
                if (ici) {
                    const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
                    uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
                    uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
                    if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
                        (mip_height < pCreateInfo->height)) {
                        skip_call |=
                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                    __LINE__, DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions "
                                    "smaller than the corresponding "
                                    "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the "
                                    "respective dimensions for "
                                    "attachment #%u, framebuffer:\n"
                                    "width: %u, %u\n"
                                    "height: %u, %u\n"
                                    "layerCount: %u, %u\n",
                                    i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
                                    pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
                    }
                }
                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        VALIDATION_ERROR_00412, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a non-identity swizzle. All framebuffer "
                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
                        "r swizzle = %s\n"
                        "g swizzle = %s\n"
                        "b swizzle = %s\n"
                        "a swizzle = %s\n"
                        "%s",
                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a),
                        validation_error_map[VALIDATION_ERROR_00412]);
                }
            }
        }
        // Verify correct attachment usage flags
        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
            // Verify input attachments:
            skip_call |=
                MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
                           pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_00407);
            // Verify color attachments:
            skip_call |=
                MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
                           pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_00405);
            // Verify depth/stencil attachments:
            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_00406);
            }
        }
    }
    // Verify FB dimensions are within physical device limits
    if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                             VALIDATION_ERROR_00413, "DS",
                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. "
                             "Requested width: %u, device max: %u\n"
                             "%s",
                             pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
                             validation_error_map[VALIDATION_ERROR_00413]);
    }
    if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                             VALIDATION_ERROR_00414, "DS",
                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. "
                             "Requested height: %u, device max: %u\n"
                             "%s",
                             pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
                             validation_error_map[VALIDATION_ERROR_00414]);
    }
    if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                             VALIDATION_ERROR_00415, "DS",
                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. "
                             "Requested layers: %u, device max: %u\n"
                             "%s",
                             pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers,
                             validation_error_map[VALIDATION_ERROR_00415]);
    }
    return skip_call;
}
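
// A minimal sketch (illustrative only) of a VkFramebufferCreateInfo that satisfies the checks above,
// assuming a hypothetical 'render_pass' with exactly one attachment and a 'color_view' that is a
// single-mip, identity-swizzle view at least 'width' x 'height' in size.
#if 0 // example application code, not compiled
VkFramebufferCreateInfo fbci = {};
fbci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
fbci.renderPass = render_pass;   // check 1: attachmentCount must match this render pass
fbci.attachmentCount = 1;
fbci.pAttachments = &color_view; // checks 2-3: view format/samples must match the render pass attachment
fbci.width = width;              // checks 5 and 8: within the attachment's mip dimensions and device limits
fbci.height = height;
fbci.layers = 1;
#endif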

// Validate VkFramebufferCreateInfo state prior to calling down the chain to create the Framebuffer object
//  Return true if an error was encountered and the callback asked to skip the call down the chain;
//   false indicates that the call down the chain should proceed
static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    // TODO : Verify that the renderPass the FB is created with is compatible with the FB
    bool skip_call = false;
    skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
    return skip_call;
}

// CreateFramebuffer state has been validated and the call down the chain has completed, so record the new framebuffer object
static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
    // Shadow create info and store in map
    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
        new FRAMEBUFFER_STATE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));

    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        VkImageView view = pCreateInfo->pAttachments[i];
        auto view_state = getImageViewState(dev_data, view);
        if (!view_state) {
            continue;
        }
        // Guard the image state lookup as well; a failed lookup would otherwise be dereferenced below
        auto image_state = getImageState(dev_data, view_state->create_info.image);
        if (!image_state) {
            continue;
        }
        MT_FB_ATTACHMENT_INFO fb_info;
        fb_info.mem = image_state->binding.mem;
        fb_info.view_state = view_state;
        fb_info.image = view_state->create_info.image;
        fb_state->attachments.push_back(fb_info);
    }
    dev_data->frameBufferMap[fb] = std::move(fb_state);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator,
                                                 VkFramebuffer *pFramebuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
        lock.unlock();
    }
    return result;
}

static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node, we have not found a dependency path, so return false.
    if (processed_nodes.count(index))
        return false;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists, return true; otherwise recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
                return true;
        }
    } else {
        return true;
    }
    return false;
}
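
// Example: with prev edges 2 -> {1} and 1 -> {0}, FindDependency(2, 0, ...) does not find 0 directly
// in node 2's prev list, recurses to node 1, finds 0 there, and returns true; that is, the transitive
// dependency chain 0 -> 1 -> 2 satisfies the requirement.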

static bool CheckDependencyExists(const layer_data *dev_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
    bool result = true;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
            continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists, we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no explicit dependency exists, an implicit one still might. If not, report an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                     dependent_subpasses[k]);
                result = false;
            }
        }
    }
    return result;
}

static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment, return true as the following nodes need to preserve the attachment.
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment)
            return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment)
            return true;
    }
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
    if (result && depth > 0) {
        bool has_preserved = false;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = true;
                break;
            }
        }
        if (!has_preserved) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS",
                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}
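
// Example: if subpass 0 writes attachment A, subpass 2 reads A, and subpass 1 neither reads nor
// writes A, then subpass 1 must list A in pPreserveAttachments or the error above is reported.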

template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    // Two half-open ranges [offset, offset + size) overlap iff each one begins before the other ends
    return ((offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1)));
}
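
// Examples over half-open ranges: isRangeOverlapping(0u, 10u, 2u, 3u) is true because [0, 10)
// contains [2, 5); isRangeOverlapping(0u, 4u, 4u, 4u) is false because the ranges only touch at 4.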

bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}

static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
                                 RENDER_PASS_STATE const *renderPass) {
    bool skip_call = false;
    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
    auto const pCreateInfo = renderPass->createInfo.ptr();
    auto const &subpass_to_node = renderPass->subpassToNode;
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_state_i = getImageViewState(dev_data, viewi);
            auto view_state_j = getImageViewState(dev_data, viewj);
            if (!view_state_i || !view_state_j) {
                continue;
            }
            auto view_ci_i = view_state_i->create_info;
            auto view_ci_j = view_state_j->create_info;
            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto image_data_i = getImageState(dev_data, view_ci_i.image);
            auto image_data_j = getImageState(dev_data, view_ci_j.image);
            if (!image_data_i || !image_data_j) {
                continue;
            }
            if (image_data_i->binding.mem == image_data_j->binding.mem &&
                isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
                                   image_data_j->binding.size)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_00324, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                             "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
                                     attachment, other_attachment, validation_error_map[VALIDATION_ERROR_00324]);
            }
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_00324, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                             "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
                                     other_attachment, attachment, validation_error_map[VALIDATION_ERROR_00324]);
            }
        }
    }
    // For each attachment, find the subpasses that use it
    unordered_set<uint32_t> attachmentIndices;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        attachmentIndices.clear();
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            input_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
            attachmentIndices.insert(attachment);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }

            if (attachmentIndices.count(attachment)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
            }
        }
    }
    // If a dependency is needed, make sure one exists
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input then all subpasses that output must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
    }
    // Check implicit dependencies: if a subpass reads an attachment, the attachment must be preserved
    // by every subpass between the one that wrote it and the reader.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
        }
    }
    return skip_call;
}
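
// A minimal sketch (illustrative only): two attachments that alias the same memory must both set
// VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT in their VkAttachmentDescription to avoid VALIDATION_ERROR_00324.
#if 0 // example application code, not compiled
VkAttachmentDescription aliased = {};
aliased.flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT;
aliased.format = VK_FORMAT_B8G8R8A8_UNORM;
aliased.samples = VK_SAMPLE_COUNT_1_BIT;
// loadOp/storeOp/layouts filled in as usual; every attachment sharing the memory needs the flag set.
#endif
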
// ValidateLayoutVsAttachmentDescription is a general function for validating state associated with the
// VkAttachmentDescription structs used by the sub-passes of a renderpass. The initial check makes sure that
// READ_ONLY layout attachments don't have CLEAR as their loadOp.
static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
                                                  const uint32_t attachment,
                                                  const VkAttachmentDescription &attachment_description) {
    bool skip_call = false;
    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
            skip_call |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                                 VALIDATION_ERROR_02351, "DS",
                                 "Cannot clear attachment %d with invalid first layout %s. %s", attachment,
                                 string_VkImageLayout(first_layout), validation_error_map[VALIDATION_ERROR_02351]);
        }
    }
    return skip_call;
}
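
// A minimal sketch (illustrative only): an attachment whose first-use layout is read-only must not
// request a clear, so VK_ATTACHMENT_LOAD_OP_CLEAR should be paired with a writable first layout.
#if 0 // example application code, not compiled
VkAttachmentDescription depth = {};
depth.format = VK_FORMAT_D32_SFLOAT;
depth.samples = VK_SAMPLE_COUNT_1_BIT;
depth.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
depth.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
// First-use layout in the subpass: DEPTH_STENCIL_ATTACHMENT_OPTIMAL (writable) passes the check;
// DEPTH_STENCIL_READ_ONLY_OPTIMAL would trigger VALIDATION_ERROR_02351.
#endif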

static bool ValidateLayouts(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
    bool skip = false;

    // Track when we're observing the first use of an attachment
    std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            auto attach_index = subpass.pColorAttachments[j].attachment;
            if (attach_index == VK_ATTACHMENT_UNUSED)
                continue;

            switch (subpass.pColorAttachments[j].layout) {
            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
                // This is ideal.
                break;

            case VK_IMAGE_LAYOUT_GENERAL:
                // May not be optimal; TODO: reconsider this warning based on other constraints?
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
                break;

            default:
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pColorAttachments[j].layout));
            }

            if (attach_first_use[attach_index]) {
                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pColorAttachments[j].layout,
                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
            }
            attach_first_use[attach_index] = false;
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            switch (subpass.pDepthStencilAttachment->layout) {
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
                // These are ideal.
                break;

            case VK_IMAGE_LAYOUT_GENERAL:
                // May not be optimal; TODO: reconsider this warning based on other constraints? GENERAL can be better than doing
                // a bunch of transitions.
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "GENERAL layout for depth attachment may not give optimal performance.");
                break;

            default:
                // No other layouts are acceptable
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
                                "DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
            }

            auto attach_index = subpass.pDepthStencilAttachment->attachment;
            if (attach_first_use[attach_index]) {
                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pDepthStencilAttachment->layout,
                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
            }
            attach_first_use[attach_index] = false;
        }
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            auto attach_index = subpass.pInputAttachments[j].attachment;
            if (attach_index == VK_ATTACHMENT_UNUSED)
                continue;

            switch (subpass.pInputAttachments[j].layout) {
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
            case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
                // These are ideal.
                break;

            case VK_IMAGE_LAYOUT_GENERAL:
                // May not be optimal. TODO: reconsider this warning based on other constraints.
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
                break;

            default:
                // No other layouts are acceptable
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pInputAttachments[j].layout));
            }

            if (attach_first_use[attach_index]) {
                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pInputAttachments[j].layout,
                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
            }
            attach_first_use[attach_index] = false;
        }
    }
    return skip;
}

static bool CreatePassDAG(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
    bool skip_call = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        DAGNode &subpass_node = subpass_to_node[i];
        subpass_node.pass = i;
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            if (dependency.srcSubpass == dependency.dstSubpass) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
            }
        } else if (dependency.srcSubpass > dependency.dstSubpass) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            has_self_dependency[dependency.srcSubpass] = true;
        } else {
            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
        }
    }
    return skip_call;
}
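
// A minimal sketch (illustrative only): a forward dependency that would add the DAG edge 0 -> 1 above.
#if 0 // example application code, not compiled
VkSubpassDependency dep = {};
dep.srcSubpass = 0; // earlier pass; srcSubpass > dstSubpass is rejected above
dep.dstSubpass = 1; // later pass; equal non-external indices mark a self-dependency instead
dep.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
dep.dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
dep.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
dep.dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
#endif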


VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator,
                                                  VkShaderModule *pShaderModule) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    // Use SPIRV-Tools validator to try and catch any issues with the module itself
    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
    spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
    spv_diagnostic diag = nullptr;

    auto result = spvValidate(ctx, &binary, &diag);
    if (result != SPV_SUCCESS) {
        skip_call |=
            log_msg(dev_data->report_data, result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
                    VkDebugReportObjectTypeEXT(0), 0, __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
                    "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)");
    }

    spvDiagnosticDestroy(diag);
    spvContextDestroy(ctx);

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);

    if (res == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
    }
    return res;
}

static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
    bool skip_call = false;
    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00325, "DS",
                             "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d. %s",
                             type, attachment, attachment_count, validation_error_map[VALIDATION_ERROR_00325]);
    }
    return skip_call;
}

static bool IsPowerOfTwo(unsigned x) {
    return x && !(x & (x-1));
}
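
// Note: ValidateRenderpassAttachmentUsage below ORs each attachment's VkSampleCountFlagBits value
// into 'sample_count'. Each count is a single bit, so a mix of different counts (for example
// VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT == 0x5) fails IsPowerOfTwo and is reported.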

static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
    bool skip_call = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_00347, "DS",
                                 "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS. %s",
                                 i, validation_error_map[VALIDATION_ERROR_00347]);
        }
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            uint32_t attachment = subpass.pPreserveAttachments[j];
            if (attachment == VK_ATTACHMENT_UNUSED) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_00356, "DS",
                                     "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED. %s", j,
                                     validation_error_map[VALIDATION_ERROR_00356]);
            } else {
                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
            }
        }

        auto subpass_performs_resolve = subpass.pResolveAttachments && std::any_of(
            subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
            [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });

        unsigned sample_count = 0;

        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment;
            if (subpass.pResolveAttachments) {
                attachment = subpass.pResolveAttachments[j].attachment;
                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");

                if (!skip_call && attachment != VK_ATTACHMENT_UNUSED &&
                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                         __LINE__, VALIDATION_ERROR_00352, "DS",
                                         "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
                                         "which must have VK_SAMPLE_COUNT_1_BIT but has %s. %s",
                                         i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
                                         validation_error_map[VALIDATION_ERROR_00352]);
                }
            }
            attachment = subpass.pColorAttachments[j].attachment;
            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");

            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;

                if (subpass_performs_resolve &&
                    pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                         __LINE__, VALIDATION_ERROR_00351, "DS",
                                         "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
                                         "which has VK_SAMPLE_COUNT_1_BIT. %s",
                                         i, attachment, validation_error_map[VALIDATION_ERROR_00351]);
                }
            }
        }

        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");

            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
            }
        }

        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
        }

        if (sample_count && !IsPowerOfTwo(sample_count)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                                 VALIDATION_ERROR_00337, "DS", "CreateRenderPass:  Subpass %u attempts to render to "
                                                               "attachments with inconsistent sample counts. %s",
                                 i, validation_error_map[VALIDATION_ERROR_00337]);
        }
    }
    return skip_call;
}
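
// A minimal sketch (illustrative only) of a resolve pairing that passes the checks above: a
// multisampled color attachment resolving into a separate single-sample attachment.
#if 0 // example application code, not compiled; attachment 0 is 4x MSAA, attachment 1 is the resolve target
VkAttachmentReference color_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};   // samples = VK_SAMPLE_COUNT_4_BIT
VkAttachmentReference resolve_ref = {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; // samples must be VK_SAMPLE_COUNT_1_BIT
VkSubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &color_ref;
subpass.pResolveAttachments = &resolve_ref; // resolving from a 1-sample source would trigger VALIDATION_ERROR_00351
#endif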

VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);

    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
    //       ValidateLayouts.
    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
    if (!skip_call) {
        skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
    }
    lock.unlock();

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);

    if (VK_SUCCESS == result) {
        lock.lock();

        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);

        auto render_pass = unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo));
        render_pass->renderPass = *pRenderPass;
        render_pass->hasSelfDependency = has_self_dependency;
        render_pass->subpassToNode = subpass_to_node;

        // TODO: Maybe fill list and then copy instead of locking
        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
                uint32_t attachment = subpass.pColorAttachments[j].attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, false));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
                }
            }
            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, false));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
                }
            }
            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
                uint32_t attachment = subpass.pInputAttachments[j].attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, true));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
                }
            }
        }

        dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
    }
    return result;
}

static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    auto const pRenderPassInfo = getRenderPassState(dev_data, pRenderPassBegin->renderPass)->createInfo.ptr();
    auto const &framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo;
    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
                                                                 "with a different number of attachments.");
    }
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        auto view_state = getImageViewState(dev_data, image_view);
        assert(view_state);
        const VkImage &image = view_state->create_info.image;
        const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
                                             pRenderPassInfo->pAttachments[i].initialLayout};
        // TODO: Do not iterate over every possibility - consolidate where possible
        for (uint32_t j = 0; j < subRange.levelCount; j++) {
            uint32_t level = subRange.baseMipLevel + j;
            for (uint32_t k = 0; k < subRange.layerCount; k++) {
                uint32_t layer = subRange.baseArrayLayer + k;
                VkImageSubresource sub = {subRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, image, sub, node)) {
                    SetLayout(pCB, image, sub, newNode);
                    continue;
                }
                if (newNode.layout != VK_IMAGE_LAYOUT_UNDEFINED &&
                    newNode.layout != node.layout) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_RENDERPASS, "DS",
                                "You cannot start a render pass using attachment %u "
                                "where the render pass initial layout is %s and the previous "
                                "known layout of the attachment is %s. The layouts must match, or "
                                "the render pass initial layout for the attachment must be "
                                "VK_IMAGE_LAYOUT_UNDEFINED",
                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
                }
            }
        }
    }
    return skip_call;
}

static void TransitionAttachmentRefLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
                                          VkAttachmentReference ref) {
    if (ref.attachment != VK_ATTACHMENT_UNUSED) {
        auto image_view = pFramebuffer->createInfo.pAttachments[ref.attachment];
        SetLayout(dev_data, pCB, image_view, ref.layout);
    }
}

static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
                                     const int subpass_index) {
    auto renderPass = getRenderPassState(dev_data, pRenderPassBegin->renderPass);
    if (!renderPass)
        return;

    auto framebuffer = getFramebufferState(dev_data, pRenderPassBegin->framebuffer);
    if (!framebuffer)
        return;

    auto const &subpass = renderPass->createInfo.pSubpasses[subpass_index];
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pInputAttachments[j]);
    }
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pColorAttachments[j]);
    }
    if (subpass.pDepthStencilAttachment) {
        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, *subpass.pDepthStencilAttachment);
    }
}

static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name,
                                         UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             error_code, "DS", "Cannot execute command %s on a secondary command buffer. %s", cmd_name.c_str(),
                             validation_error_map[error_code]);
    }
    return skip_call;
}

static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
    auto renderPass = getRenderPassState(dev_data, pRenderPassBegin->renderPass);
    if (!renderPass)
        return;

    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->createInfo.ptr();
    auto framebuffer = getFramebufferState(dev_data, pRenderPassBegin->framebuffer);
    if (!framebuffer)
        return;

    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        auto image_view = framebuffer->createInfo.pAttachments[i];
        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
    }
}
10833
10834static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
10835    bool skip_call = false;
10836    const safe_VkFramebufferCreateInfo *pFramebufferInfo =
10837        &getFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
10838    if (pRenderPassBegin->renderArea.offset.x < 0 ||
10839        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
10840        pRenderPassBegin->renderArea.offset.y < 0 ||
10841        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
10842        skip_call |= static_cast<bool>(log_msg(
10843            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10844            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
10845            "Cannot execute a render pass with renderArea not within the bound of the "
10846            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
10847            "height %d.",
10848            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
10849            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
10850    }
10851    return skip_call;
10852}
10853
10854// For a stencil-only format, only the stencil[Load|Store]Op applies; for a depth-only or color attachment, only the
10855// [load|store]Op applies; combined depth/stencil formats check both ops.
10856// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
10857template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
10858    if (color_depth_op != op && stencil_op != op) {
10859        return false;
10860    }
10861    bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
10862    bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;
10863
10864    return ((check_color_depth_load_op && (color_depth_op == op)) ||
10865            (check_stencil_load_op && (stencil_op == op)));
10866}
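
// Illustrative example (not part of the validation logic): for VK_FORMAT_D24_UNORM_S8_UINT, a query such as
// FormatSpecificLoadAndStoreOpSettings(format, loadOp, stencilLoadOp, VK_ATTACHMENT_LOAD_OP_CLEAR) returns true
// when either the depth loadOp or the stencilLoadOp is CLEAR; for stencil-only VK_FORMAT_S8_UINT only the
// stencilLoadOp is considered, and for color or depth-only formats only the loadOp is considered.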
10867
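// vkCmdBeginRenderPass: validate clear value count, render area bounds, initial attachment layouts, and subpass
// dependencies, then bind the render pass and framebuffer state to the command buffer and transition the
// attachments used by subpass 0.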
10868VKAPI_ATTR void VKAPI_CALL
10869CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
10870    bool skip_call = false;
10871    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10872    std::unique_lock<std::mutex> lock(global_lock);
10873    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
10874    auto renderPass = pRenderPassBegin ? getRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
10875    auto framebuffer = pRenderPassBegin ? getFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
10876    if (cb_node) {
10877        if (renderPass && framebuffer) {
10878            uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
10879            cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
10880            for (uint32_t i = 0; i < renderPass->createInfo.attachmentCount; ++i) {
10881                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
10882                auto pAttachment = &renderPass->createInfo.pAttachments[i];
10883                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10884                                                         pAttachment->stencilLoadOp,
10885                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
10886                    clear_op_size = static_cast<uint32_t>(i) + 1;
10887                    std::function<bool()> function = [=]() {
10888                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true);
10889                        return false;
10890                    };
10891                    cb_node->validate_functions.push_back(function);
10892                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10893                                                                pAttachment->stencilLoadOp,
10894                                                                VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
10895                    std::function<bool()> function = [=]() {
10896                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false);
10897                        return false;
10898                    };
10899                    cb_node->validate_functions.push_back(function);
10900                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10901                                                                pAttachment->stencilLoadOp,
10902                                                                VK_ATTACHMENT_LOAD_OP_LOAD)) {
10903                    std::function<bool()> function = [=]() {
10904                        return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image),
10905                                                          "vkCmdBeginRenderPass()");
10906                    };
10907                    cb_node->validate_functions.push_back(function);
10908                }
10909                if (renderPass->attachment_first_read[i]) {
10910                    std::function<bool()> function = [=]() {
10911                        return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image),
10912                                                          "vkCmdBeginRenderPass()");
10913                    };
10914                    cb_node->validate_functions.push_back(function);
10915                }
10916            }
10917            if (clear_op_size > pRenderPassBegin->clearValueCount) {
10918                skip_call |= log_msg(
10919                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
10920                    reinterpret_cast<uint64_t &>(renderPass), __LINE__, VALIDATION_ERROR_00442,
10921                    "DS", "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there must "
10922                          "be at least %u entries in pClearValues array to account for the highest index attachment in renderPass "
10923                          "0x%" PRIx64 " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array "
10924                          "is indexed by attachment number so even if some pClearValues entries between 0 and %u correspond to "
10925                          "attachments that aren't cleared they will be ignored. %s",
10926                    pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass), clear_op_size,
10927                    clear_op_size - 1, validation_error_map[VALIDATION_ERROR_00442]);
10928            }
10929            if (clear_op_size < pRenderPassBegin->clearValueCount) {
10930                skip_call |= log_msg(
10931                    dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
10932                    reinterpret_cast<uint64_t &>(renderPass), __LINE__, DRAWSTATE_RENDERPASS_TOO_MANY_CLEAR_VALUES, "DS",
10933                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but only first %u "
10934                    "entries in pClearValues array are used. The highest index attachment in renderPass 0x%" PRIx64
10935                    " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u - other pClearValues are ignored.",
10936                    pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass), clear_op_size);
10937            }
10938            skip_call |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
10939            skip_call |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin);
10940            skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_00440);
10941            skip_call |= ValidateDependencies(dev_data, framebuffer, renderPass);
10942            skip_call |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass", VALIDATION_ERROR_00441);
10943            skip_call |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
10944            UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BEGINRENDERPASS);
10945            cb_node->activeRenderPass = renderPass;
10946            // This is a shallow copy as that is all that is needed for now
10947            cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
10948            cb_node->activeSubpass = 0;
10949            cb_node->activeSubpassContents = contents;
10950            cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
10951            // Connect this framebuffer and its children to this cmdBuffer
10952            AddFramebufferBinding(dev_data, cb_node, framebuffer);
10953            // transition attachments to the correct layouts for the first subpass
10954            TransitionSubpassLayouts(dev_data, cb_node, &cb_node->activeRenderPassBeginInfo, cb_node->activeSubpass);
10955        }
10956    }
10957    lock.unlock();
10958    if (!skip_call) {
10959        dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
10960    }
10961}
10962
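// vkCmdNextSubpass: must be recorded on a primary command buffer inside a render pass and must not advance
// past the final subpass; on success, bump the active subpass and apply its layout transitions.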
10963VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
10964    bool skip_call = false;
10965    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10966    std::unique_lock<std::mutex> lock(global_lock);
10967    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10968    if (pCB) {
10969        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass", VALIDATION_ERROR_00459);
10970        skip_call |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
10971        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_NEXTSUBPASS);
10972        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_00458);
10973
10974        // Guard against a null activeRenderPass; outsideRenderPass() above reports that error
10975        if (pCB->activeRenderPass && (pCB->activeSubpass == pCB->activeRenderPass->createInfo.subpassCount - 1)) {
10976            skip_call |= log_msg(
10977                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10978                reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00453, "DS",
10979                "vkCmdNextSubpass(): Attempted to advance beyond final subpass. %s", validation_error_map[VALIDATION_ERROR_00453]);
10980        }
10981    }
10982    lock.unlock();
10983
10984    if (skip_call)
10985        return;
10986
10987    dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
10988
10989    if (pCB) {
10990        lock.lock();
10991        pCB->activeSubpass++;
10992        pCB->activeSubpassContents = contents;
10993        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
10994    }
10995}
10996
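// vkCmdEndRenderPass: verify the final subpass has been reached, queue per-attachment store-op validity
// updates, then transition attachments to their final layouts and clear the active render pass state.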
10997VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
10998    bool skip_call = false;
10999    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
11000    std::unique_lock<std::mutex> lock(global_lock);
11001    auto pCB = getCBNode(dev_data, commandBuffer);
11002    if (pCB) {
11003        RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
11004        auto framebuffer = getFramebufferState(dev_data, pCB->activeFramebuffer);
11005        if (rp_state) {
11006            if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
11007                skip_call |= log_msg(
11008                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11009                    reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00460, "DS",
11010                    "vkCmdEndRenderPass(): Called before reaching final subpass. %s", validation_error_map[VALIDATION_ERROR_00460]);
11011            }
11012
11013            for (uint32_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
11014                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
11015                auto pAttachment = &rp_state->createInfo.pAttachments[i];
11016                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
11017                                                         pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_STORE)) {
11018                    std::function<bool()> function = [=]() {
11019                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true);
11020                        return false;
11021                    };
11022                    pCB->validate_functions.push_back(function);
11023                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
11024                                                                pAttachment->stencilStoreOp,
11025                                                                VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
11026                    std::function<bool()> function = [=]() {
11027                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false);
11028                        return false;
11029                    };
11030                    pCB->validate_functions.push_back(function);
11031                }
11032            }
11033        }
11034        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_00464);
11035        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass", VALIDATION_ERROR_00465);
11036        skip_call |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
11037        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_ENDRENDERPASS);
11038    }
11039    lock.unlock();
11040
11041    if (skip_call)
11042        return;
11043
11044    dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
11045
11046    if (pCB) {
11047        lock.lock();
11048        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
11049        pCB->activeRenderPass = nullptr;
11050        pCB->activeSubpass = 0;
11051        pCB->activeFramebuffer = VK_NULL_HANDLE;
11052    }
11053}
11054
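// Helper to emit the standard attachment-incompatibility error for vkCmdExecuteCommands().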
11055static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
11056                                        uint32_t secondaryAttach, const char *msg) {
11057    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11058                   VALIDATION_ERROR_02059, "DS",
11059                   "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64 " which has a render pass "
11060                   "that is not compatible with the Primary Cmd Buffer current render pass. "
11061                   "Attachment %u is not compatible with %u: %s. %s",
11062                   reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg,
11063                   validation_error_map[VALIDATION_ERROR_02059]);
11064}
11065
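// Per the render pass compatibility rules, two attachment references match if both are VK_ATTACHMENT_UNUSED
// (out-of-range indices are treated as unused) or if their attachments agree in format and sample count --
// and in flags, when either render pass has more than one subpass.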
11066static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
11067                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
11068                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
11069                                            uint32_t secondaryAttach, bool is_multi) {
11070    bool skip_call = false;
11071    if (primaryPassCI->attachmentCount <= primaryAttach) {
11072        primaryAttach = VK_ATTACHMENT_UNUSED;
11073    }
11074    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
11075        secondaryAttach = VK_ATTACHMENT_UNUSED;
11076    }
11077    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
11078        return skip_call;
11079    }
11080    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
11081        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
11082                                                 "The first is unused while the second is not.");
11083        return skip_call;
11084    }
11085    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
11086        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
11087                                                 "The second is unused while the first is not.");
11088        return skip_call;
11089    }
11090    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
11091        skip_call |=
11092            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
11093    }
11094    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
11095        skip_call |=
11096            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
11097    }
11098    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
11099        skip_call |=
11100            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
11101    }
11102    return skip_call;
11103}
11104
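// Compare one subpass from each render pass reference-by-reference: inputs, colors, resolves, and
// depth/stencil, padding the shorter attachment lists with VK_ATTACHMENT_UNUSED.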
11105static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
11106                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
11107                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
11108    bool skip_call = false;
11109    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
11110    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
11111    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
11112    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
11113        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
11114        if (i < primary_desc.inputAttachmentCount) {
11115            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
11116        }
11117        if (i < secondary_desc.inputAttachmentCount) {
11118            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
11119        }
11120        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
11121                                                     secondaryPassCI, secondary_input_attach, is_multi);
11122    }
11123    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
11124    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
11125        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
11126        if (i < primary_desc.colorAttachmentCount) {
11127            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
11128        }
11129        if (i < secondary_desc.colorAttachmentCount) {
11130            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
11131        }
11132        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
11133                                                     secondaryPassCI, secondary_color_attach, is_multi);
11134        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
11135        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
11136            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
11137        }
11138        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
11139            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
11140        }
11141        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach,
11142                                                     secondaryBuffer, secondaryPassCI, secondary_resolve_attach, is_multi);
11143    }
11144    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
11145    if (primary_desc.pDepthStencilAttachment) {
11146        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
11147    }
11148    if (secondary_desc.pDepthStencilAttachment) {
11149        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
11150    }
11151    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach,
11152                                                 secondaryBuffer, secondaryPassCI, secondary_depthstencil_attach, is_multi);
11153    return skip_call;
11154}
11155
11156// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
11157//  This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
11158//  will then feed into this function
11159static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
11160                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
11161                                            VkRenderPassCreateInfo const *secondaryPassCI) {
11162    bool skip_call = false;
11163
11164    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
11165        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11166                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
11167                             "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
11168                             " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
11169                             " that has a subpassCount of %u.",
11170                             reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount,
11171                             reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount);
11172    } else {
11173        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
11174            skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
11175                                                      primaryPassCI->subpassCount > 1);
11176        }
11177    }
11178    return skip_call;
11179}
11180
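// A secondary command buffer that inherits a framebuffer must use the primary's currently active framebuffer,
// and its inherited render pass must be compatible with the render pass that framebuffer was created with.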
11181static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
11182                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
11183    bool skip_call = false;
11184    if (!pSubCB->beginInfo.pInheritanceInfo) {
11185        return skip_call;
11186    }
11187    VkFramebuffer primary_fb = pCB->activeFramebuffer;
11188    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
11189    if (secondary_fb != VK_NULL_HANDLE) {
11190        if (primary_fb != secondary_fb) {
11191            skip_call |= log_msg(
11192                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11193                VALIDATION_ERROR_02060, "DS",
11194                "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64 " which has a framebuffer 0x%" PRIx64
11195                " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ". %s",
11196                reinterpret_cast<uint64_t &>(secondaryBuffer), reinterpret_cast<uint64_t &>(secondary_fb),
11197                reinterpret_cast<uint64_t &>(primary_fb), validation_error_map[VALIDATION_ERROR_02060]);
11198        }
11199        auto fb = getFramebufferState(dev_data, secondary_fb);
11200        if (!fb) {
11201            skip_call |=
11202                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11203                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
11204                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
11205                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
11206            return skip_call;
11207        }
11208        auto cb_renderpass = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
11209        if (cb_renderpass && (cb_renderpass->renderPass != fb->createInfo.renderPass)) {
11210            skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
11211                                                         cb_renderpass->createInfo.ptr());
11212        }
11213    }
11214    return skip_call;
11215}
11216
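// Validate query state across a primary/secondary pair: pipeline-statistics flags must cover the active query
// pools, no query type may be active in both buffers, and both command pools must share a queue family index.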
11217static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
11218    bool skip_call = false;
11219    unordered_set<int> activeTypes;
11220    for (auto queryObject : pCB->activeQueries) {
11221        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
11222        if (queryPoolData != dev_data->queryPoolMap.end()) {
11223            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
11224                pSubCB->beginInfo.pInheritanceInfo) {
11225                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
11226                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
11227                    skip_call |=
11228                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11229                                VALIDATION_ERROR_02065, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
11230                                                              "which has invalid active query pool 0x%" PRIx64
11231                                                              ". Pipeline statistics are being queried so the command "
11232                                                              "buffer must have all bits set on the queryPool. %s",
11233                                pCB->commandBuffer, reinterpret_cast<const uint64_t &>(queryPoolData->first),
11234                                validation_error_map[VALIDATION_ERROR_02065]);
11235                }
11236            }
11237            activeTypes.insert(queryPoolData->second.createInfo.queryType);
11238        }
11239    }
11240    for (auto queryObject : pSubCB->startedQueries) {
11241        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
11242        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
11243            skip_call |=
11244                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11245                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
11246                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
11247                        "which has invalid active query pool 0x%" PRIx64 "of type %d but a query of that type has been started on "
11248                        "secondary Cmd Buffer 0x%p.",
11249                        pCB->commandBuffer, reinterpret_cast<const uint64_t &>(queryPoolData->first),
11250                        queryPoolData->second.createInfo.queryType, pSubCB->commandBuffer);
11251        }
11252    }
11253
11254    auto primary_pool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
11255    auto secondary_pool = getCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
11256    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
11257        skip_call |=
11258            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11259                    reinterpret_cast<uint64_t>(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
11260                    "vkCmdExecuteCommands(): Primary command buffer 0x%p"
11261                    " created in queue family %d has secondary command buffer 0x%p created in queue family %d.",
11262                    pCB->commandBuffer, primary_pool->queueFamilyIndex, pSubCB->commandBuffer, secondary_pool->queueFamilyIndex);
11263    }
11264
11265    return skip_call;
11266}
11267
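// vkCmdExecuteCommands: every executed buffer must be a secondary command buffer; inside a render pass each one
// must have been begun with RENDER_PASS_CONTINUE_BIT and use a compatible render pass and framebuffer.
// Simultaneous-use and inherited-query rules are checked before the secondary's state is linked into the primary.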
11268VKAPI_ATTR void VKAPI_CALL
11269CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
11270    bool skip_call = false;
11271    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
11272    std::unique_lock<std::mutex> lock(global_lock);
11273    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
11274    if (pCB) {
11275        GLOBAL_CB_NODE *pSubCB = NULL;
11276        for (uint32_t i = 0; i < commandBuffersCount; i++) {
11277            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
11278            assert(pSubCB);
11279            if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
11280                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
11281                                     __LINE__, VALIDATION_ERROR_00153, "DS",
11282                                     "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
11283                                     "array. All cmd buffers in pCommandBuffers array must be secondary. %s",
11284                                     pCommandBuffers[i], i, validation_error_map[VALIDATION_ERROR_00153]);
11285            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
11286                auto secondary_rp_state = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
11287                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
11288                    skip_call |= log_msg(
11289                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11290                        (uint64_t)pCommandBuffers[i], __LINE__, VALIDATION_ERROR_02057, "DS",
11291                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
11292                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set. %s",
11293                        pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass,
11294                        validation_error_map[VALIDATION_ERROR_02057]);
11295                } else {
11296                    // Make sure render pass is compatible with parent command buffer pass if has continue
11297                    if (secondary_rp_state && (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass)) {
11298                        skip_call |=
11299                            validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
11300                                                            pCommandBuffers[i], secondary_rp_state->createInfo.ptr());
11301                    }
11302                    //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
11303                    skip_call |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
11304                }
11305                string errorString = "";
11306                // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
11307                if (secondary_rp_state && (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) &&
11308                    !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
11309                                                     secondary_rp_state->createInfo.ptr(), errorString)) {
11310                    skip_call |= log_msg(
11311                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11312                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
11313                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
11314                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
11315                        pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, commandBuffer,
11316                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
11317                }
11318            }
11319            // TODO(mlentine): Move more logic into this method
11320            skip_call |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
11321            skip_call |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()");
11322            // Secondary cmdBuffers are considered pending execution starting w/
11323            // being recorded
11324            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
11325                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
11326                    skip_call |=
11327                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11328                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)(pCB->commandBuffer), __LINE__,
11329                                VALIDATION_ERROR_00154, "DS", "Attempt to simultaneously execute command buffer 0x%p"
11330                                                              " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set! %s",
11331                                pCB->commandBuffer, validation_error_map[VALIDATION_ERROR_00154]);
11332                }
11333                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
11334                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
11335                    skip_call |= log_msg(
11336                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11337                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
11338                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) "
11339                        "does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
11340                        "(0x%p) to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
11341                        "set, even though it does.",
11342                        pCommandBuffers[i], pCB->commandBuffer);
11343                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
11344                }
11345            }
11346            if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
11347                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11348                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(pCommandBuffers[i]),
11349                                     __LINE__, VALIDATION_ERROR_02062, "DS", "vkCmdExecuteCommands(): Secondary Command Buffer "
11350                                                                             "(0x%p) cannot be submitted with a query in "
11351                                                                             "flight and inherited queries not "
11352                                                                             "supported on this device. %s",
11353                                     pCommandBuffers[i], validation_error_map[VALIDATION_ERROR_02062]);
11354            }
11355            // Propagate layout transitions to the primary cmd buffer
11356            for (auto ilm_entry : pSubCB->imageLayoutMap) {
11357                SetLayout(pCB, ilm_entry.first, ilm_entry.second);
11358            }
11359            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
11360            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
11361            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
11362            for (auto &function : pSubCB->queryUpdates) {
11363                pCB->queryUpdates.push_back(function);
11364            }
11365        }
11366        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands", VALIDATION_ERROR_00163);
11367        skip_call |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
11368        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_EXECUTECOMMANDS);
11369    }
11370    lock.unlock();
11371    if (!skip_call)
11372        dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
11373}
11374
11375// For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL
11376static bool ValidateMapImageLayouts(VkDevice device, DEVICE_MEM_INFO const *mem_info, VkDeviceSize offset,
11377                                    VkDeviceSize end_offset) {
11378    bool skip_call = false;
11379    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11380    // Iterate over all bound image ranges and verify that for any that overlap the
11381    //  map ranges, the layouts are VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL
11382    // TODO : This can be optimized if we store ranges based on starting address and early exit when we pass our range
11383    for (auto image_handle : mem_info->bound_images) {
11384        auto img_it = mem_info->bound_ranges.find(image_handle);
11385        if (img_it != mem_info->bound_ranges.end()) {
11386            if (rangesIntersect(dev_data, &img_it->second, offset, end_offset)) {
11387                std::vector<VkImageLayout> layouts;
11388                if (FindLayouts(dev_data, VkImage(image_handle), layouts)) {
11389                    for (auto layout : layouts) {
11390                        if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
11391                            skip_call |=
11392                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
11393                                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
11394                                                                                        "GENERAL or PREINITIALIZED are supported.",
11395                                        string_VkImageLayout(layout));
11396                        }
11397                    }
11398                }
11399            }
11400        }
11401    }
11402    return skip_call;
11403}
11404
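// vkMapMemory: require host-visible memory, verify that any overlapping image layouts allow mapping, validate
// the requested range, and on success record the mapping so later flush/invalidate calls can be checked.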
11405VKAPI_ATTR VkResult VKAPI_CALL
11406MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
11407    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11408
11409    bool skip_call = false;
11410    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11411    std::unique_lock<std::mutex> lock(global_lock);
11412    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
11413    if (mem_info) {
11414        // TODO : This could be more fine-grained to track just the region that is valid
11415        mem_info->global_valid = true;
11416        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
11417        skip_call |= ValidateMapImageLayouts(device, mem_info, offset, end_offset);
11418        // TODO : Do we need to create new "bound_range" for the mapped range?
11419        SetMemRangesValid(dev_data, mem_info, offset, end_offset);
11420        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
11421             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
11422            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11423                                 (uint64_t)mem, __LINE__, VALIDATION_ERROR_00629, "MEM",
11424                                 "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64 ". %s",
11425                                 (uint64_t)mem, validation_error_map[VALIDATION_ERROR_00629]);
11426        }
11427    }
11428    skip_call |= ValidateMapMemRange(dev_data, mem, offset, size);
11429    lock.unlock();
11430
11431    if (!skip_call) {
11432        result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
11433        if (VK_SUCCESS == result) {
11434            lock.lock();
11435            // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
11436            storeMemRanges(dev_data, mem, offset, size);
11437            initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
11438            lock.unlock();
11439        }
11440    }
11441    return result;
11442}
11443
11444VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
11445    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11446    bool skip_call = false;
11447
11448    std::unique_lock<std::mutex> lock(global_lock);
11449    skip_call |= deleteMemRanges(dev_data, mem);
11450    lock.unlock();
11451    if (!skip_call) {
11452        dev_data->dispatch_table.UnmapMemory(device, mem);
11453    }
11454}
11455
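// For each VkMappedMemoryRange passed to flush/invalidate, verify that it lies within the region established
// by the prior vkMapMemory() call on that memory object.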
11456static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
11457                                   const VkMappedMemoryRange *pMemRanges) {
11458    bool skip = false;
11459    for (uint32_t i = 0; i < memRangeCount; ++i) {
11460        auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory);
11461        if (mem_info) {
11462            if (pMemRanges[i].size == VK_WHOLE_SIZE) {
11463                if (mem_info->mem_range.offset > pMemRanges[i].offset) {
11464                    skip |=
11465                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11466                                (uint64_t)pMemRanges[i].memory, __LINE__, VALIDATION_ERROR_00643, "MEM",
11467                                "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
11468                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
11469                                funcName, static_cast<size_t>(pMemRanges[i].offset),
11470                                static_cast<size_t>(mem_info->mem_range.offset), validation_error_map[VALIDATION_ERROR_00643]);
11471                }
11472            } else {
11473                const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
11474                                              ? mem_info->alloc_info.allocationSize
11475                                              : (mem_info->mem_range.offset + mem_info->mem_range.size);
11476                if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
11477                    (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
11478                    skip |=
11479                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11480                                (uint64_t)pMemRanges[i].memory, __LINE__, VALIDATION_ERROR_00642, "MEM",
11481                                "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
11482                                ") exceeds the Memory Object's upper-bound "
11483                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
11484                                funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
11485                                static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end),
11486                                validation_error_map[VALIDATION_ERROR_00642]);
11487                }
11488            }
11489        }
11490    }
11491    return skip;
11492}
11493
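// Where a shadow copy exists for non-coherent memory, scan the fill-pattern guard bands on both sides of the
// mapped range for writes that strayed outside the mapping, then copy the shadow contents to driver memory.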
11494static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
11495                                                     const VkMappedMemoryRange *mem_ranges) {
11496    bool skip = false;
11497    for (uint32_t i = 0; i < mem_range_count; ++i) {
11498        auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
11499        if (mem_info) {
11500            if (mem_info->shadow_copy) {
11501                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
11502                                        ? mem_info->mem_range.size
11503                                        : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
11504                char *data = static_cast<char *>(mem_info->shadow_copy);
11505                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
11506                    if (data[j] != NoncoherentMemoryFillValue) {
11507                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11508                                        VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem_ranges[i].memory, __LINE__,
11509                                        MEMTRACK_INVALID_MAP, "MEM", "Memory underflow was detected on mem obj 0x%" PRIxLEAST64,
11510                                        (uint64_t)mem_ranges[i].memory);
11511                    }
11512                }
11513                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
11514                    if (data[j] != NoncoherentMemoryFillValue) {
11515                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11516                                        VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem_ranges[i].memory, __LINE__,
11517                                        MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
11518                                        (uint64_t)mem_ranges[i].memory);
11519                    }
11520                }
11521                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
11522            }
11523        }
11524    }
11525    return skip;
11526}
11527
11528static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
11529    for (uint32_t i = 0; i < mem_range_count; ++i) {
11530        auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
11531        if (mem_info && mem_info->shadow_copy) {
11532            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
11533                                    ? mem_info->mem_range.size
11534                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
11535            char *data = static_cast<char *>(mem_info->shadow_copy);
11536            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
11537        }
11538    }
11539}
11540
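// Flush/invalidate ranges must respect VkPhysicalDeviceLimits::nonCoherentAtomSize: the offset, and the size
// when it is not VK_WHOLE_SIZE, must be multiples of the atom size. Illustrative example: with an atom size of
// 0x40, an offset of 0x30 fails VALIDATION_ERROR_00644 and a size of 0x70 fails VALIDATION_ERROR_00645.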
11541static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
11542                                                  const VkMappedMemoryRange *mem_ranges) {
11543    bool skip = false;
11544    for (uint32_t i = 0; i < mem_range_count; ++i) {
11545        uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
11546        if (vk_safe_modulo(mem_ranges[i].offset, atom_size) != 0) {
11547            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
11548                            __LINE__, VALIDATION_ERROR_00644, "MEM",
11549                            "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
11550                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
11551                            func_name, i, mem_ranges[i].offset, atom_size, validation_error_map[VALIDATION_ERROR_00644]);
11552        }
11553        if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (vk_safe_modulo(mem_ranges[i].size, atom_size) != 0)) {
11554            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
11555                            __LINE__, VALIDATION_ERROR_00645, "MEM",
11556                            "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
11557                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
11558                            func_name, i, mem_ranges[i].size, atom_size, validation_error_map[VALIDATION_ERROR_00645]);
11559        }
11560    }
11561    return skip;
11562}
11563
11564static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
11565                                                   const VkMappedMemoryRange *mem_ranges) {
11566    bool skip = false;
11567    std::lock_guard<std::mutex> lock(global_lock);
11568    skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
11569    skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
11570    return skip;
11571}
11572
11573VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
11574                                                       const VkMappedMemoryRange *pMemRanges) {
11575    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11576    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11577
11578    if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
11579        result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
11580    }
11581    return result;
11582}
11583
11584static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
11585                                                        const VkMappedMemoryRange *mem_ranges) {
11586    bool skip = false;
11587    std::lock_guard<std::mutex> lock(global_lock);
11588    skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
11589    return skip;
11590}
11591
11592static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
11593                                                       const VkMappedMemoryRange *mem_ranges) {
11594    std::lock_guard<std::mutex> lock(global_lock);
11595    // Update our shadow copy with modified driver data
11596    CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
11597}
11598
11599VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
11600                                                            const VkMappedMemoryRange *pMemRanges) {
11601    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11602    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11603
11604    if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
11605        result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
11606        if (result == VK_SUCCESS) {
11607            PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
11608        }
11609    }
11610    return result;
11611}
11612
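// vkBindImageMemory: warn if vkGetImageMemoryRequirements() was never called (and call it to obtain the
// requirements), insert the bound range into the memory object's tracking, and validate the memory type bits.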
11613VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
11614    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11615    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11616    bool skip_call = false;
11617    std::unique_lock<std::mutex> lock(global_lock);
11618    auto image_state = getImageState(dev_data, image);
11619    if (image_state) {
11620        // Track objects tied to memory
11621        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
11622        skip_call = SetMemBinding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
11623        if (!image_state->memory_requirements_checked) {
11624            // There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
11625            //  BindImageMemory but it's implied in that memory being bound must conform with VkMemoryRequirements from
11626            //  vkGetImageMemoryRequirements()
11627            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11628                                 image_handle, __LINE__, DRAWSTATE_INVALID_IMAGE, "DS",
11629                                 "vkBindImageMemory(): Binding memory to image 0x%" PRIxLEAST64
11630                                 " but vkGetImageMemoryRequirements() has not been called on that image.",
11631                                 image_handle);
11632            // Make the call for them so we can verify the state
11633            lock.unlock();
11634            dev_data->dispatch_table.GetImageMemoryRequirements(device, image, &image_state->requirements);
11635            lock.lock();
11636        }
11637
11638        // Track and validate bound memory range information
11639        auto mem_info = getMemObjInfo(dev_data, mem);
11640        if (mem_info) {
11641            skip_call |= InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
11642                                                image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
11643            skip_call |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, "vkBindImageMemory()",
11644                                             VALIDATION_ERROR_00806);
11645        }
11646
11647        lock.unlock();
11648        if (!skip_call) {
11649            result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
11650            lock.lock();
11651            image_state->binding.mem = mem;
11652            image_state->binding.offset = memoryOffset;
11653            image_state->binding.size = image_state->requirements.size;
11654            lock.unlock();
11655        }
11656    } else {
11657        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11658                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
11659                "vkBindImageMemory: Cannot find invalid image 0x%" PRIx64 ", has it already been deleted?",
11660                reinterpret_cast<const uint64_t &>(image));
11661    }
11662    return result;
11663}
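// For reference, the sequence the warning above steers applications toward (illustrative only;
// allocation details elided):
//     VkMemoryRequirements reqs;
//     vkGetImageMemoryRequirements(device, image, &reqs);
//     // ... allocate VkDeviceMemory of at least reqs.size from a type in reqs.memoryTypeBits ...
//     vkBindImageMemory(device, image, memory, offset /* a multiple of reqs.alignment */);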
11664
11665VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
11666    bool skip_call = false;
11667    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11668    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11669    std::unique_lock<std::mutex> lock(global_lock);
11670    auto event_state = getEventNode(dev_data, event);
11671    if (event_state) {
11672        event_state->needsSignaled = false;
11673        event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
11674        if (event_state->write_in_use) {
11675            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
11676                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11677                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
11678                                 reinterpret_cast<const uint64_t &>(event));
11679        }
11680    }
11681    lock.unlock();
11682    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
11683    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
11684    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
11685    for (auto queue_data : dev_data->queueMap) {
11686        auto event_entry = queue_data.second.eventToStageMap.find(event);
11687        if (event_entry != queue_data.second.eventToStageMap.end()) {
11688            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
11689        }
11690    }
11691    if (!skip_call)
11692        result = dev_data->dispatch_table.SetEvent(device, event);
11693    return result;
11694}
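// Host-side vkSetEvent() is immediately visible to all queues, so it can unblock a previously
// recorded vkCmdWaitEvents() whose srcStageMask includes VK_PIPELINE_STAGE_HOST_BIT; that is why
// SetEvent() above folds the HOST stage bit into every queue's eventToStageMap entry.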
11695
11696VKAPI_ATTR VkResult VKAPI_CALL
11697QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
11698    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
11699    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11700    bool skip_call = false;
11701    std::unique_lock<std::mutex> lock(global_lock);
11702    auto pFence = getFenceNode(dev_data, fence);
11703    auto pQueue = getQueueState(dev_data, queue);
11704
11705    // First verify that fence is not in use
11706    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
11707
11708    if (pFence) {
11709        SubmitFence(pQueue, pFence, bindInfoCount);
11710    }
11711
11712    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
11713        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
11714        // Track objects tied to memory
11715        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
11716            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
11717                auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
11718                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
11719                                        (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
11720                                        "vkQueueBindSparse"))
11721                    skip_call = true;
11722            }
11723        }
11724        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
11725            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
11726                auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
11727                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
11728                                        (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11729                                        "vkQueueBindSparse"))
11730                    skip_call = true;
11731            }
11732        }
11733        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
11734            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
11735                auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
11736                // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
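                // (The computation below assumes tightly packed, 4-byte texels and ignores the
                // image's actual format, mip level, and aspect.)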
11737                VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
11738                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
11739                                        (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11740                                        "vkQueueBindSparse"))
11741                    skip_call = true;
11742            }
11743        }
11744
11745        std::vector<SEMAPHORE_WAIT> semaphore_waits;
11746        std::vector<VkSemaphore> semaphore_signals;
11747        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
11748            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
11749            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11750            if (pSemaphore) {
11751                if (pSemaphore->signaled) {
11752                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
11753                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
11754                        pSemaphore->in_use.fetch_add(1);
11755                    }
11756                    pSemaphore->signaler.first = VK_NULL_HANDLE;
11757                    pSemaphore->signaled = false;
11758                } else {
11759                    skip_call |= log_msg(
11760                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11761                        reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11762                        "vkQueueBindSparse: Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
11763                        queue, reinterpret_cast<const uint64_t &>(semaphore));
11764                }
11765            }
11766        }
11767        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
11768            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
11769            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11770            if (pSemaphore) {
11771                if (pSemaphore->signaled) {
11772                    skip_call |=
11773                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11774                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11775                                "vkQueueBindSparse: Queue 0x%p is signaling semaphore 0x%" PRIx64
11776                                ", but that semaphore is already signaled.",
11777                                queue, reinterpret_cast<const uint64_t &>(semaphore));
11778                }
11779                else {
11780                    pSemaphore->signaler.first = queue;
11781                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
11782                    pSemaphore->signaled = true;
11783                    pSemaphore->in_use.fetch_add(1);
11784                    semaphore_signals.push_back(semaphore);
11785                }
11786            }
11787        }
11788
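        // One submission is recorded per bind batch, and only the last batch carries the fence,
        // since vkQueueBindSparse signals its fence once after all batches complete. A semaphore
        // signaler recorded above as (queue, seq + submissions.size() + 1) therefore names this
        // batch's sequence number; retiring that submission later proves the signal occurred.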
11789        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11790                                         semaphore_waits,
11791                                         semaphore_signals,
11792                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
11793    }
11794
11795    if (pFence && !bindInfoCount) {
11796        // No work to do, just dropping a fence in the queue by itself.
11797        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11798                                         std::vector<SEMAPHORE_WAIT>(),
11799                                         std::vector<VkSemaphore>(),
11800                                         fence);
11801    }
11802
11803    lock.unlock();
11804
11805    if (!skip_call)
11806        return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
11807
11808    return result;
11809}
11810
11811VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
11812                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
11813    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11814    VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
11815    if (result == VK_SUCCESS) {
11816        std::lock_guard<std::mutex> lock(global_lock);
11817        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
11818        sNode->signaler.first = VK_NULL_HANDLE;
11819        sNode->signaler.second = 0;
11820        sNode->signaled = false;
11821    }
11822    return result;
11823}
11824
11825VKAPI_ATTR VkResult VKAPI_CALL
11826CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
11827    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11828    VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
11829    if (result == VK_SUCCESS) {
11830        std::lock_guard<std::mutex> lock(global_lock);
11831        dev_data->eventMap[*pEvent].needsSignaled = false;
11832        dev_data->eventMap[*pEvent].write_in_use = 0;
11833        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
11834    }
11835    return result;
11836}
11837
11838static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
11839                                              VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
11840                                              SWAPCHAIN_NODE *old_swapchain_state) {
11841    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
11842
11843    // TODO: revisit this. some of these rules are being relaxed.
11844    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
11845        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11846                    reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
11847                    "%s: surface has an existing swapchain other than oldSwapchain", func_name))
11848            return true;
11849    }
11850    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
11851        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11852                    reinterpret_cast<uint64_t const &>(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE,
11853                    "DS", "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
11854            return true;
11855    }
11856    auto physical_device_state = getPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
11857    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
11858        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11859                    reinterpret_cast<uint64_t>(dev_data->physical_device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
11860                    "%s: surface capabilities not retrieved for this physical device", func_name))
11861            return true;
11862    } else { // have valid capabilities
11863        auto &capabilities = physical_device_state->surfaceCapabilities;
11864        // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
11865        if (pCreateInfo->minImageCount < capabilities.minImageCount) {
11866            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11867                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02331, "DS",
11868                        "%s called with minImageCount = %d, which is outside the bounds returned "
11869                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
11870                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
11871                        validation_error_map[VALIDATION_ERROR_02331]))
11872                return true;
11873        }
11874
11875        if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
11876            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11877                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02332, "DS",
11878                        "%s called with minImageCount = %d, which is outside the bounds returned "
11879                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
11880                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
11881                        validation_error_map[VALIDATION_ERROR_02332]))
11882                return true;
11883        }
11884
11885        // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
11886        if ((capabilities.currentExtent.width == kSurfaceSizeFromSwapchain) &&
11887            ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
11888             (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
11889             (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
11890             (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height))) {
11891            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11892                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02334, "DS",
11893                        "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
11894                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
11895                        "maxImageExtent = (%d,%d). %s",
11896                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
11897                        capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
11898                        capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height,
11899                        validation_error_map[VALIDATION_ERROR_02334]))
11900                return true;
11901        }
11902        if ((capabilities.currentExtent.width != kSurfaceSizeFromSwapchain) &&
11903            ((pCreateInfo->imageExtent.width != capabilities.currentExtent.width) ||
11904             (pCreateInfo->imageExtent.height != capabilities.currentExtent.height))) {
11905            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11906                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02334, "DS",
11907                        "%s called with imageExtent = (%d,%d), which is not equal to the currentExtent = (%d,%d) returned by "
11908                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(). %s",
11909                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
11910                        capabilities.currentExtent.width, capabilities.currentExtent.height,
11911                        validation_error_map[VALIDATION_ERROR_02334]))
11912                return true;
11913        }
11914        // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
11915        // VkSurfaceCapabilitiesKHR::supportedTransforms.
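        // (The test below uses x & (x - 1), which clears the lowest set bit and is therefore
        // nonzero exactly when more than one bit is set.)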
11916        if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
11917            !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
11918            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
11919            // it up a little at a time, and then log it:
11920            std::string errorString = "";
11921            char str[1024];
11922            // Here's the first part of the message:
11923            snprintf(str, sizeof(str), "%s called with a non-supported pCreateInfo->preTransform (i.e. %s).  Supported values are:\n",
11924                     func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
11925            errorString += str;
11926            for (int i = 0; i < 32; i++) {
11927                // Build up the rest of the message:
11928                if ((1u << i) & capabilities.supportedTransforms) {
11929                    const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1u << i));
11930                    snprintf(str, sizeof(str), "    %s\n", newStr);
11931                    errorString += str;
11932                }
11933            }
11934            // Log the message that we've built up:
11935            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11936                        reinterpret_cast<uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_02339, "DS", "%s. %s",
11937                        errorString.c_str(), validation_error_map[VALIDATION_ERROR_02339]))
11938                return true;
11939        }
11940
11941        // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
11942        // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
11943        if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
11944            !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
11945            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
11946            // it up a little at a time, and then log it:
11947            std::string errorString = "";
11948            char str[1024];
11949            // Here's the first part of the message:
11950            snprintf(str, sizeof(str), "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s).  Supported values are:\n",
11951                     func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
11952            errorString += str;
11953            for (int i = 0; i < 32; i++) {
11954                // Build up the rest of the message:
11955                if ((1u << i) & capabilities.supportedCompositeAlpha) {
11956                    const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1u << i));
11957                    snprintf(str, sizeof(str), "    %s\n", newStr);
11958                    errorString += str;
11959                }
11960            }
11961            // Log the message that we've built up:
11962            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11963                        reinterpret_cast<uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_02340, "DS", "%s. %s",
11964                        errorString.c_str(), validation_error_map[VALIDATION_ERROR_02340]))
11965                return true;
11966        }
11967        // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
11968        if ((pCreateInfo->imageArrayLayers < 1) || (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers)) {
11969            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11970                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02335, "DS",
11971                        "%s called with a non-supported imageArrayLayers (i.e. %d).  Minimum value is 1, maximum value is %d. %s",
11972                        func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers,
11973                        validation_error_map[VALIDATION_ERROR_02335]))
11974                return true;
11975        }
11976        // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
11977        if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
11978            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11979                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02336, "DS",
11980                        "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x).  Supported flag bits are 0x%08x. %s",
11981                        func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags,
11982                        validation_error_map[VALIDATION_ERROR_02336]))
11983                return true;
11984        }
11985    }
11986
11987    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
11988    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
11989        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11990                    reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
11991                    "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
11992            return true;
11993    } else {
11994        // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
11995        bool foundFormat = false;
11996        bool foundColorSpace = false;
11997        bool foundMatch = false;
11998        for (auto const &format : physical_device_state->surface_formats) {
11999            if (pCreateInfo->imageFormat == format.format) {
12000                // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
12001                foundFormat = true;
12002                if (pCreateInfo->imageColorSpace == format.colorSpace) {
12003                    foundMatch = true;
12004                    break;
12005                }
12006            } else {
12007                if (pCreateInfo->imageColorSpace == format.colorSpace) {
12008                    foundColorSpace = true;
12009                }
12010            }
12011        }
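        // Note: format and colorSpace are validated independently; a pairing of individually
        // supported values that never appears together in surface_formats is not flagged here.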
12012        if (!foundMatch) {
12013            if (!foundFormat) {
12014                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
12015                            reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02333, "DS",
12016                            "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d). %s",
12017                            func_name, pCreateInfo->imageFormat, validation_error_map[VALIDATION_ERROR_02333]))
12018                    return true;
12019            }
12020            if (!foundColorSpace) {
12021                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
12022                            reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02333, "DS",
12023                            "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d). %s",
12024                            func_name, pCreateInfo->imageColorSpace, validation_error_map[VALIDATION_ERROR_02333]))
12025                    return true;
12026            }
12027        }
12028    }
12029
12030    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
12031    if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
12032        // FIFO is required to always be supported
12033        if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
12034            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
12035                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
12036                        "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
12037                return true;
12038        }
12039    } else {
12040        // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
12041        bool foundMatch = std::find(physical_device_state->present_modes.begin(),
12042                                    physical_device_state->present_modes.end(),
12043                                    pCreateInfo->presentMode) != physical_device_state->present_modes.end();
12044        if (!foundMatch) {
12045            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
12046                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02341, "DS",
12047                        "%s called with a non-supported presentMode (i.e. %s). %s", func_name,
12048                        string_VkPresentModeKHR(pCreateInfo->presentMode), validation_error_map[VALIDATION_ERROR_02341]))
12049                return true;
12050        }
12051    }
12052
12053    return false;
12054}
12055
12056static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
12057                                             VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
12058                                             SWAPCHAIN_NODE *old_swapchain_state) {
12059    if (VK_SUCCESS == result) {
12060        std::lock_guard<std::mutex> lock(global_lock);
12061        auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
12062        surface_state->swapchain = swapchain_state.get();
12063        dev_data->device_extensions.swapchainMap[*pSwapchain] = std::move(swapchain_state);
12064    } else {
12065        surface_state->swapchain = nullptr;
12066    }
12067    // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
12068    if (old_swapchain_state) {
12069        old_swapchain_state->replaced = true;
12070    }
12071    surface_state->old_swapchain = old_swapchain_state;
12072    return;
12073}
12074
12075VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
12076                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
12077    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
12078    auto surface_state = getSurfaceState(dev_data->instance_data, pCreateInfo->surface);
12079    auto old_swapchain_state = getSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
12080
12081    if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
12082        return VK_ERROR_VALIDATION_FAILED_EXT;
12083    }
12084
12085    VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
12086
12087    PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
12088
12089    return result;
12090}
12091
12092VKAPI_ATTR void VKAPI_CALL
12093DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
12094    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
12095    bool skip_call = false;
12096
12097    std::unique_lock<std::mutex> lock(global_lock);
12098    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
12099    if (swapchain_data) {
12100        if (swapchain_data->images.size() > 0) {
12101            for (auto swapchain_image : swapchain_data->images) {
12102                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
12103                if (image_sub != dev_data->imageSubresourceMap.end()) {
12104                    for (auto imgsubpair : image_sub->second) {
12105                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
12106                        if (image_item != dev_data->imageLayoutMap.end()) {
12107                            dev_data->imageLayoutMap.erase(image_item);
12108                        }
12109                    }
12110                    dev_data->imageSubresourceMap.erase(image_sub);
12111                }
12112                skip_call =
12113                    ClearMemoryObjectBindings(dev_data, (uint64_t)swapchain_image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
12114                dev_data->imageMap.erase(swapchain_image);
12115            }
12116        }
12117
12118        auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
12119        if (surface_state) {
12120            if (surface_state->swapchain == swapchain_data)
12121                surface_state->swapchain = nullptr;
12122            if (surface_state->old_swapchain == swapchain_data)
12123                surface_state->old_swapchain = nullptr;
12124        }
12125
12126        dev_data->device_extensions.swapchainMap.erase(swapchain);
12127    }
12128    lock.unlock();
12129    if (!skip_call)
12130        dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
12131}
12132
12133VKAPI_ATTR VkResult VKAPI_CALL
12134GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
12135    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
12136    VkResult result = dev_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
12137
12138    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
12139        // This should never happen and is checked by param checker.
12140        if (!pCount)
12141            return result;
12142        std::lock_guard<std::mutex> lock(global_lock);
12143        const size_t count = *pCount;
12144        auto swapchain_node = getSwapchainNode(dev_data, swapchain);
12145        if (swapchain_node && !swapchain_node->images.empty()) {
12146            // TODO : Not sure I like the memcmp here, but it works
12147            const bool mismatch = (swapchain_node->images.size() != count ||
12148                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
12149            if (mismatch) {
12150                // TODO: Verify against Valid Usage section of extension
12151                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12152                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
12153                        "vkGetSwapchainImagesKHR(0x%" PRIx64
12154                        ") returned mismatching data",
12155                        (uint64_t)(swapchain));
12156            }
12157        }
12158        for (uint32_t i = 0; swapchain_node && i < *pCount; ++i) {  // skip recording for an untracked swapchain
12159            IMAGE_LAYOUT_NODE image_layout_node;
12160            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
12161            image_layout_node.format = swapchain_node->createInfo.imageFormat;
12162            // Add imageMap entries for each swapchain image
12163            VkImageCreateInfo image_ci = {};
12164            image_ci.mipLevels = 1;
12165            image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
12166            image_ci.usage = swapchain_node->createInfo.imageUsage;
12167            image_ci.format = swapchain_node->createInfo.imageFormat;
12168            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
12169            image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
12170            image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
12171            image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
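            // Fields not set here (sType, imageType, extent.depth, initialLayout, ...) remain
            // zero-initialized from the aggregate initialization above; this proxy create info
            // only carries the members later validation consults.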
12172            dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
12173            auto &image_state = dev_data->imageMap[pSwapchainImages[i]];
12174            image_state->valid = false;
12175            image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
12176            swapchain_node->images.push_back(pSwapchainImages[i]);
12177            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
12178            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
12179            dev_data->imageLayoutMap[subpair] = image_layout_node;
12180            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
12181        }
12182    }
12183    return result;
12184}
12185
12186VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
12187    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
12188    bool skip_call = false;
12189
12190    std::lock_guard<std::mutex> lock(global_lock);
12191    auto queue_state = getQueueState(dev_data, queue);
12192
12193    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
12194        auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
12195        if (pSemaphore && !pSemaphore->signaled) {
12196            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
12197                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
12198                                 "DS", "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
12199                                 reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
12200        }
12201    }
12202
12203    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
12204        auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
12205        if (swapchain_data) {
12206            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
12207                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12208                                     reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE,
12209                                     "DS", "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
12210                                     pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
12211            }
12212            else {
12213                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
12214                auto image_state = getImageState(dev_data, image);
12215                skip_call |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");
12216
12217                if (!image_state->acquired) {
12218                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12219                                         reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED,
12220                                         "DS", "vkQueuePresentKHR: Swapchain image index %u has not been acquired.",
12221                                         pPresentInfo->pImageIndices[i]);
12222                }
12223
12224                vector<VkImageLayout> layouts;
12225                if (FindLayouts(dev_data, image, layouts)) {
12226                    for (auto layout : layouts) {
12227                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
12228                            skip_call |=
12229                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
12230                                        reinterpret_cast<uint64_t &>(queue), __LINE__, VALIDATION_ERROR_01964, "DS",
12231                                        "Images passed to present must be in layout "
12232                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but is in %s. %s",
12233                                        string_VkImageLayout(layout), validation_error_map[VALIDATION_ERROR_01964]);
12234                        }
12235                    }
12236                }
12237            }
12238
12239            // All physical devices and queue families are required to be able
12240            // to present to any native window on Android; require the
12241            // application to have established support on any other platform.
12242            if (!dev_data->instance_data->androidSurfaceExtensionEnabled) {
12243                auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
12244                auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});
12245
12246                if (support_it == surface_state->gpu_queue_support.end()) {
12247                    skip_call |=
12248                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12249                                reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__,
12250                                DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE, "DS", "vkQueuePresentKHR: Presenting image without calling "
12251                                                                             "vkGetPhysicalDeviceSurfaceSupportKHR");
12252                } else if (!support_it->second) {
12253                    skip_call |=
12254                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12255                                reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, VALIDATION_ERROR_01961,
12256                                "DS", "vkQueuePresentKHR: Presenting image on queue that cannot "
12257                                      "present to this surface. %s",
12258                                validation_error_map[VALIDATION_ERROR_01961]);
12259                }
12260            }
12261        }
12262    }
12263
12264    if (skip_call) {
12265        return VK_ERROR_VALIDATION_FAILED_EXT;
12266    }
12267
12268    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
12269
12270    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
12271        // Semaphore waits occur before error generation, if the call reached
12272        // the ICD. (Confirm?)
12273        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
12274            auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
12275            if (pSemaphore) {
12276                pSemaphore->signaler.first = VK_NULL_HANDLE;
12277                pSemaphore->signaled = false;
12278            }
12279        }
12280
12281        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
12282            // Note: this is imperfect, in that we can get confused about what
12283            // did or didn't succeed-- but if the app does that, it's confused
12284            // itself just as much.
12285            auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
12286
12287            if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR)
12288                continue; // this present didn't actually happen.
12289
12290            // Mark the image as having been released to the WSI
12291            auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
12292            auto image_state = swapchain_data
12293                                   ? getImageState(dev_data, swapchain_data->images[pPresentInfo->pImageIndices[i]]) : nullptr;
12294            if (image_state) image_state->acquired = false;
12295        }
12296
12297        // Note: even though presentation is directed to a queue, there is no
12298        // direct ordering between QP and subsequent work, so QP (and its
12299        // semaphore waits) /never/ participate in any completion proof.
12300    }
12301
12302    return result;
12303}
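// A minimal present call exercising the validation above (illustrative; handle names are
// placeholders):
//     VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
//     present_info.waitSemaphoreCount = 1;  present_info.pWaitSemaphores = &render_done;
//     present_info.swapchainCount = 1;      present_info.pSwapchains = &swapchain;
//     present_info.pImageIndices = &image_index;
//     vkQueuePresentKHR(queue, &present_info);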
12304
12305static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
12306                                                     const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
12307                                                     std::vector<SURFACE_STATE *> &surface_state,
12308                                                     std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
12309    if (pCreateInfos) {
12310        std::lock_guard<std::mutex> lock(global_lock);
12311        for (uint32_t i = 0; i < swapchainCount; i++) {
12312            surface_state.push_back(getSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
12313            old_swapchain_state.push_back(getSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
12314            std::stringstream func_name;
12315            func_name << "vkCreateSharedSwapchainsKHR[" << i << "]";
12316            if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i], old_swapchain_state[i])) {
12317                return true;
12318            }
12319        }
12320    }
12321    return false;
12322}
12323
12324static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
12325                                                    const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
12326                                                    std::vector<SURFACE_STATE *> &surface_state,
12327                                                    std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
12328    if (VK_SUCCESS == result) {
12329        for (uint32_t i = 0; i < swapchainCount; i++) {
12330            auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
12331            surface_state[i]->swapchain = swapchain_state.get();
12332            dev_data->device_extensions.swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
12333        }
12334    } else {
12335        for (uint32_t i = 0; i < swapchainCount; i++) {
12336            surface_state[i]->swapchain = nullptr;
12337        }
12338    }
12339    // Spec requires that even if CreateSharedSwapchainKHR fails, oldSwapchain behaves as replaced.
12340    for (uint32_t i = 0; i < swapchainCount; i++) {
12341        if (old_swapchain_state[i]) {
12342            old_swapchain_state[i]->replaced = true;
12343        }
12344        surface_state[i]->old_swapchain = old_swapchain_state[i];
12345    }
12346    return;
12347}
12348
12349VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
12350                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
12351                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
12352    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
12353    std::vector<SURFACE_STATE *> surface_state;
12354    std::vector<SWAPCHAIN_NODE *> old_swapchain_state;
12355
12356    if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
12357                                                 old_swapchain_state)) {
12358        return VK_ERROR_VALIDATION_FAILED_EXT;
12359    }
12360
12361    VkResult result =
12362        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
12363
12364    PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
12365                                            old_swapchain_state);
12366
12367    return result;
12368}
12369
12370VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
12371                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
12372    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
12373    bool skip_call = false;
12374
12375    std::unique_lock<std::mutex> lock(global_lock);
12376
12377    if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
12378        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
12379                             reinterpret_cast<uint64_t &>(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
12380                             "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
12381                             "to determine the completion of this operation.");
12382    }
12383
12384    auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
12385    if (pSemaphore && pSemaphore->signaled) {
12386        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
12387                             reinterpret_cast<const uint64_t &>(semaphore), __LINE__, VALIDATION_ERROR_01952, "DS",
12388                             "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state. %s",
12389                             validation_error_map[VALIDATION_ERROR_01952]);
12390    }
12391
12392    auto pFence = getFenceNode(dev_data, fence);
12393    if (pFence) {
12394        skip_call |= ValidateFenceForSubmit(dev_data, pFence);
12395    }
12396
12397    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
12398
12399    if (swapchain_data && swapchain_data->replaced) {
12400        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12401                             reinterpret_cast<uint64_t &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_REPLACED, "DS",
12402                             "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still "
12403                             "present any images it has acquired, but cannot acquire any more.");
12404    }
12405
12406    auto physical_device_state = getPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
12407    if (swapchain_data && (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED)) {
12408        uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
12409                                                 [=](VkImage image) { return getImageState(dev_data, image)->acquired; });
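        // An application may acquire at most (imageCount - minImageCount) images ahead of
        // presentation; acquisitions beyond that bound may block indefinitely.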
12410        if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
12411            skip_call |=
12412                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12413                        reinterpret_cast<uint64_t const &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES, "DS",
12414                        "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
12415                        acquired_images);
12416        }
12417    }
12418    lock.unlock();
12419
12420    if (skip_call)
12421        return VK_ERROR_VALIDATION_FAILED_EXT;
12422
12423    VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
12424
12425    lock.lock();
12426    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
12427        if (pFence) {
12428            pFence->state = FENCE_INFLIGHT;
12429            pFence->signaler.first = VK_NULL_HANDLE;   // ANI isn't on a queue, so this can't participate in a completion proof.
12430        }
12431
12432        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
12433        if (pSemaphore) {
12434            pSemaphore->signaled = true;
12435            pSemaphore->signaler.first = VK_NULL_HANDLE;
12436        }
12437
12438        // Mark the image as acquired, guarding against a swapchain the layer never tracked.
12439        auto image_state = (swapchain_data && *pImageIndex < swapchain_data->images.size())
12440                               ? getImageState(dev_data, swapchain_data->images[*pImageIndex]) : nullptr;
12441        if (image_state) image_state->acquired = true;
12442    }
12443    lock.unlock();
12444
12445    return result;
12446}
12447
12448VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
12449                                                        VkPhysicalDevice *pPhysicalDevices) {
12450    bool skip_call = false;
12451    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
12452    assert(instance_data);
12453
12454    // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
12455    if (NULL == pPhysicalDevices) {
12456        instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
12457    } else {
12458        if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
12459            // Flag warning here. You can call this without having queried the count, but it may not be
12460            // robust on platforms with multiple physical devices.
12461            skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12462                                 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
12463                                 "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
12464                                 "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
12465        } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
12466        else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
12467            // Having actual count match count from app is not a requirement, so this can be a warning
12468            skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12469                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
12470                                 "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
12471                                 "supported by this instance is %u.",
12472                                 *pPhysicalDeviceCount, instance_data->physical_devices_count);
12473        }
12474        instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
12475    }
12476    if (skip_call) {
12477        return VK_ERROR_VALIDATION_FAILED_EXT;
12478    }
12479    VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
12480    if (NULL == pPhysicalDevices) {
12481        instance_data->physical_devices_count = *pPhysicalDeviceCount;
12482    } else if (result == VK_SUCCESS) { // Save physical devices
12483        for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
12484            auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
12485            phys_device_state.phys_device = pPhysicalDevices[i];
12486            // Init actual features for each physical device
12487            instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
12488        }
12489    }
12490    return result;
12491}
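// The expected two-call idiom for the checks above (application side, illustrative):
//     uint32_t count = 0;
//     vkEnumeratePhysicalDevices(instance, &count, NULL);         // transitions to QUERY_COUNT
//     std::vector<VkPhysicalDevice> gpus(count);
//     vkEnumeratePhysicalDevices(instance, &count, gpus.data());  // transitions to QUERY_DETAILS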
12492
12493VKAPI_ATTR void VKAPI_CALL
12494GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
12495                                       VkQueueFamilyProperties *pQueueFamilyProperties) {
12496    bool skip_call = false;
12497    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12498    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
12499    if (physical_device_state) {
12500        if (!pQueueFamilyProperties) {
12501            physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
12502        }
12503        else {
12504            // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to
12505            // get count
12506            if (UNCALLED == physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
12507                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12508                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
12509                    "Call sequence has vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL "
12510                    "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ "
12511                    "NULL pQueueFamilyProperties to query pCount.");
12512            }
12513            // Then verify that pCount that is passed in on second call matches what was returned
12514            if (physical_device_state->queueFamilyPropertiesCount != *pCount) {
12515
12516                // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
12517                // provide as warning
12518                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12519                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
12520                    "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count "
12521                    "supported by this physicalDevice is %u.",
12522                    *pCount, physical_device_state->queueFamilyPropertiesCount);
12523            }
12524            physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
12525        }
12526        if (skip_call) {
12527            return;
12528        }
12529        instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, pQueueFamilyProperties);
12530        if (!pQueueFamilyProperties) {
12531            physical_device_state->queueFamilyPropertiesCount = *pCount;
12532        }
12533        else { // Save queue family properties
12534            if (physical_device_state->queue_family_properties.size() < *pCount)
12535                physical_device_state->queue_family_properties.resize(*pCount);
12536            for (uint32_t i = 0; i < *pCount; i++) {
12537                physical_device_state->queue_family_properties[i] = pQueueFamilyProperties[i];
12538            }
12539        }
12540    }
12541    else {
12542        log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
12543                __LINE__, VALIDATION_ERROR_00028, "DL",
12544                "Invalid physicalDevice (0x%p) passed into vkGetPhysicalDeviceQueueFamilyProperties(). %s", physicalDevice,
12545                validation_error_map[VALIDATION_ERROR_00028]);
12546    }
12547}
12548
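// CreateSurface() factors the platform-specific vkCreate*SurfaceKHR entry points
// into one helper: the caller passes the matching dispatch-table member-function
// pointer, and on success the new surface is registered in instance_data->surface_map.
// Each platform wrapper below instantiates it the same way, e.g.:
//
//     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface,
//                          &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);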
template<typename TCreateInfo, typename FPtr>
static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo,
                              VkAllocationCallbacks const *pAllocator, VkSurfaceKHR *pSurface,
                              FPtr fptr) {
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);

    // Call down the call chain:
    VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);

    if (result == VK_SUCCESS) {
        std::unique_lock<std::mutex> lock(global_lock);
        instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
        lock.unlock();
    }

    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
    bool skip_call = false;
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto surface_state = getSurfaceState(instance_data, surface);

    if (surface_state) {
        // TODO: track swapchains created from this surface.
        instance_data->surface_map.erase(surface);
    }
    lock.unlock();

    if (!skip_call) {
        // Call down the call chain:
        instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
}

#ifdef VK_USE_PLATFORM_ANDROID_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
}
#endif // VK_USE_PLATFORM_ANDROID_KHR

#ifdef VK_USE_PLATFORM_MIR_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
}
#endif // VK_USE_PLATFORM_MIR_KHR

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
}
#endif // VK_USE_PLATFORM_WAYLAND_KHR

#ifdef VK_USE_PLATFORM_WIN32_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
}
#endif // VK_USE_PLATFORM_WIN32_KHR

#ifdef VK_USE_PLATFORM_XCB_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
}
#endif // VK_USE_PLATFORM_XCB_KHR

#ifdef VK_USE_PLATFORM_XLIB_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
}
#endif // VK_USE_PLATFORM_XLIB_KHR


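// The surface-query wrappers below cache what the application has queried
// (capabilities, queue support, present modes, formats) in per-physical-device
// state, tracking a per-query call state (QUERY_COUNT / QUERY_DETAILS) so that
// later swapchain creation can be validated against data the app actually fetched.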
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                       VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
    auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
    lock.unlock();

    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface,
                                                                                        pSurfaceCapabilities);

    if (result == VK_SUCCESS) {
        physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
        physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
    }

    return result;
}


VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
                                                                  VkSurfaceKHR surface, VkBool32 *pSupported) {
    auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto surface_state = getSurfaceState(instance_data, surface);
    lock.unlock();

    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface,
                                                                                   pSupported);

    if (result == VK_SUCCESS) {
        surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported != 0);
    }

    return result;
}


VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                       uint32_t *pPresentModeCount,
                                                                       VkPresentModeKHR *pPresentModes) {
    bool skip_call = false;
    auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO: this isn't quite right. available modes may differ by surface AND physical device.
    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;

    if (pPresentModes) {
        // Compare the preliminary value of *pPresentModeCount with the value this time:
        auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
        switch (call_state) {
        case UNCALLED:
            skip_call |= log_msg(
                instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModes, but no prior call "
                "with NULL pPresentModes has been made to query pPresentModeCount.");
            break;
        default:
            // both query count and query details
            if (*pPresentModeCount != prev_mode_count) {
                skip_call |= log_msg(
                        instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                        reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                        "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that differs from the value "
                        "(%u) that was returned when pPresentModes was NULL.",
                        *pPresentModeCount, prev_mode_count);
            }
            break;
        }
    }
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface,
                                                                                        pPresentModeCount, pPresentModes);

    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        lock.lock();

        if (*pPresentModeCount) {
            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
            if (*pPresentModeCount > physical_device_state->present_modes.size())
                physical_device_state->present_modes.resize(*pPresentModeCount);
        }
        if (pPresentModes) {
            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
            for (uint32_t i = 0; i < *pPresentModeCount; i++) {
                physical_device_state->present_modes[i] = pPresentModes[i];
            }
        }
    }

    return result;
}


VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                  uint32_t *pSurfaceFormatCount,
                                                                  VkSurfaceFormatKHR *pSurfaceFormats) {
    bool skip_call = false;
    auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;

    if (pSurfaceFormats) {
        auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();

        switch (call_state) {
        case UNCALLED:
            // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, the application likely didn't
            // previously call this function with a NULL value of pSurfaceFormats:
            skip_call |= log_msg(
                instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats, but no prior call "
                "with NULL pSurfaceFormats has been made to query pSurfaceFormatCount.");
            break;
        default:
            if (prev_format_count != *pSurfaceFormatCount) {
                skip_call |= log_msg(
                        instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                        reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                        "vkGetPhysicalDeviceSurfaceFormatsKHR() called with *pSurfaceFormatCount (%u) that differs from the value "
                        "(%u) that was returned when pSurfaceFormats was NULL.",
                        *pSurfaceFormatCount, prev_format_count);
            }
            break;
        }
    }
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    // Call down the call chain:
    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
                                                                                   pSurfaceFormats);

    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        lock.lock();

        if (*pSurfaceFormatCount) {
            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
            if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
                physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
        }
        if (pSurfaceFormats) {
            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
            for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
                physical_device_state->surface_formats[i] = pSurfaceFormats[i];
            }
        }
    }
    return result;
}


VKAPI_ATTR VkResult VKAPI_CALL
CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        std::lock_guard<std::mutex> lock(global_lock);
        res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
    }
    return res;
}

VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
                                                         VkDebugReportCallbackEXT msgCallback,
                                                         const VkAllocationCallbacks *pAllocator) {
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                  const char *pLayerName, uint32_t *pCount,
                                                                  VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);

    assert(physicalDevice);

    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

static PFN_vkVoidFunction
intercept_core_instance_command(const char *name);

static PFN_vkVoidFunction
intercept_core_device_command(const char *name);

static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev);

static PFN_vkVoidFunction
intercept_khr_surface_command(const char *name, VkInstance instance);

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
    if (proc)
        return proc;

    assert(dev);

    proc = intercept_khr_swapchain_command(funcName, dev);
    if (proc)
        return proc;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    auto &table = dev_data->dispatch_table;
    if (!table.GetDeviceProcAddr)
        return nullptr;
    return table.GetDeviceProcAddr(dev, funcName);
}

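// GetInstanceProcAddr resolves names in priority order: core instance commands,
// core device commands, swapchain commands, surface commands, the debug-report
// entry points, and finally the next layer's GetInstanceProcAddr. Typical
// application-side lookup (illustrative; the pointer name `pfn` is arbitrary):
//
//     auto pfn = reinterpret_cast<PFN_vkCreateDebugReportCallbackEXT>(
//         vkGetInstanceProcAddr(instance, "vkCreateDebugReportCallbackEXT"));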
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
    if (!proc)
        proc = intercept_core_device_command(funcName);
    if (!proc)
        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
    if (!proc)
        proc = intercept_khr_surface_command(funcName, instance);
    if (proc)
        return proc;

    assert(instance);

    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    proc = debug_report_get_instance_proc_addr(instance_data->report_data, funcName);
    if (proc)
        return proc;

    auto &table = instance_data->dispatch_table;
    if (!table.GetInstanceProcAddr)
        return nullptr;
    return table.GetInstanceProcAddr(instance, funcName);
}

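// Lookup tables mapping command names to this layer's intercept functions;
// consulted by GetDeviceProcAddr and GetInstanceProcAddr above.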
static PFN_vkVoidFunction
intercept_core_instance_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_instance_commands[] = {
        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
        { "vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices) },
        { "vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties) },
        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
        { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
        { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
        { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
        if (!strcmp(core_instance_commands[i].name, name))
            return core_instance_commands[i].proc;
    }

    return nullptr;
}

static PFN_vkVoidFunction
intercept_core_device_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_device_commands[] = {
        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
        if (!strcmp(core_device_commands[i].name, name))
            return core_device_commands[i].proc;
    }

    return nullptr;
}

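// Swapchain commands are only intercepted when the device enabled VK_KHR_swapchain
// (wsi_enabled); vkCreateSharedSwapchainsKHR additionally requires
// VK_KHR_display_swapchain (wsi_display_swapchain_enabled).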
static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } khr_swapchain_commands[] = {
        { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
        { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
        { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
        { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
        { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
    };
    layer_data *dev_data = nullptr;

    if (dev) {
        dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
        if (!dev_data->device_extensions.wsi_enabled)
            return nullptr;
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
        if (!strcmp(khr_swapchain_commands[i].name, name))
            return khr_swapchain_commands[i].proc;
    }

    if (dev_data) {
        if (!dev_data->device_extensions.wsi_display_swapchain_enabled)
            return nullptr;
    }

    if (!strcmp("vkCreateSharedSwapchainsKHR", name))
        return reinterpret_cast<PFN_vkVoidFunction>(CreateSharedSwapchainsKHR);

    return nullptr;
}

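// Each entry below pairs a WSI command with a pointer-to-member flag on
// instance_layer_data; the command is only intercepted when the corresponding
// surface extension was actually enabled at instance creation, otherwise the
// lookup falls through to the next layer in the chain.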
static PFN_vkVoidFunction
intercept_khr_surface_command(const char *name, VkInstance instance) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
        bool instance_layer_data::*enable;
    } khr_surface_commands[] = {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        {"vkCreateAndroidSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateAndroidSurfaceKHR),
            &instance_layer_data::androidSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
        {"vkCreateMirSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateMirSurfaceKHR),
            &instance_layer_data::mirSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
        {"vkCreateWaylandSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWaylandSurfaceKHR),
            &instance_layer_data::waylandSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
        {"vkCreateWin32SurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWin32SurfaceKHR),
            &instance_layer_data::win32SurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
        {"vkCreateXcbSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXcbSurfaceKHR),
            &instance_layer_data::xcbSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
        {"vkCreateXlibSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXlibSurfaceKHR),
            &instance_layer_data::xlibSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_XLIB_KHR
        {"vkCreateDisplayPlaneSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateDisplayPlaneSurfaceKHR),
            &instance_layer_data::displayExtensionEnabled},
        {"vkDestroySurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySurfaceKHR),
            &instance_layer_data::surfaceExtensionEnabled},
        {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceCapabilitiesKHR),
            &instance_layer_data::surfaceExtensionEnabled},
        {"vkGetPhysicalDeviceSurfaceSupportKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceSupportKHR),
            &instance_layer_data::surfaceExtensionEnabled},
        {"vkGetPhysicalDeviceSurfacePresentModesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfacePresentModesKHR),
            &instance_layer_data::surfaceExtensionEnabled},
        {"vkGetPhysicalDeviceSurfaceFormatsKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceFormatsKHR),
            &instance_layer_data::surfaceExtensionEnabled},
    };

    instance_layer_data *instance_data = nullptr;
    if (instance) {
        instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_surface_commands); i++) {
        if (!strcmp(khr_surface_commands[i].name, name)) {
            if (instance_data && !(instance_data->*(khr_surface_commands[i].enable)))
                return nullptr;
            return khr_surface_commands[i].proc;
        }
    }

    return nullptr;
}

} // namespace core_validation

// vk_layer_logging.h expects these to be defined

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                VkDebugReportCallbackEXT msgCallback,
                                const VkAllocationCallbacks *pAllocator) {
    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

// loader-layer interface v0, just wrappers since there is only a layer

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    // The layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // The layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}
