core_validation.cpp revision f72f8ce7e4dc62b19ebe25d3f55cfa5cbf1f5d5e
/* Copyright (c) 2015-2017 The Khronos Group Inc.
 * Copyright (c) 2015-2017 Valve Corporation
 * Copyright (c) 2015-2017 LunarG, Inc.
 * Copyright (C) 2015-2017 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: Dustin Graves <dustin@lunarg.com>
 * Author: Jeremy Hayes <jeremy@lunarg.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Karl Schultz <karl@lunarg.com>
 * Author: Mark Young <marky@lunarg.com>
 * Author: Mike Schuchardt <mikes@lunarg.com>
 * Author: Mike Weiblen <mikew@lunarg.com>
 * Author: Tony Barbour <tony@LunarG.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "core_validation.h"
#include "buffer_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)       \
    {                         \
        printf(__VA_ARGS__);  \
        printf("\n");         \
    }
#endif

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

using namespace std;

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);

// A special value of (0xFFFFFFFF, 0xFFFFFFFF) indicates that the surface size will be determined
// by the extent of a swapchain targeting the surface.
static const uint32_t kSurfaceSizeFromSwapchain = 0xFFFFFFFFu;

struct devExts {
    bool wsi_enabled;
    bool wsi_display_swapchain_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
    uint32_t physical_devices_count = 0;
    CHECK_DISABLED disabled = {};

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;

    bool surfaceExtensionEnabled = false;
    bool displayExtensionEnabled = false;
    bool androidSurfaceExtensionEnabled = false;
    bool mirSurfaceExtensionEnabled = false;
    bool waylandSurfaceExtensionEnabled = false;
    bool win32SurfaceExtensionEnabled = false;
    bool xcbSurfaceExtensionEnabled = false;
    bool xlibSurfaceExtensionEnabled = false;
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;

    devExts device_extensions = {};
    unordered_set<VkQueue> queues;  // All queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_STATE> queueMap;
    unordered_map<VkEvent, EVENT_STATE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_STATE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;

    VkDevice device = VK_NULL_HANDLE;
    VkPhysicalDevice physical_device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
    VkPhysicalDeviceProperties phys_dev_props = {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}
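
// Illustrative sketch (assumption: not part of this layer's real call graph): an
// application-side layer list that satisfies the ordering check above. Guarded out of
// the build, since this file implements the layer rather than consuming it.
#if 0
static VkInstanceCreateInfo example_layer_ordering() {
    // Correct order: core_validation is activated before unique_objects.
    static const char *layers[] = {
        "VK_LAYER_LUNARG_core_validation",
        "VK_LAYER_GOOGLE_unique_objects",
    };
    VkInstanceCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
    ci.enabledLayerCount = 2;
    ci.ppEnabledLayerNames = layers;
    return ci;
}
#endif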

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over SPIR-V instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIR-V module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { // x++
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { // ++x;
        it += len();
        return *this;
    }

    // The iterator and the value are the same thing.
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
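
// Illustrative sketch (hypothetical helper, guarded out of the build): walking a SPIR-V
// word stream with spirv_inst_iter. The iterator advances by each instruction's word
// count, so callers never do manual length bookkeeping. Assumes `words` holds a valid
// module: a 5-word header followed by instructions.
#if 0
static void example_count_opcodes(std::vector<uint32_t> const &words,
                                  std::unordered_map<uint32_t, uint32_t> &histogram) {
    spirv_inst_iter it(words.begin(), words.begin() + 5);  // First instruction
    spirv_inst_iter end(words.begin(), words.end());       // Just past the last one
    for (; it != end; ++it) {
        histogram[it.opcode()]++;
    }
}
#endif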

struct shader_module {
    // The SPIR-V image itself
    vector<uint32_t> words;
    // A mapping of <id> to the first word of its def. This is useful because walking type
    // trees, constant expressions, etc. requires jumping all over the instruction stream.
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    // Expose begin() / end() to enable range-based for
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } // First insn
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         // Just past last insn
    // Given an offset into the module, produce an iterator there.
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    // Gets an iterator to the definition of an id
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
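
// Illustrative sketch (hypothetical helper, guarded out of the build): resolving an <id>
// to its defining instruction through the def_index built at construction time. get_def()
// returns end() for ids with no recorded definition, so callers must check before use.
#if 0
static bool example_id_is_pointer_type(shader_module const &module, unsigned id) {
    auto def = module.get_def(id);
    return def != module.end() && def.opcode() == spv::OpTypePointer;
}
#endif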

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *getImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_STATE *getSamplerState(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
IMAGE_STATE *getImageState(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer state ptr for specified buffer or else NULL
BUFFER_STATE *getBufferState(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view state ptr for specified buffer view or else NULL
BUFFER_VIEW_STATE *getBufferViewState(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_STATE *getEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *getQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_STATE *getQueueState(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *getPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

SURFACE_STATE *getSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    auto it = instance_data->surface_map.find(surface);
    if (it == instance_data->surface_map.end()) {
        return nullptr;
    }
    return &it->second;
}

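// Illustrative sketch (hypothetical helper, guarded out of the build): the getters above
// do not lock, so callers are expected to hold global_lock around both the lookup and any
// use of the returned pointer.
#if 0
static bool example_image_exists(layer_data *dev_data, VkImage image) {
    std::lock_guard<std::mutex> lock(global_lock);
    return getImageState(dev_data, image) != nullptr;
}
#endif
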
// Return ptr to memory binding for given handle of specified type
static BINDABLE *GetObjectMemBinding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return getImageState(my_data, VkImage(handle));
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return getBufferState(my_data, VkBuffer(handle));
    default:
        break;
    }
    return nullptr;
}
// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict, uint64_t obj_handle,
                                 VkDebugReportObjectTypeEXT obj_type, int32_t const msgCode, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skip_call = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        if (msgCode == -1) {
            // TODO: Fix callers with msgCode == -1 to use correct validation checks.
            skip_call =
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                        MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                                            " used by %s. In this case, %s should have %s set during creation.",
                        ty_str, obj_handle, func_name, ty_str, usage_str);
        } else {
            const char *valid_usage = validation_error_map[msgCode];
            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__, msgCode, "MEM",
                                "Invalid usage flag for %s 0x%" PRIxLEAST64
                                " used by %s. In this case, %s should have %s set during creation. %s",
                                ty_str, obj_handle, func_name, ty_str, usage_str, valid_usage);
        }
    }
    return skip_call;
}

// Helper function to validate usage flags for images
// For given image_state send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_STATE const *image_state, VkFlags desired, VkBool32 strict,
                                    int32_t const msgCode, char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, image_state->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                msgCode, "image", func_name, usage_string);
}

// Helper function to validate usage flags for buffers
// For given buffer_state send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateBufferUsageFlags(layer_data *dev_data, BUFFER_STATE const *buffer_state, VkFlags desired, VkBool32 strict,
                                     int32_t const msgCode, char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, buffer_state->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(buffer_state->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                msgCode, "buffer", func_name, usage_string);
}
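
// Illustrative sketch (hypothetical call site, guarded out of the build): a transfer
// source copy requires VK_BUFFER_USAGE_TRANSFER_SRC_BIT to have been set at buffer
// creation. strict is true because that single bit must be present exactly; msgCode of
// -1 selects the generic MEMTRACK_INVALID_USAGE_FLAG path above.
#if 0
static bool example_check_copy_src(layer_data *dev_data, BUFFER_STATE const *buffer_state) {
    return ValidateBufferUsageFlags(dev_data, buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, -1,
                                    "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
}
#endif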

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// Helper function to print lowercase string of object type
//  TODO: Unify string helper functions, this should really come out of a string helper if not there already
static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
        return "image view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
        return "buffer view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
        return "descriptor set";
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
        return "framebuffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
        return "event";
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
        return "query pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
        return "descriptor pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
        return "command pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
        return "pipeline";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
        return "sampler";
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
        return "renderpass";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
        return "device memory";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
        return "semaphore";
    default:
        return "unknown";
    }
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
                                  VkDebugReportObjectTypeEXT type, const char *functionName) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(mem), object_type_to_string(type), bound_object_handle);
        }
    }
    return false;
}
// For given image_state
//  If mem is special swapchain key, then verify that image_state valid member is true
//  Else verify that the image's bound memory range is valid
static bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_state->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(image_state->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image),
                                     VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, functionName);
    }
    return false;
}
// For given buffer_state, verify that the range it's bound to is valid
static bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer),
                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_state to valid param value
//  Else set the image's bound memory range to valid param value
static void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_state->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
static void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
    SetMemoryValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer), valid);
}
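
// Illustrative sketch (hypothetical flow, guarded out of the build): how the valid-bit
// helpers above pair up. A write marks the bound range valid; a later read is checked
// against that bit and triggers the "fill the memory before using" warning if it is not.
#if 0
static void example_fill_then_read(layer_data *dev_data, BUFFER_STATE *buffer_state) {
    SetBufferMemoryValid(dev_data, buffer_state, true);             // e.g. recorded for vkCmdFillBuffer
    ValidateBufferMemoryIsValid(dev_data, buffer_state, "example"); // e.g. checked for vkCmdCopyBuffer
}
#endif
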
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skip_call = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, cb);
            pMemInfo->cb_bindings.insert(cb_node);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (cb_node) {
                cb_node->memObjs.insert(mem);
            }
        }
    }
    return skip_call;
}

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    sampler_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(sampler_state->sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        for (auto mem_binding : image_state->GetBoundMemory()) {
            DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem_binding);
            if (pMemInfo) {
                pMemInfo->cb_bindings.insert(cb_node);
                // Now update CBInfo's Mem reference list
                cb_node->memObjs.insert(mem_binding);
            }
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
        image_state->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT});
    auto image_state = getImageState(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_state) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
    // First update CB binding in MemObj mini CB list
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem_binding);
        if (pMemInfo) {
            pMemInfo->cb_bindings.insert(cb_node);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(mem_binding);
        }
    }
    // Now update cb binding for buffer
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buffer_state->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
    buffer_state->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT});
    auto buffer_state = getBufferState(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
    }
}

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (cb_node) {
        if (cb_node->memObjs.size() > 0) {
            for (auto mem : cb_node->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->cb_bindings.erase(cb_node);
                }
            }
            cb_node->memObjs.clear();
        }
        cb_node->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// Clear a single object binding from given memory object, or report error if binding is missing
static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory mem) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
    if (mem_info) {
        mem_info->obj_bindings.erase({handle, type});
    }
    return false;
}

// ClearMemoryObjectBindings clears the binding of objects to memory
//  For the given object it pulls the memory bindings and makes sure that the bindings
//  no longer refer to the object being cleared. This occurs when objects are destroyed.
static bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    bool skip = false;
    BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
    if (mem_binding) {
        if (!mem_binding->sparse) {
            skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
        } else { // Sparse, clear all bindings
            for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
                skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
            }
        }
    }
    return skip;
}

// For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound. Memory should be bound by calling "
                         "vkBind%sMemory(). %s",
                         api_name, type_name, handle, type_name, validation_error_map[error_code]);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound and previously bound memory was freed. "
                         "Memory must not be freed prior to this operation. %s",
                         api_name, type_name, handle, validation_error_map[error_code]);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
                                  UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem,
                                          reinterpret_cast<const uint64_t &>(image_state->image), api_name, "Image", error_code);
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
                                   UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem,
                                          reinterpret_cast<const uint64_t &>(buffer_state->buffer), api_name, "Buffer", error_code);
    }
    return result;
}

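// Illustrative sketch (hypothetical call site, guarded out of the build): the bound-memory
// checks above are the standard prelude for commands that consume buffers or images.
// VALIDATION_ERROR_UNDEFINED is a placeholder; real call sites pass the VU code that
// matches the API entry point being validated.
#if 0
static bool example_prevalidate_copy(const layer_data *dev_data, const IMAGE_STATE *image_state,
                                     const BUFFER_STATE *buffer_state) {
    bool skip = false;
    skip |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_UNDEFINED);
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_UNDEFINED);
    return skip;
}
#endif
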
// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object
// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
static bool SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
                          const char *apiName) {
    bool skip_call = false;
    // It's an error to bind an object to NULL memory
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        // TODO : Add check here to make sure object isn't sparse
        //  VALIDATION_ERROR_00792 for buffers
        //  VALIDATION_ERROR_00804 for images
        assert(!mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = getMemObjInfo(dev_data, mem_binding->binding.mem);
            if (prev_binding) {
                // TODO: VALIDATION_ERROR_00791 and VALIDATION_ERROR_00803
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which has already been bound to mem object 0x%" PRIxLEAST64,
                            apiName, reinterpret_cast<uint64_t &>(mem), handle, reinterpret_cast<uint64_t &>(prev_binding->mem));
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
                            "Vulkan so this attempt to bind to new memory is not allowed.",
                            apiName, reinterpret_cast<uint64_t &>(mem), handle);
            } else {
                mem_info->obj_bindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_state = getImageState(dev_data, VkImage(handle));
                    if (image_state) {
                        VkImageCreateInfo ici = image_state->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                }
                mem_binding->binding.mem = mem;
            }
        }
    }
    return skip_call;
}

// For NULL mem case, clear any previous binding. Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return skip value (currently always false; no validation failures are flagged here yet)
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VkDebugReportObjectTypeEXT type,
                                const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        assert(mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, binding.mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            mem_binding->sparse_bindings.insert(binding);
        }
    }
    return skip_call;
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        // Types
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        // Fixed constants
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        // Specialization constants
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        // Variables
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        // Functions
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            // We don't care about any other defs for now.
            break;
        }
    }
}
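
// Illustrative sketch (hypothetical helper, guarded out of the build): once
// build_def_index() has run, type walks are cheap map lookups. Here, chasing a pointer
// type through to the opcode of its pointee.
#if 0
static uint32_t example_pointee_opcode(shader_module const *src, unsigned pointer_type_id) {
    auto ptr = src->get_def(pointer_type_id);
    if (ptr == src->end() || ptr.opcode() != spv::OpTypePointer) return 0;
    auto pointee = src->get_def(ptr.word(3));  // Word 3 of OpTypePointer is the pointee type id
    return pointee == src->end() ? 0 : pointee.opcode();
}
#endif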

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}
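
// Illustrative sketch (hypothetical call site, guarded out of the build): checking the
// entrypoint named in a pipeline stage create info. A result of src->end() means the
// module contains no matching name/stage pair, which real callers report as an error.
#if 0
static bool example_has_entrypoint(shader_module *src, VkPipelineShaderStageCreateInfo const *stage) {
    return find_entrypoint(src, stage->pName, stage->stage) != src->end();
}
#endif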

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

// Get the value of an integral constant
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        // TODO: Either ensure that the specialization transform is already performed on a module we're
        //       considering here, OR -- specialize on the fly now.
        return 1;
    }

    return value.word(3);
}


static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}


static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}
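
// Worked example of the strings describe_type() produces, useful when reading the
// interface-mismatch diagnostics later in this file: a vec4 of 32-bit floats behind an
// input pointer renders as
//     ptr to input vec4 of float32
// and a two-member block as
//     struct of (float32, vec2 of float32)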


static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}


static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
    // Walk two type trees together, and complain about differences
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        // We probably just found the extra level of arrayness in b_type: compare the type inside it to a_type
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        // Match on pointee type. Storage class is expected to differ.
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        // If we haven't resolved array-of-verts by here, we're not going to.
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        // Match on width, signedness
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        // Match on width
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        // Match on element type, count.
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        // Match on element type, count.
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        // Match on element type, count. These all have the same layout. We don't get here if b_arrayed. This differs from
        // vector & matrix types in that the array size is the id of a constant instruction, not a literal within OpTypeArray.
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        // Match on all element types
        {
            if (a_insn.len() != b_insn.len()) {
                return false; // Structs cannot match if member counts differ
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        // Remaining types are CLisms, or may not appear in the interfaces we are interested in. Just claim no match.
        return false;
    }
}
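
// Illustrative sketch (hypothetical ids, guarded out of the build): the relaxed path
// above allows the first type to be a wider vector than the second when the component
// type is narrow numeric, e.g. a vec4 feeding a vec2 of the same component type.
#if 0
static bool example_relaxed_match(shader_module const *a, shader_module const *b,
                                  unsigned wide_vec_type_id, unsigned narrow_vec_type_id) {
    // relaxed=true: component counts may differ as long as the first has at least as many.
    return types_match(a, b, wide_vec_type_id, narrow_vec_type_id, false, false, true);
}
#endif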

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        // See through the ptr -- this is only ever at the toplevel for graphics shaders; we're never actually passing
        // pointers around.
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1249    case spv::OpTypeArray:
1250        if (strip_array_level) {
1251            return get_locations_consumed_by_type(src, insn.word(2), false);
1252        } else {
1253            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1254        }
1255    case spv::OpTypeMatrix:
1256        // Num locations is the dimension * element size
1257        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
1258    case spv::OpTypeVector: {
1259        auto scalar_type = src->get_def(insn.word(2));
1260        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
1261            scalar_type.word(2) : 32;
1262
1263        // Locations are 128 bits wide; 3- and 4-component vectors of 64-bit types require two.
1264        return (bit_width * insn.word(3) + 127) / 128;
1265    }
1266    default:
1267        // Everything else is just 1.
1268        return 1;
1269
1270        // TODO: extend to handle 64-bit scalar types, whose vectors may need multiple locations.
1271    }
1272}
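
// Worked examples of the location arithmetic above (illustrative):
//   vec4  (4 x 32-bit): (32 * 4 + 127) / 128 = 1 location
//   dvec2 (2 x 64-bit): (64 * 2 + 127) / 128 = 1 location
//   dvec4 (4 x 64-bit): (64 * 4 + 127) / 128 = 2 locations
//   mat4: 4 columns, each a vec4 consuming 1 location = 4 locations
//   float[3] with strip_array_level == false: 3 * 1 = 3 locations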
1273
1274static unsigned get_locations_consumed_by_format(VkFormat format) {
1275    switch (format) {
1276    case VK_FORMAT_R64G64B64A64_SFLOAT:
1277    case VK_FORMAT_R64G64B64A64_SINT:
1278    case VK_FORMAT_R64G64B64A64_UINT:
1279    case VK_FORMAT_R64G64B64_SFLOAT:
1280    case VK_FORMAT_R64G64B64_SINT:
1281    case VK_FORMAT_R64G64B64_UINT:
1282        return 2;
1283    default:
1284        return 1;
1285    }
1286}
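
// E.g. a dvec3 or dvec4 vertex attribute (VK_FORMAT_R64G64B64_* or
// VK_FORMAT_R64G64B64A64_*) straddles two locations, consistent with the
// (bit_width * components + 127) / 128 rule in get_locations_consumed_by_type above.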
1287
1288typedef std::pair<unsigned, unsigned> location_t;
1289typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1290
1291struct interface_var {
1292    uint32_t id;
1293    uint32_t type_id;
1294    uint32_t offset;
1295    bool is_patch;
1296    bool is_block_member;
1297    bool is_relaxed_precision;
1298    // TODO: collect the name, too? Isn't required to be present.
1299};
1300
1301struct shader_stage_attributes {
1302    char const *const name;
1303    bool arrayed_input;
1304    bool arrayed_output;
1305};
1306
1307static shader_stage_attributes shader_stage_attribs[] = {
1308    {"vertex shader", false, false},
1309    {"tessellation control shader", true, true},
1310    {"tessellation evaluation shader", true, false},
1311    {"geometry shader", true, false},
1312    {"fragment shader", false, false},
1313};
1314
1315static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1316    while (true) {
1317
1318        if (def.opcode() == spv::OpTypePointer) {
1319            def = src->get_def(def.word(3));
1320        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1321            def = src->get_def(def.word(2));
1322            is_array_of_verts = false;
1323        } else if (def.opcode() == spv::OpTypeStruct) {
1324            return def;
1325        } else {
1326            return src->end();
1327        }
1328    }
1329}
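
// Example of the peeling above (illustrative): a tessellation or geometry stage's
// per-vertex input block
//   in gl_PerVertex { vec4 gl_Position; ... } gl_in[];
// is reached as OpTypePointer -> OpTypeArray -> OpTypeStruct when is_array_of_verts
// is true; with is_array_of_verts false, the OpTypeArray would terminate the walk
// with src->end() (not a struct).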
1330
1331static void collect_interface_block_members(shader_module const *src,
1332                                            std::map<location_t, interface_var> *out,
1333                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1334                                            uint32_t id, uint32_t type_id, bool is_patch) {
1335    // Walk down the type_id presented, trying to determine whether it's actually an interface block.
1336    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1337    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1338        // This isn't an interface block.
1339        return;
1340    }
1341
1342    std::unordered_map<unsigned, unsigned> member_components;
1343    std::unordered_map<unsigned, unsigned> member_relaxed_precision;
1344
1345    // Walk all the OpMemberDecorate for type's result id -- first pass, collect components.
1346    for (auto insn : *src) {
1347        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1348            unsigned member_index = insn.word(2);
1349
1350            if (insn.word(3) == spv::DecorationComponent) {
1351                unsigned component = insn.word(4);
1352                member_components[member_index] = component;
1353            }
1354
1355            if (insn.word(3) == spv::DecorationRelaxedPrecision) {
1356                member_relaxed_precision[member_index] = 1;
1357            }
1358        }
1359    }
1360
1361    // Second pass -- produce the output, from Location decorations
1362    for (auto insn : *src) {
1363        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1364            unsigned member_index = insn.word(2);
1365            unsigned member_type_id = type.word(2 + member_index);
1366
1367            if (insn.word(3) == spv::DecorationLocation) {
1368                unsigned location = insn.word(4);
1369                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1370                auto component_it = member_components.find(member_index);
1371                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1372                bool is_relaxed_precision = member_relaxed_precision.find(member_index) != member_relaxed_precision.end();
1373
1374                for (unsigned int offset = 0; offset < num_locations; offset++) {
1375                    interface_var v = {};
1376                    v.id = id;
1377                    // TODO: member index in interface_var too?
1378                    v.type_id = member_type_id;
1379                    v.offset = offset;
1380                    v.is_patch = is_patch;
1381                    v.is_block_member = true;
1382                    v.is_relaxed_precision = is_relaxed_precision;
1383                    (*out)[std::make_pair(location + offset, component)] = v;
1384                }
1385            }
1386        }
1387    }
1388}
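
// Illustrative decorations consumed by the two passes above -- Component in the
// first pass, Location in the second:
//   OpMemberDecorate %block 0 Location 1
//   OpMemberDecorate %block 1 Location 2
//   OpMemberDecorate %block 1 Component 2
// Member 1 would be emitted under key (location = 2, component = 2), with one
// interface_var per location the member's type consumes.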
1389
1390static std::map<location_t, interface_var> collect_interface_by_location(
1391        shader_module const *src, spirv_inst_iter entrypoint,
1392        spv::StorageClass sinterface, bool is_array_of_verts) {
1393
1394    std::unordered_map<unsigned, unsigned> var_locations;
1395    std::unordered_map<unsigned, unsigned> var_builtins;
1396    std::unordered_map<unsigned, unsigned> var_components;
1397    std::unordered_map<unsigned, unsigned> blocks;
1398    std::unordered_map<unsigned, unsigned> var_patch;
1399    std::unordered_map<unsigned, unsigned> var_relaxed_precision;
1400
1401    for (auto insn : *src) {
1402
1403        // We consider two interface models: SSO rendezvous-by-location, and builtins. Complain about anything that
1404        // fits neither model.
1405        if (insn.opcode() == spv::OpDecorate) {
1406            if (insn.word(2) == spv::DecorationLocation) {
1407                var_locations[insn.word(1)] = insn.word(3);
1408            }
1409
1410            if (insn.word(2) == spv::DecorationBuiltIn) {
1411                var_builtins[insn.word(1)] = insn.word(3);
1412            }
1413
1414            if (insn.word(2) == spv::DecorationComponent) {
1415                var_components[insn.word(1)] = insn.word(3);
1416            }
1417
1418            if (insn.word(2) == spv::DecorationBlock) {
1419                blocks[insn.word(1)] = 1;
1420            }
1421
1422            if (insn.word(2) == spv::DecorationPatch) {
1423                var_patch[insn.word(1)] = 1;
1424            }
1425
1426            if (insn.word(2) == spv::DecorationRelaxedPrecision) {
1427                var_relaxed_precision[insn.word(1)] = 1;
1428            }
1429        }
1430    }
1431
1432    // TODO: handle grouped decorations
1433    // TODO: handle index=1 dual source outputs from FS -- two vars will have the same location, and we DON'T want to clobber.
1434
1435    // Find the end of the entrypoint's name string. Additional zero bytes follow the actual null terminator, to fill out the
1436    // rest of the word -- so we only need to look at the last byte in the word to determine which word contains the terminator.
1437    uint32_t word = 3;
1438    while (entrypoint.word(word) & 0xff000000u) {
1439        ++word;
1440    }
1441    ++word;
1442
1443    std::map<location_t, interface_var> out;
1444
1445    for (; word < entrypoint.len(); word++) {
1446        auto insn = src->get_def(entrypoint.word(word));
1447        assert(insn != src->end());
1448        assert(insn.opcode() == spv::OpVariable);
1449
1450        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1451            unsigned id = insn.word(2);
1452            unsigned type = insn.word(1);
1453
1454            int location = value_or_default(var_locations, id, -1);
1455            int builtin = value_or_default(var_builtins, id, -1);
1456            unsigned component = value_or_default(var_components, id, 0); // Unspecified is OK; defaults to 0
1457            bool is_patch = var_patch.find(id) != var_patch.end();
1458            bool is_relaxed_precision = var_relaxed_precision.find(id) != var_relaxed_precision.end();
1459
1460            // All variables and interface block members in the Input or Output storage classes must be decorated with either
1461            // a builtin or an explicit location.
1462            //
1463            // TODO: integrate the interface block support here. For now, don't complain -- a valid SPIRV module will only hit
1464            // this path for the interface block case, as the individual members of the type are decorated, rather than
1465            // variable declarations.
1466
1467            if (location != -1) {
1468                // A user-defined interface variable, with a location. Where a variable occupies multiple locations, emit
1469                // one result for each.
1470                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1471                for (unsigned int offset = 0; offset < num_locations; offset++) {
1472                    interface_var v = {};
1473                    v.id = id;
1474                    v.type_id = type;
1475                    v.offset = offset;
1476                    v.is_patch = is_patch;
1477                    v.is_relaxed_precision = is_relaxed_precision;
1478                    out[std::make_pair(location + offset, component)] = v;
1479                }
1480            } else if (builtin == -1) {
1481                // An interface block instance
1482                collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch);
1483            }
1484        }
1485    }
1486
1487    return out;
1488}
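
// OpEntryPoint word layout the name-scan above relies on (illustrative):
//   word(1) = execution model, word(2) = entry point <id>,
//   word(3)... = literal name, nul-padded to a word boundary,
//   then the interface <id>s.
// E.g. the name "main" packs into words 3..4; word 4's top byte is zero, so the
// scan stops there and the interface <id>s begin at word 5.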
1489
1490static std::vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
1491        debug_report_data *report_data, shader_module const *src,
1492        std::unordered_set<uint32_t> const &accessible_ids) {
1493
1494    std::vector<std::pair<uint32_t, interface_var>> out;
1495
1496    for (auto insn : *src) {
1497        if (insn.opcode() == spv::OpDecorate) {
1498            if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
1499                auto attachment_index = insn.word(3);
1500                auto id = insn.word(1);
1501
1502                if (accessible_ids.count(id)) {
1503                    auto def = src->get_def(id);
1504                    assert(def != src->end());
1505
1506                    if (def.opcode() == spv::OpVariable && def.word(3) == spv::StorageClassUniformConstant) {
1507                        auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
1508                        for (unsigned int offset = 0; offset < num_locations; offset++) {
1509                            interface_var v = {};
1510                            v.id = id;
1511                            v.type_id = def.word(1);
1512                            v.offset = offset;
1513                            out.emplace_back(attachment_index + offset, v);
1514                        }
1515                    }
1516                }
1517            }
1518        }
1519    }
1520
1521    return out;
1522}
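
// Illustrative GLSL producing the decoration matched above:
//   layout (input_attachment_index = 1, set = 0, binding = 0)
//       uniform subpassInput u_input;
// yields "OpDecorate %u_input InputAttachmentIndex 1" on a UniformConstant
// OpVariable, reported here as attachment index 1 (u_input is a made-up name
// for the example).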
1523
1524static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
1525        debug_report_data *report_data, shader_module const *src,
1526        std::unordered_set<uint32_t> const &accessible_ids) {
1527
1528    std::unordered_map<unsigned, unsigned> var_sets;
1529    std::unordered_map<unsigned, unsigned> var_bindings;
1530
1531    for (auto insn : *src) {
1532        // All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1533        // DecorationDescriptorSet and DecorationBinding.
1534        if (insn.opcode() == spv::OpDecorate) {
1535            if (insn.word(2) == spv::DecorationDescriptorSet) {
1536                var_sets[insn.word(1)] = insn.word(3);
1537            }
1538
1539            if (insn.word(2) == spv::DecorationBinding) {
1540                var_bindings[insn.word(1)] = insn.word(3);
1541            }
1542        }
1543    }
1544
1545    std::vector<std::pair<descriptor_slot_t, interface_var>> out;
1546
1547    for (auto id : accessible_ids) {
1548        auto insn = src->get_def(id);
1549        assert(insn != src->end());
1550
1551        if (insn.opcode() == spv::OpVariable &&
1552            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1553            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1554            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1555
1556            interface_var v = {};
1557            v.id = insn.word(2);
1558            v.type_id = insn.word(1);
1559            out.emplace_back(std::make_pair(set, binding), v);
1560        }
1561    }
1562
1563    return out;
1564}
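
// E.g. (illustrative) "layout (set = 1, binding = 3) uniform sampler2D tex;"
// yields OpDecorate %tex DescriptorSet 1 and OpDecorate %tex Binding 3, so the
// variable is reported under descriptor_slot_t(1, 3). Undecorated variables land
// in slot (0, 0) via value_or_default.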
1565
1566static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1567                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1568                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1569                                              shader_stage_attributes const *consumer_stage) {
1570    bool pass = true;
1571
1572    auto outputs = collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
1573    auto inputs = collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);
1574
1575    auto a_it = outputs.begin();
1576    auto b_it = inputs.begin();
1577
1578    // Maps sorted by key (location); walk them together to find mismatches
1579    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1580        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1581        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1582        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1583        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1584
1585        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1586            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1587                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1588                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1589                        a_first.second, consumer_stage->name)) {
1590                pass = false;
1591            }
1592            a_it++;
1593        } else if (a_at_end || a_first > b_first) {
1594            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1595                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1596                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1597                        producer_stage->name)) {
1598                pass = false;
1599            }
1600            b_it++;
1601        } else {
1602            // Subtleties of arrayed interfaces:
1603            // - if is_patch, then the member is not arrayed, even though the interface may be.
1604            // - if is_block_member, then the extra array level of an arrayed interface is not
1605            //   expressed in the member type -- it's expressed in the block type.
1606            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1607                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1608                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
1609                             true)) {
1610                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1611                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1612                            a_first.first, a_first.second,
1613                            describe_type(producer, a_it->second.type_id).c_str(),
1614                            describe_type(consumer, b_it->second.type_id).c_str())) {
1615                    pass = false;
1616                }
1617            }
1618            if (a_it->second.is_patch != b_it->second.is_patch) {
1619                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
1620                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1621                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1622                            "per-%s in %s stage", a_first.first, a_first.second,
1623                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1624                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1625                    pass = false;
1626                }
1627            }
1628            if (a_it->second.is_relaxed_precision != b_it->second.is_relaxed_precision) {
1629                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
1630                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1631                            "Decoration mismatch on location %u.%u: %s and %s stages differ in precision",
1632                            a_first.first, a_first.second,
1633                            producer_stage->name,
1634                            consumer_stage->name)) {
1635                    pass = false;
1636                }
1637            }
1638            a_it++;
1639            b_it++;
1640        }
1641    }
1642
1643    return pass;
1644}
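
// Illustrative outcomes of the walk above: an output location the consumer never
// reads is only a performance warning (OUTPUT_NOT_CONSUMED); an input with no
// producer is an error (INPUT_NOT_PRODUCED); and, e.g., a vertex shader's
// "layout(location = 1) out vec4" feeding a fragment shader's
// "layout(location = 1) in ivec4" is reported as INTERFACE_TYPE_MISMATCH.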
1645
1646enum FORMAT_TYPE {
1647    FORMAT_TYPE_UNDEFINED,
1648    FORMAT_TYPE_FLOAT, // UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader
1649    FORMAT_TYPE_SINT,
1650    FORMAT_TYPE_UINT,
1651};
1652
1653static unsigned get_format_type(VkFormat fmt) {
1654    switch (fmt) {
1655    case VK_FORMAT_UNDEFINED:
1656        return FORMAT_TYPE_UNDEFINED;
1657    case VK_FORMAT_R8_SINT:
1658    case VK_FORMAT_R8G8_SINT:
1659    case VK_FORMAT_R8G8B8_SINT:
1660    case VK_FORMAT_R8G8B8A8_SINT:
1661    case VK_FORMAT_R16_SINT:
1662    case VK_FORMAT_R16G16_SINT:
1663    case VK_FORMAT_R16G16B16_SINT:
1664    case VK_FORMAT_R16G16B16A16_SINT:
1665    case VK_FORMAT_R32_SINT:
1666    case VK_FORMAT_R32G32_SINT:
1667    case VK_FORMAT_R32G32B32_SINT:
1668    case VK_FORMAT_R32G32B32A32_SINT:
1669    case VK_FORMAT_R64_SINT:
1670    case VK_FORMAT_R64G64_SINT:
1671    case VK_FORMAT_R64G64B64_SINT:
1672    case VK_FORMAT_R64G64B64A64_SINT:
1673    case VK_FORMAT_B8G8R8_SINT:
1674    case VK_FORMAT_B8G8R8A8_SINT:
1675    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1676    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1677    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1678        return FORMAT_TYPE_SINT;
1679    case VK_FORMAT_R8_UINT:
1680    case VK_FORMAT_R8G8_UINT:
1681    case VK_FORMAT_R8G8B8_UINT:
1682    case VK_FORMAT_R8G8B8A8_UINT:
1683    case VK_FORMAT_R16_UINT:
1684    case VK_FORMAT_R16G16_UINT:
1685    case VK_FORMAT_R16G16B16_UINT:
1686    case VK_FORMAT_R16G16B16A16_UINT:
1687    case VK_FORMAT_R32_UINT:
1688    case VK_FORMAT_R32G32_UINT:
1689    case VK_FORMAT_R32G32B32_UINT:
1690    case VK_FORMAT_R32G32B32A32_UINT:
1691    case VK_FORMAT_R64_UINT:
1692    case VK_FORMAT_R64G64_UINT:
1693    case VK_FORMAT_R64G64B64_UINT:
1694    case VK_FORMAT_R64G64B64A64_UINT:
1695    case VK_FORMAT_B8G8R8_UINT:
1696    case VK_FORMAT_B8G8R8A8_UINT:
1697    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1698    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1699    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1700        return FORMAT_TYPE_UINT;
1701    default:
1702        return FORMAT_TYPE_FLOAT;
1703    }
1704}
1705
1706// Characterizes a SPIR-V type appearing in an interface to a FF stage, for comparison to a VkFormat's characterization above.
1707static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1708    auto insn = src->get_def(type);
1709    assert(insn != src->end());
1710
1711    switch (insn.opcode()) {
1712    case spv::OpTypeInt:
1713        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1714    case spv::OpTypeFloat:
1715        return FORMAT_TYPE_FLOAT;
1716    case spv::OpTypeVector:
1717        return get_fundamental_type(src, insn.word(2));
1718    case spv::OpTypeMatrix:
1719        return get_fundamental_type(src, insn.word(2));
1720    case spv::OpTypeArray:
1721        return get_fundamental_type(src, insn.word(2));
1722    case spv::OpTypePointer:
1723        return get_fundamental_type(src, insn.word(3));
1724    case spv::OpTypeImage:
1725        return get_fundamental_type(src, insn.word(2));
1726
1727    default:
1728        return FORMAT_TYPE_UNDEFINED;
1729    }
1730}
1731
1732static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1733    uint32_t bit_pos = u_ffs(stage);
1734    return bit_pos - 1;
1735}
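
// E.g. VK_SHADER_STAGE_FRAGMENT_BIT (0x10): u_ffs returns the 1-based position
// of the lowest set bit, 5, so the stage id is 4 -- a dense index suitable for
// stage-ordered tables like shader_stage_attribs above.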
1736
1737static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1738    // Walk the binding descriptions, which describe the step rate and stride of each vertex buffer.  Each binding should
1739    // be specified only once.
1740    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1741    bool pass = true;
1742
1743    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1744        auto desc = &vi->pVertexBindingDescriptions[i];
1745        auto &binding = bindings[desc->binding];
1746        if (binding) {
1747            // TODO: VALIDATION_ERROR_02105 perhaps?
1748            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1749                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1750                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1751                pass = false;
1752            }
1753        } else {
1754            binding = desc;
1755        }
1756    }
1757
1758    return pass;
1759}
1760
1761static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1762                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1763    bool pass = true;
1764
1765    auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);
1766
1767    // Build index by location
1768    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1769    if (vi) {
1770        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1771            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1772            for (auto j = 0u; j < num_locations; j++) {
1773                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1774            }
1775        }
1776    }
1777
1778    auto it_a = attribs.begin();
1779    auto it_b = inputs.begin();
1780    bool used = false;
1781
1782    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1783        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1784        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1785        auto a_first = a_at_end ? 0 : it_a->first;
1786        auto b_first = b_at_end ? 0 : it_b->first.first;
1787        if (!a_at_end && (b_at_end || a_first < b_first)) {
1788            if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1789                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1790                        "Vertex attribute at location %d not consumed by vertex shader", a_first)) {
1791                pass = false;
1792            }
1793            used = false;
1794            it_a++;
1795        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1796            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
1797                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Vertex shader consumes input at location %d but not provided",
1798                        b_first)) {
1799                pass = false;
1800            }
1801            it_b++;
1802        } else {
1803            unsigned attrib_type = get_format_type(it_a->second->format);
1804            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1805
1806            // Type checking
1807            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1808                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1809                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1810                            "Attribute type of `%s` at location %d does not match vertex shader input type of `%s`",
1811                            string_VkFormat(it_a->second->format), a_first,
1812                            describe_type(vs, it_b->second.type_id).c_str())) {
1813                    pass = false;
1814                }
1815            }
1816
1817            // OK!
1818            used = true;
1819            it_b++;
1820        }
1821    }
1822
1823    return pass;
1824}
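
// Illustrative mismatch the final branch catches: binding a
// VK_FORMAT_R8G8B8A8_UINT attribute (FORMAT_TYPE_UINT) to a location the vertex
// shader declares as "layout(location = 0) in vec4 pos;" (FORMAT_TYPE_FLOAT)
// reports SHADER_CHECKER_INTERFACE_TYPE_MISMATCH.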
1825
1826static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
1827                                                    spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci,
1828                                                    uint32_t subpass_index) {
1829    std::map<uint32_t, VkFormat> color_attachments;
1830    auto subpass = rpci->pSubpasses[subpass_index];
1831    for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
1832        uint32_t attachment = subpass.pColorAttachments[i].attachment;
1833        if (attachment == VK_ATTACHMENT_UNUSED)
1834            continue;
1835        if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
1836            color_attachments[i] = rpci->pAttachments[attachment].format;
1837        }
1838    }
1839
1840    bool pass = true;
1841
1842    // TODO: dual source blend index (spv::DecIndex, zero if not provided)
1843
1844    auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);
1845
1846    auto it_a = outputs.begin();
1847    auto it_b = color_attachments.begin();
1848
1849    // Walk attachment list and outputs together
1850
1851    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1852        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1853        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1854
1855        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
1856            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1857                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1858                        "fragment shader writes to output location %d with no matching attachment", it_a->first.first)) {
1859                pass = false;
1860            }
1861            it_a++;
1862        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
1863            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1864                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by fragment shader",
1865                        it_b->first)) {
1866                pass = false;
1867            }
1868            it_b++;
1869        } else {
1870            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
1871            unsigned att_type = get_format_type(it_b->second);
1872
1873            // Type checking
1874            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1875                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1876                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1877                            "Attachment %d of type `%s` does not match fragment shader output type of `%s`", it_b->first,
1878                            string_VkFormat(it_b->second),
1879                            describe_type(fs, it_a->second.type_id).c_str())) {
1880                    pass = false;
1881                }
1882            }
1883
1884            // OK!
1885            it_a++;
1886            it_b++;
1887        }
1888    }
1889
1890    return pass;
1891}
1892
1893// For some analyses, we need to know about all ids referenced by the static call tree of a particular entrypoint. This is
1894// important for identifying the set of shader resources actually used by an entrypoint, for example.
1895// Note: we only explore parts of the module which might actually contain ids we care about for the above analyses.
1896//  - NOT the shader input/output interfaces.
1897//
1898// TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1899// converting parts of this to be generated from the machine-readable spec instead.
1900static std::unordered_set<uint32_t> mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint) {
1901    std::unordered_set<uint32_t> ids;
1902    std::unordered_set<uint32_t> worklist;
1903    worklist.insert(entrypoint.word(2));
1904
1905    while (!worklist.empty()) {
1906        auto id_iter = worklist.begin();
1907        auto id = *id_iter;
1908        worklist.erase(id_iter);
1909
1910        auto insn = src->get_def(id);
1911        if (insn == src->end()) {
1912            // ID is something we didn't collect in build_def_index. That's OK -- we'll stumble across all kinds of things here
1913            // that we may not care about.
1914            continue;
1915        }
1916
1917        // Try to add to the output set
1918        if (!ids.insert(id).second) {
1919            continue; // If we already saw this id, we don't want to walk it again.
1920        }
1921
1922        switch (insn.opcode()) {
1923        case spv::OpFunction:
1924            // Scan whole body of the function, enlisting anything interesting
1925            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1926                switch (insn.opcode()) {
1927                case spv::OpLoad:
1928                case spv::OpAtomicLoad:
1929                case spv::OpAtomicExchange:
1930                case spv::OpAtomicCompareExchange:
1931                case spv::OpAtomicCompareExchangeWeak:
1932                case spv::OpAtomicIIncrement:
1933                case spv::OpAtomicIDecrement:
1934                case spv::OpAtomicIAdd:
1935                case spv::OpAtomicISub:
1936                case spv::OpAtomicSMin:
1937                case spv::OpAtomicUMin:
1938                case spv::OpAtomicSMax:
1939                case spv::OpAtomicUMax:
1940                case spv::OpAtomicAnd:
1941                case spv::OpAtomicOr:
1942                case spv::OpAtomicXor:
1943                    worklist.insert(insn.word(3)); // ptr
1944                    break;
1945                case spv::OpStore:
1946                case spv::OpAtomicStore:
1947                    worklist.insert(insn.word(1)); // ptr
1948                    break;
1949                case spv::OpAccessChain:
1950                case spv::OpInBoundsAccessChain:
1951                    worklist.insert(insn.word(3)); // base ptr
1952                    break;
1953                case spv::OpSampledImage:
1954                case spv::OpImageSampleImplicitLod:
1955                case spv::OpImageSampleExplicitLod:
1956                case spv::OpImageSampleDrefImplicitLod:
1957                case spv::OpImageSampleDrefExplicitLod:
1958                case spv::OpImageSampleProjImplicitLod:
1959                case spv::OpImageSampleProjExplicitLod:
1960                case spv::OpImageSampleProjDrefImplicitLod:
1961                case spv::OpImageSampleProjDrefExplicitLod:
1962                case spv::OpImageFetch:
1963                case spv::OpImageGather:
1964                case spv::OpImageDrefGather:
1965                case spv::OpImageRead:
1966                case spv::OpImage:
1967                case spv::OpImageQueryFormat:
1968                case spv::OpImageQueryOrder:
1969                case spv::OpImageQuerySizeLod:
1970                case spv::OpImageQuerySize:
1971                case spv::OpImageQueryLod:
1972                case spv::OpImageQueryLevels:
1973                case spv::OpImageQuerySamples:
1974                case spv::OpImageSparseSampleImplicitLod:
1975                case spv::OpImageSparseSampleExplicitLod:
1976                case spv::OpImageSparseSampleDrefImplicitLod:
1977                case spv::OpImageSparseSampleDrefExplicitLod:
1978                case spv::OpImageSparseSampleProjImplicitLod:
1979                case spv::OpImageSparseSampleProjExplicitLod:
1980                case spv::OpImageSparseSampleProjDrefImplicitLod:
1981                case spv::OpImageSparseSampleProjDrefExplicitLod:
1982                case spv::OpImageSparseFetch:
1983                case spv::OpImageSparseGather:
1984                case spv::OpImageSparseDrefGather:
1985                case spv::OpImageTexelPointer:
1986                    worklist.insert(insn.word(3)); // Image or sampled image
1987                    break;
1988                case spv::OpImageWrite:
1989                    worklist.insert(insn.word(1)); // Image -- different operand order to above
1990                    break;
1991                case spv::OpFunctionCall:
1992                    for (uint32_t i = 3; i < insn.len(); i++) {
1993                        worklist.insert(insn.word(i)); // fn itself, and all args
1994                    }
1995                    break;
1996
1997                case spv::OpExtInst:
1998                    for (uint32_t i = 5; i < insn.len(); i++) {
1999                        worklist.insert(insn.word(i)); // Operands to ext inst
2000                    }
2001                    break;
2002                }
2003            }
2004            break;
2005        }
2006    }
2007
2008    return ids;
2009}
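
// Sketch of the traversal (illustrative): the worklist starts with the
// entrypoint's function <id>; scanning that function's body, an OpLoad enlists
// its pointer operand and an OpAccessChain enlists its base pointer, so the <id>
// of the underlying resource OpVariable ends up in the result set -- which is
// exactly what collect_interface_by_descriptor_slot filters accessible_ids for.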
2010
2011static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
2012                                                          std::vector<VkPushConstantRange> const *push_constant_ranges,
2013                                                          shader_module const *src, spirv_inst_iter type,
2014                                                          VkShaderStageFlagBits stage) {
2015    bool pass = true;
2016
2017    // Strip off ptrs etc
2018    type = get_struct_type(src, type, false);
2019    assert(type != src->end());
2020
2021    // Validate directly off the offsets. This isn't quite correct for arrays and matrices, but it is a good first step.
2022    // TODO: arrays, matrices, weird sizes
2023    for (auto insn : *src) {
2024        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
2025
2026            if (insn.word(3) == spv::DecorationOffset) {
2027                unsigned offset = insn.word(4);
2028                auto size = 4; // Bytes; TODO: calculate this based on the type
2029
2030                bool found_range = false;
2031                for (auto const &range : *push_constant_ranges) {
2032                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
2033                        found_range = true;
2034
2035                        if ((range.stageFlags & stage) == 0) {
2036                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2037                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2038                                        "Push constant range covering variable starting at "
2039                                        "offset %u not accessible from stage %s",
2040                                        offset, string_VkShaderStageFlagBits(stage))) {
2041                                pass = false;
2042                            }
2043                        }
2044
2045                        break;
2046                    }
2047                }
2048
2049                if (!found_range) {
2050                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2051                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
2052                                "Push constant range covering variable starting at "
2053                                "offset %u not declared in layout",
2054                                offset)) {
2055                        pass = false;
2056                    }
2057                }
2058            }
2059        }
2060    }
2061
2062    return pass;
2063}
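
// Containment example for the range check above (illustrative): a member
// decorated Offset 4, with the assumed 4-byte size, is covered by
// VkPushConstantRange{stageFlags, offset = 0, size = 8} since 0 <= 4 and
// 0 + 8 >= 4 + 4; the range must additionally include the shader's stage in
// stageFlags, or PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE is reported.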
2064
2065static bool validate_push_constant_usage(debug_report_data *report_data,
2066                                         std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
2067                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
2068    bool pass = true;
2069
2070    for (auto id : accessible_ids) {
2071        auto def_insn = src->get_def(id);
2072        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
2073            pass &= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
2074                                                                  src->get_def(def_insn.word(1)), stage);
2075        }
2076    }
2077
2078    return pass;
2079}
2080
2081// For given pipelineLayout verify that the set_layout_node at slot.first
2082//  has the requested binding at slot.second and return ptr to that binding
2083static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {
2084
2085    if (!pipelineLayout)
2086        return nullptr;
2087
2088    if (slot.first >= pipelineLayout->set_layouts.size())
2089        return nullptr;
2090
2091    return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
2092}
2093
2094// Start of a block of code for managing/tracking pipeline state that this layer cares about
2095
2096// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
2097//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
2098//   to that same cmd buffer by separate thread are not changing state from underneath us
2099// Track the last cmd buffer touched by this thread
2100
2101static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2102    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2103        if (pCB->drawCount[i])
2104            return true;
2105    }
2106    return false;
2107}
2108
2109// Check object status for selected flag state
2110static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2111                            const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
2112    if (!(pNode->status & status_mask)) {
2113        char const *const message = validation_error_map[msg_code];
2114        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2115                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, msg_code, "DS",
2116                       "command buffer object 0x%p: %s. %s.", pNode->commandBuffer, fail_msg, message);
2117    }
2118    return false;
2119}
2120
2121// Retrieve pipeline node ptr for given pipeline object
2122static PIPELINE_STATE *getPipelineState(layer_data const *my_data, VkPipeline pipeline) {
2123    auto it = my_data->pipelineMap.find(pipeline);
2124    if (it == my_data->pipelineMap.end()) {
2125        return nullptr;
2126    }
2127    return it->second;
2128}
2129
2130static RENDER_PASS_STATE *getRenderPassState(layer_data const *my_data, VkRenderPass renderpass) {
2131    auto it = my_data->renderPassMap.find(renderpass);
2132    if (it == my_data->renderPassMap.end()) {
2133        return nullptr;
2134    }
2135    return it->second.get();
2136}
2137
2138static FRAMEBUFFER_STATE *getFramebufferState(const layer_data *my_data, VkFramebuffer framebuffer) {
2139    auto it = my_data->frameBufferMap.find(framebuffer);
2140    if (it == my_data->frameBufferMap.end()) {
2141        return nullptr;
2142    }
2143    return it->second.get();
2144}
2145
2146cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
2147    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
2148    if (it == my_data->descriptorSetLayoutMap.end()) {
2149        return nullptr;
2150    }
2151    return it->second;
2152}
2153
2154static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
2155    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
2156    if (it == my_data->pipelineLayoutMap.end()) {
2157        return nullptr;
2158    }
2159    return &it->second;
2160}
2161
2162// Return true if for a given PSO, the given state enum is dynamic, else return false
2163static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
2164    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2165        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2166            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2167                return true;
2168        }
2169    }
2170    return false;
2171}
2172
2173// Validate state stored as flags at time of draw call
2174static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
2175                                      UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
2176    bool result = false;
2177    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2178        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2179         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
2180        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2181                                  "Dynamic line width state not set for this command buffer", msg_code);
2182    }
2183    if (pPipe->graphicsPipelineCI.pRasterizationState &&
2184        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2185        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2186                                  "Dynamic depth bias state not set for this command buffer", msg_code);
2187    }
2188    if (pPipe->blendConstantsEnabled) {
2189        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2190                                  "Dynamic blend constants state not set for this command buffer", msg_code);
2191    }
2192    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2193        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2194        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2195                                  "Dynamic depth bounds state not set for this command buffer", msg_code);
2196    }
2197    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2198        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2199        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2200                                  "Dynamic stencil read mask state not set for this command buffer", msg_code);
2201        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2202                                  "Dynamic stencil write mask state not set for this command buffer", msg_code);
2203        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2204                                  "Dynamic stencil reference state not set for this command buffer", msg_code);
2205    }
2206    if (indexed) {
2207        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2208                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
2209    }
2210
2211    return result;
2212}
2213
2214// Verify attachment reference compatibility according to spec
2215//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & the other array must match this
2216//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
2217//   to make sure that format and samples counts match.
2218//  If not, they are not compatible.
2219static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2220                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2221                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2222                                             const VkAttachmentDescription *pSecondaryAttachments) {
2223    // Check potential NULL cases first to avoid nullptr issues later
2224    if (pPrimary == nullptr) {
2225        if (pSecondary == nullptr) {
2226            return true;
2227        }
2228        return false;
2229    } else if (pSecondary == nullptr) {
2230        return false;
2231    }
2232    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2233        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2234            return true;
2235    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2236        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2237            return true;
2238    } else { // Format and sample count must match
2239        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2240            return true;
2241        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2242            return false;
2243        }
2244        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2245             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2246            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2247             pSecondaryAttachments[pSecondary[index].attachment].samples))
2248            return true;
2249    }
2250    // Format and sample counts didn't match
2251    return false;
2252}
2253// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
2254// For given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
2255static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPassCreateInfo *primaryRPCI,
2256                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
2257    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2258        stringstream errorStr;
2259        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2260                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2261        errorMsg = errorStr.str();
2262        return false;
2263    }
2264    uint32_t spIndex = 0;
2265    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2266        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2267        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2268        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2269        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2270        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2271            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2272                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2273                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2274                stringstream errorStr;
2275                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2276                errorMsg = errorStr.str();
2277                return false;
2278            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2279                                                         primaryColorCount, primaryRPCI->pAttachments,
2280                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2281                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2282                stringstream errorStr;
2283                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2284                errorMsg = errorStr.str();
2285                return false;
2286            }
2287        }
2288
2289        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2290                                              1, primaryRPCI->pAttachments,
2291                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2292                                              1, secondaryRPCI->pAttachments)) {
2293            stringstream errorStr;
2294            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2295            errorMsg = errorStr.str();
2296            return false;
2297        }
2298
2299        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2300        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2301        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2302        for (uint32_t i = 0; i < inputMax; ++i) {
2303            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2304                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2305                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2306                stringstream errorStr;
2307                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2308                errorMsg = errorStr.str();
2309                return false;
2310            }
2311        }
2312    }
2313    return true;
2314}
2315
2316// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2317// pipelineLayout[layoutIndex]
2318static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *descriptor_set,
2319                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
2320                                            string &errorMsg) {
2321    auto num_sets = pipeline_layout->set_layouts.size();
2322    if (layoutIndex >= num_sets) {
2323        stringstream errorStr;
2324        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
2325                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
2326                 << layoutIndex;
2327        errorMsg = errorStr.str();
2328        return false;
2329    }
2330    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
2331    return descriptor_set->IsCompatible(layout_node, &errorMsg);
2332}
2333
2334// Validate that data for each specialization entry is fully contained within the buffer.
2335static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2336    bool pass = true;
2337
2338    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2339
2340    if (spec) {
2341        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2342            // TODO: This is a good place for VALIDATION_ERROR_00589.
2343            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2344                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
2345                            VALIDATION_ERROR_00590, "SC",
2346                            "Specialization entry %u (for constant id %u) references memory outside provided "
2347                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2348                            " bytes provided). %s.",
2349                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2350                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize,
2351                            validation_error_map[VALIDATION_ERROR_00590])) {
2352
2353                    pass = false;
2354                }
2355            }
2356        }
2357    }
2358
2359    return pass;
2360}
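
// E.g. (illustrative) with dataSize == 8, a VkSpecializationMapEntry of
// {constantID = 0, offset = 6, size = 4} is rejected since 6 + 4 > 8, while
// {constantID = 0, offset = 4, size = 4} is accepted (4 + 4 == 8, not > 8).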
2361
2362static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
2363                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2364    auto type = module->get_def(type_id);
2365
2366    descriptor_count = 1;
2367
2368    // Strip off any array or ptrs. Where we remove array levels, adjust the descriptor count for each dimension.
2369    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2370        if (type.opcode() == spv::OpTypeArray) {
2371            descriptor_count *= get_constant_value(module, type.word(3));
2372            type = module->get_def(type.word(2));
2373        }
2374        else {
2375            type = module->get_def(type.word(3));
2376        }
2377    }
2378
2379    switch (type.opcode()) {
2380    case spv::OpTypeStruct: {
2381        for (auto insn : *module) {
2382            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2383                if (insn.word(2) == spv::DecorationBlock) {
2384                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2385                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2386                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2387                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2388                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2389                }
2390            }
2391        }
2392
2393        // Invalid
2394        return false;
2395    }
2396
2397    case spv::OpTypeSampler:
2398        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
2399            descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2400
2401    case spv::OpTypeSampledImage:
2402        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2403            // Slight relaxation for some GLSL historical madness: samplerBuffer doesn't really have a sampler, and a texel
2404            // buffer descriptor doesn't really provide one. Allow this slight mismatch.
2405            auto image_type = module->get_def(type.word(2));
2406            auto dim = image_type.word(3);
2407            auto sampled = image_type.word(7);
2408            return dim == spv::DimBuffer && sampled == 1;
2409        }
2410        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2411
2412    case spv::OpTypeImage: {
2413        // Many descriptor types backing image types-- depends on dimension and whether the image will be used with a sampler.
2414        // SPIRV for Vulkan requires that sampled be 1 or 2 -- leaving the decision to runtime is unacceptable.
2415        auto dim = type.word(3);
2416        auto sampled = type.word(7);
2417
2418        if (dim == spv::DimSubpassData) {
2419            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2420        } else if (dim == spv::DimBuffer) {
2421            if (sampled == 1) {
2422                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2423            } else {
2424                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2425            }
2426        } else if (sampled == 1) {
2427            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
2428                descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2429        } else {
2430            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2431        }
2432    }
2433
2434    // We shouldn't really see any other junk types -- but if we do, they're a mismatch.
2435    default:
2436        return false; // Mismatch
2437    }
2438}
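
// Rough GLSL-to-descriptor-type correspondence checked above (a sketch; the exact SPIR-V emitted
// depends on the compiler):
//
//     layout(set=0, binding=0) uniform UBO { ... };       // OpTypeStruct + Block        -> UNIFORM_BUFFER[_DYNAMIC]
//     layout(set=0, binding=1) buffer SSBO { ... };       // OpTypeStruct + BufferBlock  -> STORAGE_BUFFER[_DYNAMIC]
//     layout(set=0, binding=2) uniform sampler2D tex;     // OpTypeSampledImage          -> COMBINED_IMAGE_SAMPLER
//     layout(set=0, binding=3) uniform texture2D img;     // OpTypeImage, sampled=1      -> SAMPLED_IMAGE
//     layout(set=0, binding=4, rgba8) uniform image2D st; // OpTypeImage, sampled=2      -> STORAGE_IMAGE
//     layout(set=0, binding=5) uniform samplerBuffer tb;  // OpTypeImage, DimBuffer      -> UNIFORM_TEXEL_BUFFER
//     layout(set=0, binding=6) uniform subpassInput ia;   // OpTypeImage, DimSubpassData -> INPUT_ATTACHMENT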

static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
    if (!feature) {
        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
                    "Shader requires VkPhysicalDeviceFeatures::%s but it is not "
                    "enabled on the device",
                    feature_name)) {
            return false;
        }
    }

    return true;
}

static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
    bool pass = true;

    for (auto insn : *src) {
        if (insn.opcode() == spv::OpCapability) {
            switch (insn.word(1)) {
            case spv::CapabilityMatrix:
            case spv::CapabilityShader:
            case spv::CapabilityInputAttachment:
            case spv::CapabilitySampled1D:
            case spv::CapabilityImage1D:
            case spv::CapabilitySampledBuffer:
            case spv::CapabilityImageBuffer:
            case spv::CapabilityImageQuery:
            case spv::CapabilityDerivativeControl:
                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
                break;

            case spv::CapabilityGeometry:
                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
                break;

            case spv::CapabilityTessellation:
                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
                break;

            case spv::CapabilityFloat64:
                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
                break;

            case spv::CapabilityInt64:
                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
                break;

            case spv::CapabilityTessellationPointSize:
            case spv::CapabilityGeometryPointSize:
                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
                                        "shaderTessellationAndGeometryPointSize");
                break;

            case spv::CapabilityImageGatherExtended:
                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
                break;

            case spv::CapabilityStorageImageMultisample:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityUniformBufferArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
                                        "shaderUniformBufferArrayDynamicIndexing");
                break;

            case spv::CapabilitySampledImageArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
                                        "shaderSampledImageArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageBufferArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
                                        "shaderStorageBufferArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageImageArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
                                        "shaderStorageImageArrayDynamicIndexing");
                break;

            case spv::CapabilityClipDistance:
                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
                break;

            case spv::CapabilityCullDistance:
                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
                break;

            case spv::CapabilityImageCubeArray:
                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilitySampleRateShading:
                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilitySparseResidency:
                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
                break;

            case spv::CapabilityMinLod:
                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
                break;

            case spv::CapabilitySampledCubeArray:
                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilityImageMSArray:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityStorageImageExtendedFormats:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
                                        "shaderStorageImageExtendedFormats");
                break;

            case spv::CapabilityInterpolationFunction:
                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilityStorageImageReadWithoutFormat:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
                                        "shaderStorageImageReadWithoutFormat");
                break;

            case spv::CapabilityStorageImageWriteWithoutFormat:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
                                        "shaderStorageImageWriteWithoutFormat");
                break;

            case spv::CapabilityMultiViewport:
                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
                break;

            default:
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
                            "Shader declares capability %u, not supported in Vulkan.",
                            insn.word(1)))
                    pass = false;
                break;
            }
        }
    }

    return pass;
}
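
// Illustrative sketch (not part of the layer): the feature bits consumed above come from the
// VkPhysicalDeviceFeatures the app passed at device creation. E.g., to legally use a geometry
// shader (spv::CapabilityGeometry), the app must have done something like:
//
//     VkPhysicalDeviceFeatures features = {};
//     features.geometryShader = VK_TRUE;        // assumes the physical device supports it
//     VkDeviceCreateInfo device_ci = {};
//     device_ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
//     device_ci.pEnabledFeatures = &features;   // queue setup etc. omitted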

static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
    auto type = module->get_def(type_id);

    while (true) {
        switch (type.opcode()) {
        case spv::OpTypeArray:
        case spv::OpTypeSampledImage:
            type = module->get_def(type.word(2));
            break;
        case spv::OpTypePointer:
            type = module->get_def(type.word(3));
            break;
        case spv::OpTypeImage: {
            auto dim = type.word(3);
            auto arrayed = type.word(5);
            auto msaa = type.word(6);

            switch (dim) {
            case spv::Dim1D:
                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
            case spv::Dim2D:
                return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
                    (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
            case spv::Dim3D:
                return DESCRIPTOR_REQ_VIEW_TYPE_3D;
            case spv::DimCube:
                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
            case spv::DimSubpassData:
                return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
            default:  // buffer, etc.
                return 0;
            }
        }
        default:
            return 0;
        }
    }
}
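
// Worked example (a sketch): a GLSL `uniform sampler2DArray` compiles to OpTypeSampledImage wrapping
// OpTypeImage with Dim2D, arrayed=1, msaa=0, so the walk above returns
// DESCRIPTOR_REQ_SINGLE_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY -- i.e. the bound descriptor must
// reference a single-sample VK_IMAGE_VIEW_TYPE_2D_ARRAY image view.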

static bool
validate_pipeline_shader_stage(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *pStage,
                               PIPELINE_STATE *pipeline, shader_module **out_module, spirv_inst_iter *out_entrypoint,
                               VkPhysicalDeviceFeatures const *enabledFeatures,
                               std::unordered_map<VkShaderModule, std::unique_ptr<shader_module>> const &shaderModuleMap) {
    bool pass = true;
    auto module_it = shaderModuleMap.find(pStage->module);
    auto module = *out_module = module_it->second.get();

    // Find the entrypoint
    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
    if (entrypoint == module->end()) {
        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__, VALIDATION_ERROR_00510,
                    "SC", "No entrypoint found named `%s` for stage %s. %s.", pStage->pName,
                    string_VkShaderStageFlagBits(pStage->stage), validation_error_map[VALIDATION_ERROR_00510])) {
            return false;   // no point continuing beyond here, any analysis is just going to be garbage.
        }
    }

    // Validate shader capabilities against enabled device features
    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);

    // Mark accessible ids
    auto accessible_ids = mark_accessible_ids(module, entrypoint);

    // Validate descriptor set layout against what the entrypoint actually uses
    auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);

    auto pipelineLayout = pipeline->pipeline_layout;

    pass &= validate_specialization_offsets(report_data, pStage);
    pass &= validate_push_constant_usage(report_data, &pipelineLayout.push_constant_ranges, module, accessible_ids, pStage->stage);

    // Validate descriptor use
    for (auto use : descriptor_uses) {
        // While validating shaders capture which slots are used by the pipeline
        auto &reqs = pipeline->active_slots[use.first.first][use.first.second];
        reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));

        // Verify given pipelineLayout has requested setLayout with requested binding
        const auto &binding = get_descriptor_binding(&pipelineLayout, use.first);
        unsigned required_descriptor_count;

        if (!binding) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
                        "Shader uses descriptor slot %u.%u (used as type `%s`) but it is not declared in the pipeline layout",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
                pass = false;
            }
        } else if (~binding->stageFlags & pStage->stage) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                        "Shader uses descriptor slot %u.%u (used "
                        "as type `%s`) but the descriptor is not "
                        "accessible from stage %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkShaderStageFlagBits(pStage->stage))) {
                pass = false;
            }
        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
                                          required_descriptor_count)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
                                                                       "%u.%u (used as type `%s`) but "
                                                                       "bound descriptor is of type %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkDescriptorType(binding->descriptorType))) {
                pass = false;
            }
        } else if (binding->descriptorCount < required_descriptor_count) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
                        required_descriptor_count, use.first.first, use.first.second,
                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
                pass = false;
            }
        }
    }

    // Validate use of input attachments against subpass structure
    if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
        auto input_attachment_uses = collect_interface_by_input_attachment_index(report_data, module, accessible_ids);

        auto rpci = pipeline->render_pass_ci.ptr();
        auto subpass = pipeline->graphicsPipelineCI.subpass;

        for (auto use : input_attachment_uses) {
            auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
            auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount) ?
                    input_attachments[use.first].attachment : VK_ATTACHMENT_UNUSED;

            if (index == VK_ATTACHMENT_UNUSED) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                            SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
                            "Shader consumes input attachment index %d but it is not provided in the subpass",
                            use.first)) {
                    pass = false;
                }
            } else if (get_format_type(rpci->pAttachments[index].format) !=
                    get_fundamental_type(module, use.second.type_id)) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                            SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
                            "Subpass input attachment %u format of %s does not match type used in shader `%s`",
                            use.first, string_VkFormat(rpci->pAttachments[index].format),
                            describe_type(module, use.second.type_id).c_str())) {
                    pass = false;
                }
            }
        }
    }

    return pass;
}
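
// Illustrative mismatch (a sketch): if the fragment shader declares
//
//     layout(set = 0, binding = 1) uniform sampler2D u_tex;
//
// but the pipeline layout's set 0 declares binding 1 as VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, the
// descriptor_type_match() call above fails and SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH is
// reported for slot 0.1.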

// Validate the shaders used by the given pipeline and store the slots that are actually
//  used by the pipeline into pPipeline->active_slots
static bool
validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
                                           VkPhysicalDeviceFeatures const *enabledFeatures,
                                           std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);

    // One slot per graphics stage: vertex, tess control, tess eval, geometry, fragment
    shader_module *shaders[5];
    memset(shaders, 0, sizeof(shaders));
    spirv_inst_iter entrypoints[5];
    memset(entrypoints, 0, sizeof(entrypoints));
    VkPipelineVertexInputStateCreateInfo const *vi = 0;
    bool pass = true;

    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
        auto pStage = &pCreateInfo->pStages[i];
        auto stage_id = get_shader_stage_id(pStage->stage);
        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
                                               &shaders[stage_id], &entrypoints[stage_id],
                                               enabledFeatures, shaderModuleMap);
    }

    // If the shader stages are no good individually, cross-stage validation is pointless.
    if (!pass)
        return false;

    vi = pCreateInfo->pVertexInputState;

    if (vi) {
        pass &= validate_vi_consistency(report_data, vi);
    }

    if (shaders[vertex_stage]) {
        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
    }

    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);

    // Skip forward to the first present stage, then check each adjacent (producer, consumer) pair.
    while (!shaders[producer] && producer != fragment_stage) {
        producer++;
        consumer++;
    }

    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
        assert(shaders[producer]);
        if (shaders[consumer]) {
            pass &= validate_interface_between_stages(report_data,
                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);

            producer = consumer;
        }
    }

    if (shaders[fragment_stage]) {
        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
                                                        pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass);
    }

    return pass;
}
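
// Interface-matching walk, worked through (a sketch): with only VS and FS present, producer starts
// at the vertex slot and the consumer index advances over the missing tessellation and geometry
// slots until it reaches the fragment slot, so exactly one interface check runs: VS outputs vs. FS
// inputs. Adding a geometry shader would instead check VS->GS and then GS->FS.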

static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
                                      VkPhysicalDeviceFeatures const *enabledFeatures,
                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
    auto pCreateInfo = pPipeline->computePipelineCI.ptr();

    shader_module *module;
    spirv_inst_iter entrypoint;

    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
}

// Return Set node ptr for specified set or else NULL
cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
    auto set_it = my_data->setMap.find(set);
    if (set_it == my_data->setMap.end()) {
        return NULL;
    }
    return set_it->second;
}

// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}

// Stream a comma-separated list of the set bit positions in 'bits' (LSB first) into 's'.
static void list_bits(std::ostream& s, uint32_t bits) {
    for (int i = 0; i < 32 && bits; i++) {
        if (bits & (1 << i)) {
            s << i;
            bits &= ~(1 << i);
            if (bits) {
                s << ",";
            }
        }
    }
}
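
// Example (a sketch): list_bits(ss, 0x0B) appends "0,1,3" -- bits 0, 1 and 3 of 0x0B are set, and a
// comma is emitted only while higher set bits remain. The draw-time checks below use this to name
// the dynamic viewports/scissors an app forgot to set.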

// Validate draw-time state related to the PSO
static bool ValidatePipelineDrawtimeState(layer_data const *my_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
                                          PIPELINE_STATE const *pPipeline) {
    bool skip_call = false;

    // Verify vertex binding
    if (pPipeline->vertexBindingDescriptions.size() > 0) {
        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
                skip_call |= log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                    "The Pipeline State Object (0x%" PRIxLEAST64 ") expects that this Command Buffer's vertex binding Index %u "
                    "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
                    "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
                    (uint64_t)state.pipeline_state->pipeline, vertex_binding, i, vertex_binding);
            }
        }
    } else {
        if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                                 "Vertex buffers are bound to command buffer (0x%p"
                                 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
                                 pCB->commandBuffer, (uint64_t)state.pipeline_state->pipeline);
        }
    }
    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
    // Skip check if rasterization is disabled or there is no viewport.
    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
        pPipeline->graphicsPipelineCI.pViewportState) {
        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);

        if (dynViewport) {
            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
            if (missingViewportMask) {
                std::stringstream ss;
                ss << "Dynamic viewport(s) ";
                list_bits(ss, missingViewportMask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                     "%s", ss.str().c_str());
            }
        }

        if (dynScissor) {
            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
            if (missingScissorMask) {
                std::stringstream ss;
                ss << "Dynamic scissor(s) ";
                list_bits(ss, missingScissorMask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                     "%s", ss.str().c_str());
            }
        }
    }

    // Verify that any MSAA request in PSO matches sample# in bound FB
    // Skip the check if rasterization is disabled.
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
        if (pCB->activeRenderPass) {
            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
            uint32_t i;

            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
                skip_call |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                "Render pass subpass %u mismatch: blend state attachment count %u does not match the "
                                "subpass color attachment count %u in Pipeline (0x%" PRIxLEAST64 ")!  These "
                                "must be the same at draw-time.",
                                pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
            }

            unsigned subpass_num_samples = 0;

            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
                auto attachment = subpass_desc->pColorAttachments[i].attachment;
                if (attachment != VK_ATTACHMENT_UNUSED)
                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_desc->pDepthStencilAttachment &&
                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
                skip_call |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                                "Num samples mismatch! At draw-time, Pipeline (0x%" PRIxLEAST64
                                ") uses %u samples while the current RenderPass (0x%" PRIxLEAST64 ") uses %u samples!",
                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
            }
        } else {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                                 "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
        }
    }
    // Verify that PSO creation renderPass is compatible with active renderPass
    if (pCB->activeRenderPass) {
        std::string err_string;
        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
            !verify_renderpass_compatibility(my_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
                                             err_string)) {
            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline "
                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass),
                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
        }

        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
                        pCB->activeSubpass);
        }
    }
    // TODO : Add more checks here

    return skip_call;
}
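
// Dynamic-viewport example (a sketch): a PSO built with pViewportState->viewportCount == 2 and
// VK_DYNAMIC_STATE_VIEWPORT needs bits 0 and 1 set in pCB->viewportMask (required mask 0x3). If the
// app only records
//
//     vkCmdSetViewport(cmd, 0 /*firstViewport*/, 1 /*viewportCount*/, &viewport);
//
// then viewportMask == 0x1 at draw time, missingViewportMask == 0x2, and the error above lists
// dynamic viewport "1" as never set.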

// Validate overall state at the time of a draw call
static bool ValidateDrawState(layer_data *my_data, GLOBAL_CB_NODE *cb_node, const bool indexed,
                              const VkPipelineBindPoint bind_point, const char *function,
                              UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    bool result = false;
    auto const &state = cb_node->lastBound[bind_point];
    PIPELINE_STATE *pPipe = state.pipeline_state;
    if (nullptr == pPipe) {
        result |= log_msg(
            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
            DRAWSTATE_INVALID_PIPELINE, "DS",
            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
        // Early return as any further checks below will be busted w/o a pipeline
        if (result)
            return true;
    }
    // First check flag states
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
        result = validate_draw_state_flags(my_data, cb_node, pPipe, indexed, msg_code);

    // Now complete other state checks
    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
        string errorString;
        auto pipeline_layout = pPipe->pipeline_layout;

        for (const auto &set_binding_pair : pPipe->active_slots) {
            uint32_t setIndex = set_binding_pair.first;
            // If valid set is not bound throw an error
            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
                                  setIndex);
            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
                                                        errorString)) {
                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
                result |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                            "VkDescriptorSet (0x%" PRIxLEAST64
                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
                            reinterpret_cast<uint64_t &>(setHandle), setIndex, reinterpret_cast<uint64_t &>(pipeline_layout.layout),
                            errorString.c_str());
            } else { // Valid set is bound and layout compatible, validate that it's updated
                // Pull the set node
                cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
                // Gather active bindings
                std::unordered_set<uint32_t> active_bindings;
                for (auto binding : set_binding_pair.second) {
                    active_bindings.insert(binding.first);
                }
                // Make sure set has been updated if it has no immutable samplers
                //  If it has immutable samplers, we'll flag error later as needed depending on binding
                if (!descriptor_set->IsUpdated()) {
                    for (auto binding : active_bindings) {
                        if (!descriptor_set->GetImmutableSamplerPtrFromBinding(binding)) {
                            result |= log_msg(
                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)descriptor_set->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                "Descriptor Set 0x%" PRIxLEAST64 " bound but was never updated. It is now being used to draw so "
                                "this will result in undefined behavior.",
                                (uint64_t)descriptor_set->GetSet());
                        }
                    }
                }
                // Validate the draw-time state for this descriptor set
                std::string err_str;
                if (!descriptor_set->ValidateDrawState(set_binding_pair.second, state.dynamicOffsets[setIndex], &err_str)) {
                    auto set = descriptor_set->GetSet();
                    result |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                "Descriptor set 0x%" PRIxLEAST64 " encountered the following validation error at %s() time: %s",
                                reinterpret_cast<const uint64_t &>(set), function, err_str.c_str());
                }
            }
        }
    }

    // Check general pipeline state that needs to be validated at drawtime
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
        result |= ValidatePipelineDrawtimeState(my_data, state, cb_node, pPipe);

    return result;
}
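
// Draw-time descriptor example (a sketch): allocating a set, binding it, and drawing without ever
// calling vkUpdateDescriptorSets() trips the DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED warning above
// (unless every active binding uses immutable samplers, which need no update).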

static void UpdateDrawState(layer_data *my_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
    auto const &state = cb_state->lastBound[bind_point];
    PIPELINE_STATE *pPipe = state.pipeline_state;
    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
        for (const auto &set_binding_pair : pPipe->active_slots) {
            uint32_t setIndex = set_binding_pair.first;
            // Pull the set node
            cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
            // Bind this set and its active descriptor resources to the command buffer
            descriptor_set->BindCommandBuffer(cb_state, set_binding_pair.second);
            // For given active slots record updated images & buffers
            descriptor_set->GetStorageUpdates(set_binding_pair.second, &cb_state->updateBuffers, &cb_state->updateImages);
        }
    }
    if (pPipe->vertexBindingDescriptions.size() > 0) {
        cb_state->vertex_buffer_used = true;
    }
}

// Validate HW line width capabilities prior to setting requested line width.
static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
    bool skip_call = false;

    // First check to see if the physical device supports wide lines.
    if ((VK_FALSE == my_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
                             dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
                                            "not supported/enabled so lineWidth must be 1.0f!",
                             lineWidth);
    } else {
        // Otherwise, make sure the width falls in the valid range.
        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
                                                          "to between [%f, %f]!",
                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
        }
    }

    return skip_call;
}
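
// Example (a sketch): on a device with wideLines enabled and limits.lineWidthRange = [1.0, 8.0],
// vkCmdSetLineWidth(cmd, 4.0f) passes, while vkCmdSetLineWidth(cmd, 16.0f) is flagged as out of
// range; with wideLines disabled, anything other than 1.0f is flagged.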

// Verify that create state for a pipeline is valid
static bool verifyPipelineCreateState(layer_data *my_data, std::vector<PIPELINE_STATE *> pPipelines, int pipelineIndex) {
    bool skip_call = false;

    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];

    // If create derivative bit is set, check that we've specified a base
    // pipeline correctly, and that the base pipeline was created to allow
    // derivatives.
    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
        PIPELINE_STATE *pBasePipeline = nullptr;
        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
            // This check is a superset of VALIDATION_ERROR_00526 and VALIDATION_ERROR_00528
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            VALIDATION_ERROR_00518, "DS",
                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline. %s",
                            validation_error_map[VALIDATION_ERROR_00518]);
            } else {
                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
            }
        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
            pBasePipeline = getPipelineState(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
        }

        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
        }
    }

    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
        if (!my_data->enabled_features.independentBlend) {
            if (pPipeline->attachments.size() > 1) {
                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
                    // only attachment state, so memcmp is best suited for the comparison
                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
                               sizeof(pAttachments[0]))) {
                        skip_call |=
                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                    VALIDATION_ERROR_01532, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
                                                                  "enabled, all elements of pAttachments must be identical. %s",
                                    validation_error_map[VALIDATION_ERROR_01532]);
                        break;
                    }
                }
            }
        }
        if (!my_data->enabled_features.logicOp &&
            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        VALIDATION_ERROR_01533, "DS",
                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE. %s",
                        validation_error_map[VALIDATION_ERROR_01533]);
        }
    }
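
    // Independent-blend example (a sketch): with VkPhysicalDeviceFeatures::independentBlend disabled,
    // the two attachment states below differ in blendEnable, so pipeline creation is flagged with
    // VALIDATION_ERROR_01532:
    //
    //     VkPipelineColorBlendAttachmentState att[2] = {};
    //     att[0].blendEnable = VK_TRUE;      // attachment 0 blends...
    //     att[1].blendEnable = VK_FALSE;     // ...attachment 1 does not -- must be identical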

    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
    // produces nonsense errors that confuse users. Other layers should already
    // emit errors for renderpass being invalid.
    auto renderPass = getRenderPassState(my_data, pPipeline->graphicsPipelineCI.renderPass);
    if (renderPass && pPipeline->graphicsPipelineCI.subpass >= renderPass->createInfo.subpassCount) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_02122, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
                                                           "is out of range for this renderpass (0..%u). %s",
                             pPipeline->graphicsPipelineCI.subpass, renderPass->createInfo.subpassCount - 1,
                             validation_error_map[VALIDATION_ERROR_02122]);
    }

    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->enabled_features,
                                                    my_data->shaderModuleMap)) {
        skip_call = true;
    }
    // Each shader's stage must be unique
    if (pPipeline->duplicate_shaders) {
        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
            if (pPipeline->duplicate_shaders & stage) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                     "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
                                     string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
            }
        }
    }
    // VS is required
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00532, "DS", "Invalid Pipeline CreateInfo State: Vertex Shader required. %s",
                             validation_error_map[VALIDATION_ERROR_00532]);
    }
    // Either both or neither TC/TE shaders should be defined
    if ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) &&
        !(pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00534, "DS",
                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
                             validation_error_map[VALIDATION_ERROR_00534]);
    }
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) &&
        (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00535, "DS",
                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
                             validation_error_map[VALIDATION_ERROR_00535]);
    }
    // Compute shaders should be specified independent of Gfx shaders
    if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00533, "DS",
                             "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline. %s",
                             validation_error_map[VALIDATION_ERROR_00533]);
    }
    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_02099, "DS", "Invalid Pipeline CreateInfo State: "
                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
                                                           "topology for tessellation pipelines. %s",
                             validation_error_map[VALIDATION_ERROR_02099]);
    }
    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_02100, "DS", "Invalid Pipeline CreateInfo State: "
                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                               "topology is only valid for tessellation pipelines. %s",
                                 validation_error_map[VALIDATION_ERROR_02100]);
        }
    }

    if (pPipeline->graphicsPipelineCI.pTessellationState &&
        ((pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints == 0) ||
         (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints >
          my_data->phys_dev_properties.properties.limits.maxTessellationPatchSize))) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_01426, "DS", "Invalid Pipeline CreateInfo State: "
                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                           "topology used with patchControlPoints value %u."
                                                           " patchControlPoints should be >0 and <=%u. %s",
                             pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints,
                             my_data->phys_dev_properties.properties.limits.maxTessellationPatchSize,
                             validation_error_map[VALIDATION_ERROR_01426]);
    }

    // If a rasterization state is provided, make sure that the line width conforms to the HW.
    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
                                         reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
                                         pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
        }
    }

    // If rasterization is not disabled and subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a
    // valid structure
    if (pPipeline->graphicsPipelineCI.pRasterizationState &&
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        auto subpass_desc = renderPass ? &renderPass->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;
        if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                     0, __LINE__, VALIDATION_ERROR_02115, "DS",
                                     "Invalid Pipeline CreateInfo State: "
                                     "pDepthStencilState is NULL when rasterization is enabled and subpass uses a "
                                     "depth/stencil attachment. %s",
                                     validation_error_map[VALIDATION_ERROR_02115]);
            }
        }
    }
    return skip_call;
}
3325
3326// Free the Pipeline nodes
3327static void deletePipelines(layer_data *my_data) {
3328    if (my_data->pipelineMap.empty())
3329        return;
3330    for (auto &pipe_map_pair : my_data->pipelineMap) {
3331        delete pipe_map_pair.second;
3332    }
3333    my_data->pipelineMap.clear();
3334}
3335
3336// The block of code below is specifically for managing/tracking descriptor sets (DSs)
3337
3338// Return Pool node ptr for specified pool or else NULL
3339DESCRIPTOR_POOL_STATE *getDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
3340    auto pool_it = dev_data->descriptorPoolMap.find(pool);
3341    if (pool_it == dev_data->descriptorPoolMap.end()) {
3342        return NULL;
3343    }
3344    return pool_it->second;
3345}
3346
3347// Return false if the update struct is of a valid type; otherwise flag an error and return the skip value from the callback
3348static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3349    switch (pUpdateStruct->sType) {
3350    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3351    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3352        return false;
3353    default:
3354        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3355                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3356                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3357                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3358    }
3359}
3360
3361// Return the descriptor count for the given update struct
3362static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3363    switch (pUpdateStruct->sType) {
3364    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3365        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3366    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3367        // TODO : Need to understand this case better and make sure code is correct
3368        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3369    default:
3370        return 0;
3371    }
3372}
3373
3374// For given layout and update, return the first overall index of the layout that is updated
3375static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3376                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3377    return binding_start_index + arrayIndex;
3378}
3379// For given layout and update, return the last overall index of the layout that is updated
3380static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3381                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3382    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3383    return binding_start_index + arrayIndex + count - 1;
3384}
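// Example: for a binding whose first overall index (binding_start_index) is 4, an update with
// arrayIndex 1 and descriptorCount 3 covers overall indices 5..7, so getUpdateStartIndex() returns
// 4 + 1 = 5 and getUpdateEndIndex() returns 4 + 1 + 3 - 1 = 7.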
3385// Verify that the descriptor type in the update struct matches what's expected by the layout
3386static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
3387                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3388    // First get actual type of update
3389    bool skip_call = false;
3390    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
3391    switch (pUpdateStruct->sType) {
3392    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3393        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3394        break;
3395    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3396        // No need to validate
3397        return false;
3399    default:
3400        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3401                             DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3402                             "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3403                             string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3404    }
3405    if (!skip_call) {
3406        if (layout_type != actualType) {
3407            skip_call |= log_msg(
3408                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3409                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3410                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3411                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
3412        }
3413    }
3414    return skip_call;
3415}
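// Example for validateUpdateConsistency(): a VkWriteDescriptorSet with descriptorType
// VK_DESCRIPTOR_TYPE_SAMPLER that overlaps a binding declared as
// VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER fails the layout_type != actualType check and logs
// DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH.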
3416// TODO: Consolidate functions
3417bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
3418    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
3419    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3420        return false;
3421    }
3422    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3423    imgpair.subresource.aspectMask = aspectMask;
3424    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3425    if (imgsubIt == pCB->imageLayoutMap.end()) {
3426        return false;
3427    }
3428    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
3429        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3430                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3431                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3432                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
3433    }
3434    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
3435        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3436                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3437                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
3438                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
3439    }
3440    node = imgsubIt->second;
3441    return true;
3442}
3443
3444bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
3445    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3446        return false;
3447    }
3448    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3449    imgpair.subresource.aspectMask = aspectMask;
3450    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3451    if (imgsubIt == my_data->imageLayoutMap.end()) {
3452        return false;
3453    }
3454    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
3455        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3456                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3457                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3458                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
3459    }
3460    layout = imgsubIt->second.layout;
3461    return true;
3462}
3463
3464// find layout(s) on the cmd buf level
3465bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3466    ImageSubresourcePair imgpair = {image, true, range};
3467    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
3468    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
3469    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
3470    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
3471    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
3472    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3473        imgpair = {image, false, VkImageSubresource()};
3474        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3475        if (imgsubIt == pCB->imageLayoutMap.end())
3476            return false;
3477        node = imgsubIt->second;
3478    }
3479    return true;
3480}
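// Example: for a combined depth/stencil image, each successful per-aspect probe above overwrites
// 'node'; if the DEPTH and STENCIL aspects were recorded with different layouts, the single-aspect
// FindLayout() overload logs DRAWSTATE_INVALID_LAYOUT, since one combined query cannot return two
// different layouts.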
3481
3482// find layout(s) on the global level
3483bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3484    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
3485    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3486    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3487    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3488    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3489    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3490        imgpair = {imgpair.image, false, VkImageSubresource()};
3491        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3492        if (imgsubIt == my_data->imageLayoutMap.end())
3493            return false;
3494        layout = imgsubIt->second.layout;
3495    }
3496    return true;
3497}
3498
3499bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3500    ImageSubresourcePair imgpair = {image, true, range};
3501    return FindLayout(my_data, imgpair, layout);
3502}
3503
3504bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3505    auto sub_data = my_data->imageSubresourceMap.find(image);
3506    if (sub_data == my_data->imageSubresourceMap.end())
3507        return false;
3508    auto image_state = getImageState(my_data, image);
3509    if (!image_state)
3510        return false;
3511    bool ignoreGlobal = false;
3512    // TODO: Make this robust for >1 aspect mask. Now it will just say ignore
3513    // potential errors in this case.
3514    if (sub_data->second.size() >= (image_state->createInfo.arrayLayers * image_state->createInfo.mipLevels + 1)) {
3515        ignoreGlobal = true;
3516    }
3517    for (auto imgsubpair : sub_data->second) {
3518        if (ignoreGlobal && !imgsubpair.hasSubresource)
3519            continue;
3520        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3521        if (img_data != my_data->imageLayoutMap.end()) {
3522            layouts.push_back(img_data->second.layout);
3523        }
3524    }
3525    return true;
3526}
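// Example of the ignoreGlobal heuristic in FindLayouts(): an image with arrayLayers = 6 and
// mipLevels = 4 has 6 * 4 = 24 subresources, so once its imageSubresourceMap vector holds at least
// 24 + 1 = 25 entries (every subresource plus the whole-image entry), the whole-image entry adds no
// information and is skipped.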
3527
3528// Set the layout on the global level
3529void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3530    VkImage &image = imgpair.image;
3531    // TODO (mlentine): Maybe set format if new? Not used atm.
3532    my_data->imageLayoutMap[imgpair].layout = layout;
3533    // TODO (mlentine): Maybe make vector a set?
3534    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3535    if (subresource == my_data->imageSubresourceMap[image].end()) {
3536        my_data->imageSubresourceMap[image].push_back(imgpair);
3537    }
3538}
3539
3540// Set the layout on the cmdbuf level
3541void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3542    pCB->imageLayoutMap[imgpair] = node;
3543    // TODO (mlentine): Maybe make vector a set?
3544    auto subresource =
3545        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3546    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3547        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3548    }
3549}
3550
3551void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3552    // TODO (mlentine): Maybe make vector a set?
3553    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3554        pCB->imageSubresourceMap[imgpair.image].end()) {
3555        pCB->imageLayoutMap[imgpair].layout = layout;
3556    } else {
3557        // TODO (mlentine): Could be expensive and might need to be removed.
3558        assert(imgpair.hasSubresource);
3559        IMAGE_CMD_BUF_LAYOUT_NODE node;
3560        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3561            node.initialLayout = layout;
3562        }
3563        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3564    }
3565}
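// Note on the else branch above: the first time a command buffer touches a subresource, the recorded
// node is {initialLayout, layout}; ValidateCmdBufImageLayouts() later compares that initialLayout
// against the global imageLayoutMap when the command buffer is submitted.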
3566
3567template <class OBJECT, class LAYOUT>
3568void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3569    if (imgpair.subresource.aspectMask & aspectMask) {
3570        imgpair.subresource.aspectMask = aspectMask;
3571        SetLayout(pObject, imgpair, layout);
3572    }
3573}
3574
3575template <class OBJECT, class LAYOUT>
3576void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3577    ImageSubresourcePair imgpair = {image, true, range};
3578    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3579    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3580    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3581    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3582}
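// Example: a VkImageSubresource with aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT
// is recorded as two separate map entries, one per aspect, because each per-aspect call above rewrites
// the mask to a single bit before storing.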
3583
3584template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3585    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3586    SetLayout(pObject, imgpair, layout);
3587}
3588
3589void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3590    auto view_state = getImageViewState(dev_data, imageView);
3591    assert(view_state);
3592    auto image = view_state->create_info.image;
3593    const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
3594    // TODO: Do not iterate over every possibility - consolidate where possible
3595    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3596        uint32_t level = subRange.baseMipLevel + j;
3597        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3598            uint32_t layer = subRange.baseArrayLayer + k;
3599            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3600            // TODO: If ImageView was created with depth or stencil, transition both layouts as
3601            // the aspectMask is ignored and both are used. Verify that the extra implicit layout
3602            // is OK for descriptor set layout validation
3603            if (subRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
3604                if (vk_format_is_depth_and_stencil(view_state->create_info.format)) {
3605                    sub.aspectMask |= (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
3606                }
3607            }
3608            SetLayout(pCB, image, sub, layout);
3609        }
3610    }
3611}
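// Example: an image view with baseMipLevel = 1, levelCount = 2, baseArrayLayer = 0 and layerCount = 3
// transitions the six subresources (level, layer) in {1,2} x {0,1,2}, one SetLayout() call per
// mip level / array layer combination.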
3612
3613// Validate that the given set exists and that it's not being used by an in-flight CmdBuffer
3614// func_str is the name of the calling function
3615// Return false if no errors occur
3616// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
3617static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
3618    if (dev_data->instance_data->disabled.idle_descriptor_set)
3619        return false;
3620    bool skip_call = false;
3621    auto set_node = dev_data->setMap.find(set);
3622    if (set_node == dev_data->setMap.end()) {
3623        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3624                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3625                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3626                             (uint64_t)(set));
3627    } else {
3628        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
3629        if (set_node->second->in_use.load()) {
3630            skip_call |=
3631                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3632                        (uint64_t)(set), __LINE__, VALIDATION_ERROR_00919, "DS",
3633                        "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
3634                        func_str.c_str(), (uint64_t)(set), validation_error_map[VALIDATION_ERROR_00919]);
3635        }
3636    }
3637    return skip_call;
3638}
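// Usage sketch (the callers live elsewhere in this file): destroy/free paths pass their own API name,
// e.g. validateIdleDescriptorSet(dev_data, set, "vkFreeDescriptorSets"), so the in-use message reads
// "Cannot call vkFreeDescriptorSets() on descriptor set 0x... that is in use by a command buffer."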
3639
3640// Remove set from setMap and delete the set
3641static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
3642    dev_data->setMap.erase(descriptor_set->GetSet());
3643    delete descriptor_set;
3644}
3645// Free all DS Pools including their Sets & related sub-structs
3646// NOTE : Calls to this function should be wrapped in mutex
3647static void deletePools(layer_data *my_data) {
3648    if (my_data->descriptorPoolMap.empty())
3649        return;
3650    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
3651        // Remove this pool's sets from setMap and delete them
3652        for (auto ds : (*ii).second->sets) {
3653            freeDescriptorSet(my_data, ds);
3654        }
3655        (*ii).second->sets.clear();
3656    }
3657    my_data->descriptorPoolMap.clear();
3658}
3659
3660static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
3661                                VkDescriptorPoolResetFlags flags) {
3662    DESCRIPTOR_POOL_STATE *pPool = getDescriptorPoolState(my_data, pool);
    // Guard against an unknown pool handle (getDescriptorPoolState() returns NULL in that case)
    if (!pPool)
        return;
3663    // TODO: validate flags
3664    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
3665    for (auto ds : pPool->sets) {
3666        freeDescriptorSet(my_data, ds);
3667    }
3668    pPool->sets.clear();
3669    // Reset available count for each type and available sets for this pool
3670    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
3671        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
3672    }
3673    pPool->availableSets = pPool->maxSets;
3674}
3675
3676// For given CB object, fetch associated CB Node from map
3677static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
3678    auto it = my_data->commandBufferMap.find(cb);
3679    if (it == my_data->commandBufferMap.end()) {
3680        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3681                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3682                "Attempt to use CommandBuffer 0x%p that doesn't exist!", cb);
3683        return NULL;
3684    }
3685    return it->second;
3686}
3687// Free all CB Nodes
3688// NOTE : Calls to this function should be wrapped in mutex
3689static void deleteCommandBuffers(layer_data *my_data) {
3690    if (my_data->commandBufferMap.empty()) {
3691        return;
3692    }
3693    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
3694        delete (*ii).second;
3695    }
3696    my_data->commandBufferMap.clear();
3697}
3698
3699static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
3700    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3701                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
3702                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
3703}
3704
3705// If a renderpass is active, verify that the given command type is appropriate for current subpass state
3706bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
3707    if (!pCB->activeRenderPass)
3708        return false;
3709    bool skip_call = false;
3710    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
3711        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
3712        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3713                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3714                             "Commands cannot be called in a subpass using secondary command buffers.");
3715    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
3716        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3717                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3718                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
3719    }
3720    return skip_call;
3721}
3722
3723static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3724    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
3725        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3726                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3727                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3728    return false;
3729}
3730
3731static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3732    if (!(flags & VK_QUEUE_COMPUTE_BIT))
3733        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3734                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3735                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
3736    return false;
3737}
3738
3739static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3740    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
3741        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3742                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3743                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.",
                       name);
3744    return false;
3745}
3746
3747// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not
3748//  in the recording state or if there's an issue with the Cmd ordering
3749static bool ValidateCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
3750    bool skip_call = false;
3751    auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
3752    if (pPool) {
3753        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
3754        switch (cmd) {
3755        case CMD_BINDPIPELINE:
3756        case CMD_BINDPIPELINEDELTA:
3757        case CMD_BINDDESCRIPTORSETS:
3758        case CMD_FILLBUFFER:
3759        case CMD_CLEARCOLORIMAGE:
3760        case CMD_SETEVENT:
3761        case CMD_RESETEVENT:
3762        case CMD_WAITEVENTS:
3763        case CMD_BEGINQUERY:
3764        case CMD_ENDQUERY:
3765        case CMD_RESETQUERYPOOL:
3766        case CMD_COPYQUERYPOOLRESULTS:
3767        case CMD_WRITETIMESTAMP:
3768            skip_call |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3769            break;
3770        case CMD_SETVIEWPORTSTATE:
3771        case CMD_SETSCISSORSTATE:
3772        case CMD_SETLINEWIDTHSTATE:
3773        case CMD_SETDEPTHBIASSTATE:
3774        case CMD_SETBLENDSTATE:
3775        case CMD_SETDEPTHBOUNDSSTATE:
3776        case CMD_SETSTENCILREADMASKSTATE:
3777        case CMD_SETSTENCILWRITEMASKSTATE:
3778        case CMD_SETSTENCILREFERENCESTATE:
3779        case CMD_BINDINDEXBUFFER:
3780        case CMD_BINDVERTEXBUFFER:
3781        case CMD_DRAW:
3782        case CMD_DRAWINDEXED:
3783        case CMD_DRAWINDIRECT:
3784        case CMD_DRAWINDEXEDINDIRECT:
3785        case CMD_BLITIMAGE:
3786        case CMD_CLEARATTACHMENTS:
3787        case CMD_CLEARDEPTHSTENCILIMAGE:
3788        case CMD_RESOLVEIMAGE:
3789        case CMD_BEGINRENDERPASS:
3790        case CMD_NEXTSUBPASS:
3791        case CMD_ENDRENDERPASS:
3792            skip_call |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
3793            break;
3794        case CMD_DISPATCH:
3795        case CMD_DISPATCHINDIRECT:
3796            skip_call |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3797            break;
3798        case CMD_COPYBUFFER:
3799        case CMD_COPYIMAGE:
3800        case CMD_COPYBUFFERTOIMAGE:
3801        case CMD_COPYIMAGETOBUFFER:
3802        case CMD_CLONEIMAGEDATA:
3803        case CMD_UPDATEBUFFER:
3804        case CMD_PIPELINEBARRIER:
3805        case CMD_EXECUTECOMMANDS:
3806        case CMD_END:
3807            break;
3808        default:
3809            break;
3810        }
3811    }
3812    if (pCB->state != CB_RECORDING) {
3813        skip_call |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
3814    } else {
3815        skip_call |= ValidateCmdSubpassState(my_data, pCB, cmd);
3816    }
3817    return skip_call;
3818}
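// Example for ValidateCmd(): recording vkCmdDispatch into a command buffer whose pool was created for
// a queue family without VK_QUEUE_COMPUTE_BIT trips checkComputeBit() above, and recording any command
// before vkBeginCommandBuffer() trips the CB_RECORDING check (DRAWSTATE_NO_BEGIN_COMMAND_BUFFER).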
3819
3820static void UpdateCmdBufferLastCmd(layer_data *my_data, GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd) {
3821    if (cb_state->state == CB_RECORDING) {
3822        cb_state->last_cmd = cmd;
3823    }
3824}
3825// For the given object struct, return a BASE_NODE-typed pointer to its wrapping state struct
3826BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
3827    BASE_NODE *base_ptr = nullptr;
3828    switch (object_struct.type) {
3829    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
3830        base_ptr = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
3831        break;
3832    }
3833    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
3834        base_ptr = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
3835        break;
3836    }
3837    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
3838        base_ptr = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
3839        break;
3840    }
3841    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
3842        base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
3843        break;
3844    }
3845    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
3846        base_ptr = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
3847        break;
3848    }
3849    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
3850        base_ptr = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
3851        break;
3852    }
3853    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
3854        base_ptr = getImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
3855        break;
3856    }
3857    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
3858        base_ptr = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
3859        break;
3860    }
3861    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
3862        base_ptr = getEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
3863        break;
3864    }
3865    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
3866        base_ptr = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
3867        break;
3868    }
3869    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
3870        base_ptr = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
3871        break;
3872    }
3873    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
3874        base_ptr = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
3875        break;
3876    }
3877    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
3878        base_ptr = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
3879        break;
3880    }
3881    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
3882        base_ptr = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
3883        break;
3884    }
3885    default:
3886        // TODO : Any other objects to be handled here?
3887        assert(0);
3888        break;
3889    }
3890    return base_ptr;
3891}
3892
3893// Tie the VK_OBJECT to the cmd buffer which includes:
3894//  Add object_binding to cmd buffer
3895//  Add cb_binding to object
3896static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
3897    cb_bindings->insert(cb_node);
3898    cb_node->object_bindings.insert(obj);
3899}
3900// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
3901static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
3902    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
3903    if (base_obj)
3904        base_obj->cb_bindings.erase(cb_node);
3905}
3906// Reset the command buffer state
3907//  Maintain the createInfo and set state to CB_NEW, but clear all other state
3908static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
3909    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
3910    if (pCB) {
3911        pCB->in_use.store(0);
3912        pCB->last_cmd = CMD_NONE;
3913        // Reset CB state (note that createInfo is not cleared)
3914        pCB->commandBuffer = cb;
3915        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
3916        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
3917        pCB->numCmds = 0;
3918        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
3919        pCB->state = CB_NEW;
3920        pCB->submitCount = 0;
3921        pCB->status = 0;
3922        pCB->viewportMask = 0;
3923        pCB->scissorMask = 0;
3924
3925        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
3926            pCB->lastBound[i].reset();
3927        }
3928
3929        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
3930        pCB->activeRenderPass = nullptr;
3931        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
3932        pCB->activeSubpass = 0;
3933        pCB->broken_bindings.clear();
3934        pCB->waitedEvents.clear();
3935        pCB->events.clear();
3936        pCB->writeEventsBeforeWait.clear();
3937        pCB->waitedEventsBeforeQueryReset.clear();
3938        pCB->queryToStateMap.clear();
3939        pCB->activeQueries.clear();
3940        pCB->startedQueries.clear();
3941        pCB->imageSubresourceMap.clear();
3942        pCB->imageLayoutMap.clear();
3943        pCB->eventToStageMap.clear();
3944        pCB->drawData.clear();
3945        pCB->currentDrawData.buffers.clear();
3946        pCB->vertex_buffer_used = false;
3947        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
3948        // Make sure any secondaryCommandBuffers are removed from globalInFlight
3949        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
3950            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
3951        }
3952        pCB->secondaryCommandBuffers.clear();
3953        pCB->updateImages.clear();
3954        pCB->updateBuffers.clear();
3955        clear_cmd_buf_and_mem_references(dev_data, pCB);
3956        pCB->eventUpdates.clear();
3957        pCB->queryUpdates.clear();
3958
3959        // Remove object bindings
3960        for (auto obj : pCB->object_bindings) {
3961            removeCommandBufferBinding(dev_data, &obj, pCB);
3962        }
3963        pCB->object_bindings.clear();
3964        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
3965        for (auto framebuffer : pCB->framebuffers) {
3966            auto fb_state = getFramebufferState(dev_data, framebuffer);
3967            if (fb_state)
3968                fb_state->cb_bindings.erase(pCB);
3969        }
3970        pCB->framebuffers.clear();
3971        pCB->activeFramebuffer = VK_NULL_HANDLE;
3972    }
3973}
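// Usage note: resetCB() is intended for the vkBeginCommandBuffer()/vkResetCommandBuffer() paths
// (the callers live elsewhere in this file); it deliberately leaves createInfo intact so the same
// command buffer can be re-begun against its original pool and level.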
3974
3975// Set PSO-related status bits for CB, including dynamic state set via PSO
3976static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe) {
3977    // Account for any dynamic state not set via this PSO
3978    if (!pPipe->graphicsPipelineCI.pDynamicState ||
3979        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
3980        pCB->status |= CBSTATUS_ALL_STATE_SET;
3981    } else {
3982        // First consider all state on
3983        // Then unset any state that's noted as dynamic in PSO
3984        // Finally OR that into CB statemask
3985        CBStatusFlags psoDynStateMask = CBSTATUS_ALL_STATE_SET;
3986        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
3987            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
3988            case VK_DYNAMIC_STATE_LINE_WIDTH:
3989                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
3990                break;
3991            case VK_DYNAMIC_STATE_DEPTH_BIAS:
3992                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
3993                break;
3994            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
3995                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
3996                break;
3997            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
3998                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
3999                break;
4000            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
4001                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
4002                break;
4003            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
4004                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
4005                break;
4006            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
4007                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
4008                break;
4009            default:
4010                // TODO : Flag error here
4011                break;
4012            }
4013        }
4014        pCB->status |= psoDynStateMask;
4015    }
4016}
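// Example for set_cb_pso_status(): a pipeline whose pDynamicState lists only
// VK_DYNAMIC_STATE_LINE_WIDTH yields psoDynStateMask = CBSTATUS_ALL_STATE_SET & ~CBSTATUS_LINE_WIDTH_SET,
// so every status bit except line width is satisfied by the PSO and the app must still record
// vkCmdSetLineWidth() before drawing.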
4017
4018// Flags validation error if the associated call is made inside a render pass. The apiName
4019// routine should ONLY be called outside a render pass.
4020static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName,
4021                             UNIQUE_VALIDATION_ERROR_CODE msgCode) {
4022    bool inside = false;
4023    if (pCB->activeRenderPass) {
4024        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4025                         (uint64_t)pCB->commandBuffer, __LINE__, msgCode, "DS",
4026                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 "). %s", apiName,
4027                         (uint64_t)pCB->activeRenderPass->renderPass, validation_error_map[msgCode]);
4028    }
4029    return inside;
4030}
4031
4032// Flags validation error if the associated call is made outside a render pass. The apiName
4033// routine should ONLY be called inside a render pass.
4034static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName,
4035                              UNIQUE_VALIDATION_ERROR_CODE msgCode) {
4036    bool outside = false;
4037    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
4038        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
4039         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
4040        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4041                          (uint64_t)pCB->commandBuffer, __LINE__, msgCode, "DS",
4042                          "%s: This call must be issued inside an active render pass. %s", apiName, validation_error_map[msgCode]);
4043    }
4044    return outside;
4045}
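// Usage sketch (the callers live elsewhere in this file): transfer-style commands such as
// vkCmdCopyBuffer are checked with insideRenderPass() because they must NOT be recorded inside an
// active render pass, while draw-style commands such as vkCmdDraw are checked with outsideRenderPass()
// because they must be. Note the secondary-CB exemption above: buffers begun with
// VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT are not flagged by outsideRenderPass().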
4046
4047static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
4049    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
4051}
4052
4053static void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, instance_layer_data *instance_data) {
4054    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4055        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME))
4056            instance_data->surfaceExtensionEnabled = true;
4057        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_EXTENSION_NAME))
4058            instance_data->displayExtensionEnabled = true;
4059#ifdef VK_USE_PLATFORM_ANDROID_KHR
4060        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME))
4061            instance_data->androidSurfaceExtensionEnabled = true;
4062#endif
4063#ifdef VK_USE_PLATFORM_MIR_KHR
4064        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME))
4065            instance_data->mirSurfaceExtensionEnabled = true;
4066#endif
4067#ifdef VK_USE_PLATFORM_WAYLAND_KHR
4068        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME))
4069            instance_data->waylandSurfaceExtensionEnabled = true;
4070#endif
4071#ifdef VK_USE_PLATFORM_WIN32_KHR
4072        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME))
4073            instance_data->win32SurfaceExtensionEnabled = true;
4074#endif
4075#ifdef VK_USE_PLATFORM_XCB_KHR
4076        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME))
4077            instance_data->xcbSurfaceExtensionEnabled = true;
4078#endif
4079#ifdef VK_USE_PLATFORM_XLIB_KHR
4080        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME))
4081            instance_data->xlibSurfaceExtensionEnabled = true;
4082#endif
4083    }
4084}
4085
4086VKAPI_ATTR VkResult VKAPI_CALL
4087CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
4088    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4089
4090    assert(chain_info->u.pLayerInfo);
4091    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4092    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
4093    if (fpCreateInstance == NULL)
4094        return VK_ERROR_INITIALIZATION_FAILED;
4095
4096    // Advance the link info for the next element on the chain
4097    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4098
4099    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
4100    if (result != VK_SUCCESS)
4101        return result;
4102
4103    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), instance_layer_data_map);
4104    instance_data->instance = *pInstance;
4105    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
4106
4107    instance_data->report_data = debug_report_create_instance(
4108        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
4109    checkInstanceRegisterExtensions(pCreateInfo, instance_data);
4110    init_core_validation(instance_data, pAllocator);
4111
4112    ValidateLayerOrdering(*pCreateInfo);
4113
4114    return result;
4115}
4116
4117// Hook DestroyInstance to remove tableInstanceMap entry
4118VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
4119    // TODOSC : Shouldn't need any customization here
4120    dispatch_key key = get_dispatch_key(instance);
4121    // TBD: Need any locking this early, in case this function is called at the
4122    // same time by more than one thread?
4123    instance_layer_data *instance_data = get_my_data_ptr(key, instance_layer_data_map);
4124    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
4125
4126    std::lock_guard<std::mutex> lock(global_lock);
4127    // Clean up logging callback, if any
4128    while (instance_data->logging_callback.size() > 0) {
4129        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
4130        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
4131        instance_data->logging_callback.pop_back();
4132    }
4133
4134    layer_debug_report_destroy_instance(instance_data->report_data);
4135    layer_data_map.erase(key);
4136}
4137
4138static void checkDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
4140    // TBD: Need any locking, in case this function is called at the same time
4141    // by more than one thread?
4142    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4143    dev_data->device_extensions.wsi_enabled = false;
4144    dev_data->device_extensions.wsi_display_swapchain_enabled = false;
4145
4146    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4147        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
4148            dev_data->device_extensions.wsi_enabled = true;
4149        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME) == 0)
4150            dev_data->device_extensions.wsi_display_swapchain_enabled = true;
4151    }
4152}
4153
4154// Verify that queue family has been properly requested
4155static bool ValidateRequestedQueueFamilyProperties(instance_layer_data *instance_data, VkPhysicalDevice gpu,
4156                                                   const VkDeviceCreateInfo *create_info) {
4157    bool skip_call = false;
4158    auto physical_device_state = getPhysicalDeviceState(instance_data, gpu);
4159    // First check whether the app has actually requested queueFamilyProperties
4160    if (!physical_device_state) {
4161        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
4162                             0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
4163                             "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
4164    } else if (QUERY_DETAILS != physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
4165        // TODO: This is not called out as an invalid use in the spec so make more informative recommendation.
4166        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
4167                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
4168                             "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
4169    } else {
4170        // Check that the requested queue properties are valid
4171        for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
4172            uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
4173            if (requestedIndex >= physical_device_state->queue_family_properties.size()) {
4174                skip_call |= log_msg(
4175                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
4176                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
4177                    "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
4178            } else if (create_info->pQueueCreateInfos[i].queueCount >
4179                       physical_device_state->queue_family_properties[requestedIndex].queueCount) {
4180                skip_call |=
4181                    log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
4182                            0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
4183                            "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
4184                            "requested queueCount is %u.",
4185                            requestedIndex, physical_device_state->queue_family_properties[requestedIndex].queueCount,
4186                            create_info->pQueueCreateInfos[i].queueCount);
4187            }
4188        }
4189    }
4190    return skip_call;
4191}
4192
4193// Verify that features have been queried and that they are available
4194static bool ValidateRequestedFeatures(instance_layer_data *dev_data, VkPhysicalDevice phys, const VkPhysicalDeviceFeatures *requested_features) {
4195    bool skip_call = false;
4196
4197    auto phys_device_state = getPhysicalDeviceState(dev_data, phys);
    // Guard against a device for which vkEnumeratePhysicalDevices() was never called;
    // ValidateRequestedQueueFamilyProperties() reports that error separately.
    if (!phys_device_state)
        return skip_call;
4198    const VkBool32 *actual = reinterpret_cast<VkBool32 *>(&phys_device_state->features);
4199    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
4200    // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
4201    //  Need to provide the struct member name with the issue. To do that seems like we'll
4202    //  have to loop through each struct member which should be done w/ codegen to keep in synch.
4203    uint32_t errors = 0;
4204    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
4205    for (uint32_t i = 0; i < total_bools; i++) {
4206        if (requested[i] > actual[i]) {
4207            // TODO: Add index to struct member name helper to be able to include a feature name
4208            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4209                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
4210                "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
4211                "which is not available on this device.",
4212                i);
4213            errors++;
4214        }
4215    }
4216    if (errors && (UNCALLED == phys_device_state->vkGetPhysicalDeviceFeaturesState)) {
4217        // If user didn't request features, notify them that they should
4218        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
4219        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4220                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
4221                             "DL", "You requested features that are unavailable on this device. You should first query feature "
4222                                   "availability by calling vkGetPhysicalDeviceFeatures().");
4223    }
4224    return skip_call;
4225}
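// Example: if the app sets geometryShader = VK_TRUE in VkPhysicalDeviceFeatures but the device
// reports it unsupported, then requested[i] (1) > actual[i] (0) for that member and the loop above
// reports "requesting feature #i": by index, not by member name, until the codegen TODO is addressed.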
4226
4227VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
4228                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4229    instance_layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), instance_layer_data_map);
4230    bool skip_call = false;
4231
4232    // Check that any requested features are available
4233    if (pCreateInfo->pEnabledFeatures) {
4234        skip_call |= ValidateRequestedFeatures(my_instance_data, gpu, pCreateInfo->pEnabledFeatures);
4235    }
4236    skip_call |= ValidateRequestedQueueFamilyProperties(my_instance_data, gpu, pCreateInfo);
4237
4238    if (skip_call) {
4239        return VK_ERROR_VALIDATION_FAILED_EXT;
4240    }
4241
4242    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4243
4244    assert(chain_info->u.pLayerInfo);
4245    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4246    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4247    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
4248    if (fpCreateDevice == NULL) {
4249        return VK_ERROR_INITIALIZATION_FAILED;
4250    }
4251
4252    // Advance the link info for the next element on the chain
4253    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4254
4255    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4256    if (result != VK_SUCCESS) {
4257        return result;
4258    }
4259
4260    std::unique_lock<std::mutex> lock(global_lock);
4261    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4262
4263    my_device_data->instance_data = my_instance_data;
4264    // Setup device dispatch table
4265    layer_init_device_dispatch_table(*pDevice, &my_device_data->dispatch_table, fpGetDeviceProcAddr);
4266    my_device_data->device = *pDevice;
4267    // Save PhysicalDevice handle
4268    my_device_data->physical_device = gpu;
4269
4270    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4271    checkDeviceRegisterExtensions(pCreateInfo, *pDevice);
4272    // Get physical device limits for this device
4273    my_instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
4274    uint32_t count;
4275    my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4276    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
4277    my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
4278        gpu, &count, my_device_data->phys_dev_properties.queue_family_properties.data());
4279    // TODO: device limits should make sure these are compatible
4280    if (pCreateInfo->pEnabledFeatures) {
4281        my_device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
4282    } else {
4283        memset(&my_device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
4284    }
4285    // Store physical device properties and physical device mem limits into device layer_data structs
4286    my_instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
4287    my_instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &my_device_data->phys_dev_props);
4288    lock.unlock();
4289
4290    ValidateLayerOrdering(*pCreateInfo);
4291
4292    return result;
4293}
4294
4296VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4297    // TODOSC : Shouldn't need any customization here
4298    bool skip = false;
4299    dispatch_key key = get_dispatch_key(device);
4300    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4301    // Free all the memory
4302    std::unique_lock<std::mutex> lock(global_lock);
4303    deletePipelines(dev_data);
4304    dev_data->renderPassMap.clear();
4305    deleteCommandBuffers(dev_data);
4306    // This will also delete all sets in the pool & remove them from setMap
4307    deletePools(dev_data);
4308    // All sets should be removed
4309    assert(dev_data->setMap.empty());
4310    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
4311        delete del_layout.second;
4312    }
4313    dev_data->descriptorSetLayoutMap.clear();
4314    dev_data->imageViewMap.clear();
4315    dev_data->imageMap.clear();
4316    dev_data->imageSubresourceMap.clear();
4317    dev_data->imageLayoutMap.clear();
4318    dev_data->bufferViewMap.clear();
4319    dev_data->bufferMap.clear();
4320    // Queues persist until device is destroyed
4321    dev_data->queueMap.clear();
4322    // Report any memory leaks
4323    layer_debug_report_destroy_device(device);
4324    lock.unlock();
4325
4326#if DISPATCH_MAP_DEBUG
4327    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
4328#endif
4329    if (!skip) {
4330        dev_data->dispatch_table.DestroyDevice(device, pAllocator);
4331        layer_data_map.erase(key);
4332    }
4333}
4334
4335static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4336
4337// This validates that the initial layout specified in the command buffer for the IMAGE
4338// is the same as the global IMAGE layout
4340static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4341    bool skip_call = false;
4342    for (auto cb_image_data : pCB->imageLayoutMap) {
4343        VkImageLayout imageLayout;
4344        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4345            skip_call |=
4346                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4347                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
4348                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4349        } else {
4350            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4351                // TODO: Set memory invalid which is in mem_tracker currently
4352            } else if (imageLayout != cb_image_data.second.initialLayout) {
4353                if (cb_image_data.first.hasSubresource) {
4354                    skip_call |= log_msg(
4355                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4356                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4357                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X, array layer %u, "
4358                        "mip level %u], with layout %s when first use is %s.",
4359                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
4360                        cb_image_data.first.subresource.arrayLayer, cb_image_data.first.subresource.mipLevel,
4361                        string_VkImageLayout(imageLayout), string_VkImageLayout(cb_image_data.second.initialLayout));
4363                } else {
4364                    skip_call |= log_msg(
4365                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4366                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4367                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
4368                        "first use is %s.",
4369                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4370                        string_VkImageLayout(cb_image_data.second.initialLayout));
4371                }
4372            }
4373            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4374        }
4375    }
4376    return skip_call;
4377}
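
// Illustrative sketch (application-side, not layer code; handles and layouts are
// hypothetical): a sequence that would trip the check above, because the layout the
// command buffer expects at first use disagrees with the image's tracked global layout
// at submit time.
//
//     // An earlier submission left "image" in VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL.
//     VkImageMemoryBarrier barrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//     barrier.oldLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; // wrong first-use layout
//     barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
//     barrier.image = image;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                          VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
//     // vkQueueSubmit() is then flagged: first use is SHADER_READ_ONLY_OPTIMAL while the
//     // tracked layout is TRANSFER_DST_OPTIMAL.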
4378
4379// Loop through bound objects and increment their in_use counts
4380//  For any unknown objects, flag an error
4381static bool ValidateAndIncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
4382    bool skip = false;
4383    DRAW_STATE_ERROR error_code = DRAWSTATE_NONE;
4384    BASE_NODE *base_obj = nullptr;
4385    for (auto obj : cb_node->object_bindings) {
4386        switch (obj.type) {
4387        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
4388            base_obj = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(obj.handle));
4389            error_code = DRAWSTATE_INVALID_DESCRIPTOR_SET;
4390            break;
4391        }
4392        case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
4393            base_obj = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(obj.handle));
4394            error_code = DRAWSTATE_INVALID_SAMPLER;
4395            break;
4396        }
4397        case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
4398            base_obj = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(obj.handle));
4399            error_code = DRAWSTATE_INVALID_QUERY_POOL;
4400            break;
4401        }
4402        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
4403            base_obj = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(obj.handle));
4404            error_code = DRAWSTATE_INVALID_PIPELINE;
4405            break;
4406        }
4407        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
4408            base_obj = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
4409            error_code = DRAWSTATE_INVALID_BUFFER;
4410            break;
4411        }
4412        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
4413            base_obj = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(obj.handle));
4414            error_code = DRAWSTATE_INVALID_BUFFER_VIEW;
4415            break;
4416        }
4417        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
4418            base_obj = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
4419            error_code = DRAWSTATE_INVALID_IMAGE;
4420            break;
4421        }
4422        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
4423            base_obj = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(obj.handle));
4424            error_code = DRAWSTATE_INVALID_IMAGE_VIEW;
4425            break;
4426        }
4427        case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
4428            base_obj = getEventNode(dev_data, reinterpret_cast<VkEvent &>(obj.handle));
4429            error_code = DRAWSTATE_INVALID_EVENT;
4430            break;
4431        }
4432        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
4433            base_obj = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(obj.handle));
4434            error_code = DRAWSTATE_INVALID_DESCRIPTOR_POOL;
4435            break;
4436        }
4437        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
4438            base_obj = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(obj.handle));
4439            error_code = DRAWSTATE_INVALID_COMMAND_POOL;
4440            break;
4441        }
4442        case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
4443            base_obj = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(obj.handle));
4444            error_code = DRAWSTATE_INVALID_FRAMEBUFFER;
4445            break;
4446        }
4447        case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
4448            base_obj = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(obj.handle));
4449            error_code = DRAWSTATE_INVALID_RENDERPASS;
4450            break;
4451        }
4452        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
4453            base_obj = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(obj.handle));
4454            error_code = DRAWSTATE_INVALID_DEVICE_MEMORY;
4455            break;
4456        }
4457        default:
4458            // TODO : Merge handling of other objects types into this code
4459            break;
4460        }
4461        if (!base_obj) {
4462            skip |=
4463                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj.type, obj.handle, __LINE__, error_code, "DS",
4464                        "Cannot submit cmd buffer using deleted %s 0x%" PRIx64 ".", object_type_to_string(obj.type), obj.handle);
4465        } else {
4466            base_obj->in_use.fetch_add(1);
4467        }
4468    }
4469    return skip;
4470}
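
// Sketch of the in_use lifecycle these counts implement (function names as in this file):
//
//     validateAndIncrementResources()  // vkQueueSubmit: +1 per bound object (via this function)
//         ... GPU executes the batch ...
//     RetireWorkOnQueue()              // retirement: DecrementBoundResources(), -1 per bound object
//
// Any object with a non-zero in_use count is considered in flight; ValidateObjectNotInUse()
// later relies on this to reject destruction of objects still referenced by pending work.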
4471
4472// Track which resources are in-flight by atomically incrementing their "in_use" count
4473static bool validateAndIncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
4474    bool skip_call = false;
4475
4476    cb_node->in_use.fetch_add(1);
4477    dev_data->globalInFlightCmdBuffers.insert(cb_node->commandBuffer);
4478
4479    // First, increment the in-use count for all "generic" objects bound to the cmd buffer; special-case objects follow below
4480    skip_call |= ValidateAndIncrementBoundObjects(dev_data, cb_node);
4481    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
4482    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
4483    //  should then be flagged prior to calling this function
4484    for (auto drawDataElement : cb_node->drawData) {
4485        for (auto buffer : drawDataElement.buffers) {
4486            auto buffer_state = getBufferState(dev_data, buffer);
4487            if (!buffer_state) {
4488                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4489                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4490                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
4491            } else {
4492                buffer_state->in_use.fetch_add(1);
4493            }
4494        }
4495    }
4496    for (auto event : cb_node->writeEventsBeforeWait) {
4497        auto event_state = getEventNode(dev_data, event);
4498        if (event_state)
4499            event_state->write_in_use++;
4500    }
4501    return skip_call;
4502}
4503
4504// Note: This function assumes that the global lock is held by the calling thread.
4505// For the given queue, verify the queue state up to the given seq number.
4506// Currently the only check is that, if there are events to be waited on prior to
4507//  a QueryReset, all such events have been signaled.
4508static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *queue, uint64_t seq) {
4509    bool skip = false;
4510    auto queue_seq = queue->seq;
4511    std::unordered_map<VkQueue, uint64_t> other_queue_seqs;
4512    auto sub_it = queue->submissions.begin();
4513    while (queue_seq < seq) {
4514        for (auto &wait : sub_it->waitSemaphores) {
4515            auto &last_seq = other_queue_seqs[wait.queue];
4516            last_seq = std::max(last_seq, wait.seq);
4517        }
4518        for (auto cb : sub_it->cbs) {
4519            auto cb_node = getCBNode(dev_data, cb);
4520            if (cb_node) {
4521                for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
4522                    for (auto event : queryEventsPair.second) {
4523                        if (dev_data->eventMap[event].needsSignaled) {
4524                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4525                                            VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4526                                            "Cannot get query results on queryPool 0x%" PRIx64
4527                                            " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
4528                                            (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4529                        }
4530                    }
4531                }
4532            }
4533        }
4534        sub_it++;
4535        queue_seq++;
4536    }
4537    for (auto qs : other_queue_seqs) {
4538        skip |= VerifyQueueStateToSeq(dev_data, getQueueState(dev_data, qs.first), qs.second);
4539    }
4540    return skip;
4541}
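
// Worked example of the seq arithmetic (values hypothetical): if queue->seq == 7 and
// queue->submissions holds three entries, those entries correspond to seqs 8, 9, and 10.
// VerifyQueueStateToSeq(dev_data, queue, 10) walks all three pending submissions, and any
// cross-queue waits they recorded are verified recursively up to the highest seq waited
// on per other queue.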
4542
4543// When the given fence is retired, verify outstanding queue operations through the point of the fence
4544static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
4545    auto fence_state = getFenceNode(dev_data, fence);
4546    if (fence_state && (VK_NULL_HANDLE != fence_state->signaler.first)) {
4547        return VerifyQueueStateToSeq(dev_data, getQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
4548    }
4549    return false;
4550}
4551
4552// TODO: nuke this completely.
4553// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4554static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
4555    // Decrement the in-use count; once it reaches zero the buffer is no longer in flight on any queue, so drop it from the global set
4556    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
4557    pCB->in_use.fetch_sub(1);
4558    if (!pCB->in_use.load()) {
4559        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4560    }
4561}
4562
4563// Decrement in-use count for objects bound to command buffer
4564static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
4565    BASE_NODE *base_obj = nullptr;
4566    for (auto obj : cb_node->object_bindings) {
4567        base_obj = GetStateStructPtrFromObject(dev_data, obj);
4568        if (base_obj) {
4569            base_obj->in_use.fetch_sub(1);
4570        }
4571    }
4572}
4573
4574static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
4575    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
4576
4577    // Roll this queue forward, one submission at a time.
4578    while (pQueue->seq < seq) {
4579        auto &submission = pQueue->submissions.front();
4580
4581        for (auto &wait : submission.waitSemaphores) {
4582            auto pSemaphore = getSemaphoreNode(dev_data, wait.semaphore);
4583            if (pSemaphore) {
4584                pSemaphore->in_use.fetch_sub(1);
4585            }
4586            auto &lastSeq = otherQueueSeqs[wait.queue];
4587            lastSeq = std::max(lastSeq, wait.seq);
4588        }
4589
4590        for (auto &semaphore : submission.signalSemaphores) {
4591            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4592            if (pSemaphore) {
4593                pSemaphore->in_use.fetch_sub(1);
4594            }
4595        }
4596
4597        for (auto cb : submission.cbs) {
4598            auto cb_node = getCBNode(dev_data, cb);
4599            if (!cb_node) {
4600                continue;
4601            }
4602            // First perform decrement on general case bound objects
4603            DecrementBoundResources(dev_data, cb_node);
4604            for (auto drawDataElement : cb_node->drawData) {
4605                for (auto buffer : drawDataElement.buffers) {
4606                    auto buffer_state = getBufferState(dev_data, buffer);
4607                    if (buffer_state) {
4608                        buffer_state->in_use.fetch_sub(1);
4609                    }
4610                }
4611            }
4612            for (auto event : cb_node->writeEventsBeforeWait) {
4613                auto eventNode = dev_data->eventMap.find(event);
4614                if (eventNode != dev_data->eventMap.end()) {
4615                    eventNode->second.write_in_use--;
4616                }
4617            }
4618            for (auto queryStatePair : cb_node->queryToStateMap) {
4619                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4620            }
4621            for (auto eventStagePair : cb_node->eventToStageMap) {
4622                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4623            }
4624
4625            removeInFlightCmdBuffer(dev_data, cb);
4626        }
4627
4628        auto pFence = getFenceNode(dev_data, submission.fence);
4629        if (pFence) {
4630            pFence->state = FENCE_RETIRED;
4631        }
4632
4633        pQueue->submissions.pop_front();
4634        pQueue->seq++;
4635    }
4636
4637    // Roll other queues forward to the highest seq we saw a wait for
4638    for (auto qs : otherQueueSeqs) {
4639        RetireWorkOnQueue(dev_data, getQueueState(dev_data, qs.first), qs.second);
4640    }
4641}
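
// Worked example (hypothetical seqs): with pQueue->seq == 4 and two pending submissions
// (seqs 5 and 6), RetireWorkOnQueue(dev_data, pQueue, 6) pops both, releases every
// semaphore/buffer/bound-object in_use reference they held, marks their fences
// FENCE_RETIRED, and leaves pQueue->seq == 6. If submission 5 waited on another queue's
// seq 9, that queue is rolled forward to 9 as well.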
4642
4644// Submit a fence to a queue, delimiting previous fences and previous untracked
4645// work by it.
4646static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
4647    pFence->state = FENCE_INFLIGHT;
4648    pFence->signaler.first = pQueue->queue;
4649    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
4650}
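
// Example of the signaler bookkeeping (hypothetical values): with pQueue->seq == 4, two
// submissions already pending (seqs 5 and 6), and submitCount == 3, the fence will be
// signaled by the last new batch, i.e. seq 4 + 2 + 3 == 9. Retiring this fence later
// retires everything on the queue up through seq 9.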
4651
4652static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4653    bool skip_call = false;
4654    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
4655        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4656        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4657                             0, __LINE__, VALIDATION_ERROR_00133, "DS",
4658                             "Command Buffer 0x%p is already in use and is not marked for simultaneous use. %s", pCB->commandBuffer,
4659                             validation_error_map[VALIDATION_ERROR_00133]);
4660    }
4661    return skip_call;
4662}
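
// Application-side sketch (hypothetical handles): to legally resubmit a command buffer
// that may still be executing, it must have been recorded with the simultaneous-use flag:
//
//     VkCommandBufferBeginInfo begin_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//     begin_info.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
//     vkBeginCommandBuffer(cmd_buf, &begin_info);
//
// Without the flag, a second vkQueueSubmit() while the first is still in flight trips
// VALIDATION_ERROR_00133 above.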
4663
4664static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *call_source) {
4665    bool skip = false;
4666    if (dev_data->instance_data->disabled.command_buffer_state)
4667        return skip;
4668    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
4669    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
4670        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4671                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4672                        "Command buffer 0x%p was begun with VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
4673                        "set, but has been submitted 0x%" PRIxLEAST64 " times.",
4674                        pCB->commandBuffer, pCB->submitCount);
4675    }
4676    // Validate that cmd buffers have been updated
4677    if (CB_RECORDED != pCB->state) {
4678        if (CB_INVALID == pCB->state) {
4679            // Inform app of reason CB invalid
4680            for (auto obj : pCB->broken_bindings) {
4681                const char *type_str = object_type_to_string(obj.type);
4682                // Descriptor sets are a special case that can be either destroyed or updated to invalidate a CB
4683                const char *cause_str =
4684                    (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ? "destroyed or updated" : "destroyed";
4685
4686                skip |=
4687                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4688                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4689                            "You are submitting command buffer 0x%p that is invalid because bound %s 0x%" PRIxLEAST64 " was %s.",
4690                            pCB->commandBuffer, type_str, obj.handle, cause_str);
4691            }
4692        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
4693            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4694                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
4695                            "You must call vkEndCommandBuffer() on command buffer 0x%p before this call to %s!", pCB->commandBuffer,
4696                            call_source);
4697        }
4698    }
4699    return skip;
4700}
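
// Application-side sketch (hypothetical handles): a buffer begun with
// VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT must be reset or re-recorded between
// submissions; submitting it twice raises the single-submit violation above.
//
//     begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
//     vkBeginCommandBuffer(cmd_buf, &begin_info);
//     /* record work */
//     vkEndCommandBuffer(cmd_buf);
//     vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); // OK: submitCount becomes 1
//     vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); // flagged: submitCount > 1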
4701
4702// Validate that queueFamilyIndices of primary command buffers match this queue
4703// Secondary command buffers were previously validated in vkCmdExecuteCommands().
4704static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
4705    bool skip_call = false;
4706    auto pPool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
4707    auto queue_state = getQueueState(dev_data, queue);
4708
4709    if (pPool && queue_state && (pPool->queueFamilyIndex != queue_state->queueFamilyIndex)) {
4710        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4711                             reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_00139, "DS",
4712                             "vkQueueSubmit: Primary command buffer 0x%p created in queue family %d is being submitted on queue "
4713                             "0x%p from queue family %d. %s",
4714                             pCB->commandBuffer, pPool->queueFamilyIndex, queue, queue_state->queueFamilyIndex,
4715                             validation_error_map[VALIDATION_ERROR_00139]);
4716    }
4717
4718    return skip_call;
4719}
4720
4721static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4722    // Track in-use for resources off of primary and any secondary CBs
4723    bool skip_call = false;
4724
4725    // If USAGE_SIMULTANEOUS_USE_BIT is not set, the CB cannot already be executing on the device
4727    skip_call |= validateCommandBufferSimultaneousUse(dev_data, pCB);
4728
4729    skip_call |= validateAndIncrementResources(dev_data, pCB);
4730
4731    if (!pCB->secondaryCommandBuffers.empty()) {
4732        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
4733            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
4734            skip_call |= validateAndIncrementResources(dev_data, pSubCB);
4735            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
4736                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4737                log_msg(
4738                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4739                    __LINE__, VALIDATION_ERROR_00135, "DS",
4740                    "Command buffer 0x%p was submitted with secondary buffer 0x%p but that buffer has subsequently been bound to "
4741                    "primary cmd buffer 0x%p and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set. %s",
4742                    pCB->commandBuffer, secondaryCmdBuffer, pSubCB->primaryCommandBuffer,
4743                    validation_error_map[VALIDATION_ERROR_00135]);
4744            }
4745        }
4746    }
4747
4748    skip_call |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()");
4749
4750    return skip_call;
4751}
4752
4753static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
4756    bool skip_call = false;
4757
4758    if (pFence) {
4759        if (pFence->state == FENCE_INFLIGHT) {
4760            // TODO: opportunities for VALIDATION_ERROR_00127, VALIDATION_ERROR_01647, VALIDATION_ERROR_01953
4761            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4762                                 (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4763                                 "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
4764        } else if (pFence->state == FENCE_RETIRED) {
4767            // TODO: opportunities for VALIDATION_ERROR_00126, VALIDATION_ERROR_01646, VALIDATION_ERROR_01953
4768            skip_call |=
4769                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4770                        reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4771                        "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted.",
4772                        reinterpret_cast<uint64_t &>(pFence->fence));
4773        }
4774    }
4775
4776    return skip_call;
4777}
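
// Application-side sketch (hypothetical handles): a retired (signaled) fence must be
// reset before it can delimit another submission, otherwise the FENCE_RETIRED branch
// above fires:
//
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
//     vkResetFences(device, 1, &fence);              // back to FENCE_UNSIGNALED
//     vkQueueSubmit(queue, 1, &submit_info, fence);  // valid again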
4778
4780VKAPI_ATTR VkResult VKAPI_CALL
4781QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
4782    bool skip_call = false;
4783    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
4784    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4785    std::unique_lock<std::mutex> lock(global_lock);
4786
4787    auto pQueue = getQueueState(dev_data, queue);
4788    auto pFence = getFenceNode(dev_data, fence);
4789    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
4790
4791    if (skip_call) {
4792        return VK_ERROR_VALIDATION_FAILED_EXT;
4793    }
4794
4795    // Mark the fence in-use.
4796    if (pFence) {
4797        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
4798    }
4799
4800    // Now verify each individual submit
4801    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4802        const VkSubmitInfo *submit = &pSubmits[submit_idx];
4803        vector<SEMAPHORE_WAIT> semaphore_waits;
4804        vector<VkSemaphore> semaphore_signals;
4805        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
4806            VkSemaphore semaphore = submit->pWaitSemaphores[i];
4807            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4808            if (pSemaphore) {
4809                if (pSemaphore->signaled) {
4810                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
4811                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
4812                        pSemaphore->in_use.fetch_add(1);
4813                    }
4814                    pSemaphore->signaler.first = VK_NULL_HANDLE;
4815                    pSemaphore->signaled = false;
4816                } else {
4817                    skip_call |=
4818                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4819                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4820                                "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
4821                                reinterpret_cast<const uint64_t &>(semaphore));
4822                }
4823            }
4824        }
4825        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
4826            VkSemaphore semaphore = submit->pSignalSemaphores[i];
4827            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4828            if (pSemaphore) {
4829                if (pSemaphore->signaled) {
4830                    skip_call |=
4831                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4832                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4833                                "Queue 0x%p is signaling semaphore 0x%" PRIx64
4834                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
4835                                queue, reinterpret_cast<const uint64_t &>(semaphore),
4836                                reinterpret_cast<uint64_t &>(pSemaphore->signaler.first));
4837                } else {
4838                    pSemaphore->signaler.first = queue;
4839                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
4840                    pSemaphore->signaled = true;
4841                    pSemaphore->in_use.fetch_add(1);
4842                    semaphore_signals.push_back(semaphore);
4843                }
4844            }
4845        }
4846
4847        std::vector<VkCommandBuffer> cbs;
4848
4849        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
4850            auto cb_node = getCBNode(dev_data, submit->pCommandBuffers[i]);
4851            if (cb_node) {
4852                skip_call |= ValidateCmdBufImageLayouts(dev_data, cb_node);
4853                cbs.push_back(submit->pCommandBuffers[i]);
4854                for (auto secondaryCmdBuffer : cb_node->secondaryCommandBuffers) {
4855                    cbs.push_back(secondaryCmdBuffer);
4856                }
4857
4858                cb_node->submitCount++; // increment submit count
4859                skip_call |= validatePrimaryCommandBufferState(dev_data, cb_node);
4860                skip_call |= validateQueueFamilyIndices(dev_data, cb_node, queue);
4861                // Potential early exit here as bad object state may crash in delayed function calls
4862                if (skip_call)
4863                    return result;
4864                // Call submit-time functions to validate/update state
4865                for (auto &function : cb_node->validate_functions) {
4866                    skip_call |= function();
4867                }
4868                for (auto &function : cb_node->eventUpdates) {
4869                    skip_call |= function(queue);
4870                }
4871                for (auto &function : cb_node->queryUpdates) {
4872                    skip_call |= function(queue);
4873                }
4874            }
4875        }
4876
4877        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
4878                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
4879    }
4880
4881    if (pFence && !submitCount) {
4882        // If no submissions, but just dropping a fence on the end of the queue,
4883        // record an empty submission with just the fence, so we can determine
4884        // its completion.
4885        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
4886                                         std::vector<SEMAPHORE_WAIT>(),
4887                                         std::vector<VkSemaphore>(),
4888                                         fence);
4889    }
4890
4891    lock.unlock();
4892    if (!skip_call)
4893        result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
4894
4895    return result;
4896}
4897
4898static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
4899    bool skip = false;
4900    if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
4901        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4902                        reinterpret_cast<const uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_00611, "MEM",
4903                        "Number of currently valid memory objects is not less than the maximum allowed (%u). %s",
4904                        dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount,
4905                        validation_error_map[VALIDATION_ERROR_00611]);
4906    }
4907    return skip;
4908}
4909
4910static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
4911    add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
4912    return;
4913}
4914
4915VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
4916                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
4917    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4918    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4919    std::unique_lock<std::mutex> lock(global_lock);
4920    bool skip = PreCallValidateAllocateMemory(dev_data);
4921    if (!skip) {
4922        lock.unlock();
4923        result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
4924        lock.lock();
4925        if (VK_SUCCESS == result) {
4926            PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
4927        }
4928    }
4929    return result;
4930}
4931
4932// For the given obj node, if it is in use, flag a validation error and return the callback result; else return false
4933bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
4934                            UNIQUE_VALIDATION_ERROR_CODE error_code) {
4935    if (dev_data->instance_data->disabled.object_in_use)
4936        return false;
4937    bool skip = false;
4938    if (obj_node->in_use.load()) {
4939        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_struct.type, obj_struct.handle, __LINE__,
4940                        error_code, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer. %s",
4941                        object_type_to_string(obj_struct.type), obj_struct.handle, validation_error_map[error_code]);
4942    }
4943    return skip;
4944}
4945
4946static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
4947    *mem_info = getMemObjInfo(dev_data, mem);
4948    *obj_struct = {reinterpret_cast<uint64_t &>(mem), VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT};
4949    if (dev_data->instance_data->disabled.free_memory)
4950        return false;
4951    bool skip = false;
4952    if (*mem_info) {
4953        skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, VALIDATION_ERROR_00620);
4954    }
4955    return skip;
4956}
4957
4958static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
4959    // Clear mem binding for any bound objects
4960    for (auto obj : mem_info->obj_bindings) {
4961        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__, MEMTRACK_FREED_MEM_REF,
4962                "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64, obj.handle,
4963                (uint64_t)mem_info->mem);
4964        switch (obj.type) {
4965        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
4966            auto image_state = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
4967            assert(image_state); // Any destroyed images should already be removed from bindings
4968            image_state->binding.mem = MEMORY_UNBOUND;
4969            break;
4970        }
4971        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
4972            auto buffer_state = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
4973            assert(buffer_state); // Any destroyed buffers should already be removed from bindings
4974            buffer_state->binding.mem = MEMORY_UNBOUND;
4975            break;
4976        }
4977        default:
4978            // Should only have buffer or image objects bound to memory
4979            assert(0);
4980        }
4981    }
4982    // Any bound cmd buffers are now invalid
4983    invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
4984    dev_data->memObjMap.erase(mem);
4985}
4986
4987VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
4988    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4989    DEVICE_MEM_INFO *mem_info = nullptr;
4990    VK_OBJECT obj_struct;
4991    std::unique_lock<std::mutex> lock(global_lock);
4992    bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
4993    if (!skip) {
4994        lock.unlock();
4995        dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
4996        lock.lock();
4997        PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
4998    }
4999}
5000
5001// Validate the given Map memory range. The memory must not already be mapped,
5002//  and the size of the map range must be:
5003//  1. Non-zero
5004//  2. Within the size of the memory allocation
5005static bool ValidateMapMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5006    bool skip_call = false;
5007
5008    if (size == 0) {
5009        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5010                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5011                            "VkMapMemory: Attempting to map memory range of size zero");
5012    }
5013
5014    auto mem_element = my_data->memObjMap.find(mem);
5015    if (mem_element != my_data->memObjMap.end()) {
5016        auto mem_info = mem_element->second.get();
5017        // It is an application error to call VkMapMemory on an object that is already mapped
5018        if (mem_info->mem_range.size != 0) {
5019            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5020                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5021                                "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
5022        }
5023
5024        // Validate that offset + size is within object's allocationSize
5025        if (size == VK_WHOLE_SIZE) {
5026            if (offset >= mem_info->alloc_info.allocationSize) {
5027                skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5028                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5029                                    "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
5030                                           " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
5031                                           " with size of VK_WHOLE_SIZE oversteps total allocation size 0x%" PRIx64,
5032            }
5033        } else {
5034            if ((offset + size) > mem_info->alloc_info.allocationSize) {
5035                skip_call = log_msg(
5036                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5037                    (uint64_t)mem, __LINE__, VALIDATION_ERROR_00628, "MEM",
5038                    "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total allocation size 0x%" PRIx64 ". %s", offset,
5039                    size + offset, mem_info->alloc_info.allocationSize, validation_error_map[VALIDATION_ERROR_00628]);
5040            }
5041        }
5042    }
5043    return skip_call;
5044}
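
// Worked example of the range checks (hypothetical sizes), for an allocation with
// allocationSize == 4096:
//
//     vkMapMemory(device, mem, 4096, VK_WHOLE_SIZE, 0, &ptr); // offset >= allocationSize: flagged
//     vkMapMemory(device, mem, 1024, 3584, 0, &ptr);          // 1024 + 3584 > 4096: VALIDATION_ERROR_00628
//     vkMapMemory(device, mem, 1024, VK_WHOLE_SIZE, 0, &ptr); // maps [1024, 4096): OK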
5045
5046static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5047    auto mem_info = getMemObjInfo(my_data, mem);
5048    if (mem_info) {
5049        mem_info->mem_range.offset = offset;
5050        mem_info->mem_range.size = size;
5051    }
5052}
5053
5054static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
5055    bool skip_call = false;
5056    auto mem_info = getMemObjInfo(my_data, mem);
5057    if (mem_info) {
5058        if (!mem_info->mem_range.size) {
5059            // Valid Usage: memory must currently be mapped
5060            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5061                                (uint64_t)mem, __LINE__, VALIDATION_ERROR_00649, "MEM",
5062                                "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64 ". %s", (uint64_t)mem,
5063                                validation_error_map[VALIDATION_ERROR_00649]);
5064        }
5065        mem_info->mem_range.size = 0;
5066        if (mem_info->shadow_copy) {
5067            free(mem_info->shadow_copy_base);
5068            mem_info->shadow_copy_base = 0;
5069            mem_info->shadow_copy = 0;
5070        }
5071    }
5072    return skip_call;
5073}
5074
5075// Guard value for pad data
5076static char NoncoherentMemoryFillValue = 0xb;
5077
5078static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
5079                                     void **ppData) {
5080    auto mem_info = getMemObjInfo(dev_data, mem);
5081    if (mem_info) {
5082        mem_info->p_driver_data = *ppData;
5083        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
5084        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
5085            mem_info->shadow_copy = 0;
5086        } else {
5087            if (size == VK_WHOLE_SIZE) {
5088                size = mem_info->alloc_info.allocationSize - offset;
5089            }
5090            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
5091            assert(vk_safe_modulo(mem_info->shadow_pad_size,
5092                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
5093            // Ensure start of mapped region reflects hardware alignment constraints
5094            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
5095
5096            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
5097            uint64_t start_offset = offset % map_alignment;
5098            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
5099            mem_info->shadow_copy_base = malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
5100
5101            mem_info->shadow_copy =
5102                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
5103                                         ~(map_alignment - 1)) + start_offset;
5104            assert(vk_safe_modulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
5105                                  map_alignment) == 0);
5106
5107            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
5108            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
5109        }
5110    }
5111}
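
// Layout of the shadow allocation built above (sizes hypothetical). With
// shadow_pad_size == minMemoryMapAlignment == 64 and a 256-byte map at start_offset 8:
//
//     shadow_copy_base
//     |-- alignment slack --|-- 64B pad --|-- 256B user data --|-- 64B pad --|
//                           ^ shadow_copy (aligned + start_offset)
//                                         ^ *ppData returned to the app
//
// The whole region is seeded with NoncoherentMemoryFillValue, so a later flush can
// detect writes that strayed into the pads outside the mapped range.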
5112
5113// Verify that the state of a fence being waited on is appropriate. That is,
5114//  the fence should have been submitted on a queue or during acquire next image,
5115//  since an unsubmitted fence has no pending operation that will signal it
5116static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
5117    bool skip_call = false;
5118
5119    auto pFence = getFenceNode(dev_data, fence);
5120    if (pFence) {
5121        if (pFence->state == FENCE_UNSIGNALED) {
5122            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5123                                 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5124                                 "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
5125                                 "acquire next image.",
5126                                 apiCall, reinterpret_cast<uint64_t &>(fence));
5127        }
5128    }
5129    return skip_call;
5130}
5131
5132static void RetireFence(layer_data *dev_data, VkFence fence) {
5133    auto pFence = getFenceNode(dev_data, fence);
    if (!pFence) return;  // Unknown fence: nothing to retire
5134    if (pFence->signaler.first != VK_NULL_HANDLE) {
5135        // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
5136        RetireWorkOnQueue(dev_data, getQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
5137    }
5138    else {
5139        // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
5140        // the fence as retired.
5141        pFence->state = FENCE_RETIRED;
5142    }
5143}
5144
5145static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
5146    if (dev_data->instance_data->disabled.wait_for_fences)
5147        return false;
5148    bool skip = false;
5149    for (uint32_t i = 0; i < fence_count; i++) {
5150        skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
5151        skip |= VerifyQueueStateToFence(dev_data, fences[i]);
5152    }
5153    return skip;
5154}
5155
5156static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
5157    // When we know that all fences are complete we can clean/remove their CBs
5158    if ((VK_TRUE == wait_all) || (1 == fence_count)) {
5159        for (uint32_t i = 0; i < fence_count; i++) {
5160            RetireFence(dev_data, fences[i]);
5161        }
5162    }
5163    // NOTE : Alternate case not handled here is when some fences have completed. In
5164    //  this case for app to guarantee which fences completed it will have to call
5165    //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
5166}
5167
5168VKAPI_ATTR VkResult VKAPI_CALL
5169WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
5170    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5171    // Verify fence status of submitted fences
5172    std::unique_lock<std::mutex> lock(global_lock);
5173    bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
5174    lock.unlock();
5175    if (skip)
5176        return VK_ERROR_VALIDATION_FAILED_EXT;
5177
5178    VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
5179
5180    if (result == VK_SUCCESS) {
5181        lock.lock();
5182        PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
5183        lock.unlock();
5184    }
5185    return result;
5186}
5187
5188static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
5189    if (dev_data->instance_data->disabled.get_fence_state)
5190        return false;
5191    return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
5192}
5193
5194static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }
5195
5196VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
5197    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5198    std::unique_lock<std::mutex> lock(global_lock);
5199    bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
5200    lock.unlock();
5201    if (skip)
5202        return VK_ERROR_VALIDATION_FAILED_EXT;
5203
5204    VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
5205    if (result == VK_SUCCESS) {
5206        lock.lock();
5207        PostCallRecordGetFenceStatus(dev_data, fence);
5208        lock.unlock();
5209    }
5210    return result;
5211}
5212
5213static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
5214    // Add queue to tracking set only if it is new
5215    auto result = dev_data->queues.emplace(queue);
5216    if (result.second == true) {
5217        QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
5218        queue_state->queue = queue;
5219        queue_state->queueFamilyIndex = q_family_index;
5220        queue_state->seq = 0;
5221    }
5222}
5223
5224VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
5225                                                            VkQueue *pQueue) {
5226    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5227    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
5228    std::lock_guard<std::mutex> lock(global_lock);
5229
5230    PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
5231}
5232
5233static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
5234    *queue_state = getQueueState(dev_data, queue);
5235    if (dev_data->instance_data->disabled.queue_wait_idle)
5236        return false;
5237    return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
5238}
5239
5240static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
5241    RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
5242}
5243
5244VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
5245    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5246    QUEUE_STATE *queue_state = nullptr;
5247    std::unique_lock<std::mutex> lock(global_lock);
5248    bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
5249    lock.unlock();
5250    if (skip)
5251        return VK_ERROR_VALIDATION_FAILED_EXT;
5252    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
5253    if (VK_SUCCESS == result) {
5254        lock.lock();
5255        PostCallRecordQueueWaitIdle(dev_data, queue_state);
5256        lock.unlock();
5257    }
5258    return result;
5259}
5260
5261static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
5262    if (dev_data->instance_data->disabled.device_wait_idle)
5263        return false;
5264    bool skip = false;
5265    for (auto &queue : dev_data->queueMap) {
5266        skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
5267    }
5268    return skip;
5269}
5270
5271static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
5272    for (auto &queue : dev_data->queueMap) {
5273        RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
5274    }
5275}
5276
5277VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
5278    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5279    std::unique_lock<std::mutex> lock(global_lock);
5280    bool skip = PreCallValidateDeviceWaitIdle(dev_data);
5281    lock.unlock();
5282    if (skip)
5283        return VK_ERROR_VALIDATION_FAILED_EXT;
5284    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
5285    if (VK_SUCCESS == result) {
5286        lock.lock();
5287        PostCallRecordDeviceWaitIdle(dev_data);
5288        lock.unlock();
5289    }
5290    return result;
5291}
5292
5293static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
5294    *fence_node = getFenceNode(dev_data, fence);
5295    *obj_struct = {reinterpret_cast<uint64_t &>(fence), VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT};
5296    if (dev_data->instance_data->disabled.destroy_fence)
5297        return false;
5298    bool skip = false;
5299    if (*fence_node) {
5300        if ((*fence_node)->state == FENCE_INFLIGHT) {
5301            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5302                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Fence 0x%" PRIx64 " is in use.",
5303                            (uint64_t)(fence));
5304        }
5305    }
5306    return skip;
5307}
5308
5309static void PostCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }
5310
5311VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
5312    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5313    // Common data objects used pre & post call
5314    FENCE_NODE *fence_node = nullptr;
5315    VK_OBJECT obj_struct;
5316    std::unique_lock<std::mutex> lock(global_lock);
5317    bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);
5318
5319    if (!skip) {
5320        lock.unlock();
5321        dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
5322        lock.lock();
5323        PostCallRecordDestroyFence(dev_data, fence);
5324    }
5325}
5326
5327static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
5328                                            VK_OBJECT *obj_struct) {
5329    *sema_node = getSemaphoreNode(dev_data, semaphore);
5330    *obj_struct = {reinterpret_cast<uint64_t &>(semaphore), VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT};
5331    if (dev_data->instance_data->disabled.destroy_semaphore)
5332        return false;
5333    bool skip = false;
5334    if (*sema_node) {
5335        skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, VALIDATION_ERROR_00199);
5336    }
5337    return skip;
5338}
5339
5340static void PostCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }
5341
5342VKAPI_ATTR void VKAPI_CALL
5343DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5344    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5345    SEMAPHORE_NODE *sema_node;
5346    VK_OBJECT obj_struct;
5347    std::unique_lock<std::mutex> lock(global_lock);
5348    bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
5349    if (!skip) {
5350        lock.unlock();
5351        dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
5352        lock.lock();
5353        PostCallRecordDestroySemaphore(dev_data, semaphore);
5354    }
5355}
5356
5357static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
5358    *event_state = getEventNode(dev_data, event);
5359    *obj_struct = {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT};
5360    if (dev_data->instance_data->disabled.destroy_event)
5361        return false;
5362    bool skip = false;
5363    if (*event_state) {
5364        skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, VALIDATION_ERROR_00213);
5365    }
5366    return skip;
5367}
5368
5369static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
5370    invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
5371    dev_data->eventMap.erase(event);
5372}
5373
5374VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5375    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5376    EVENT_STATE *event_state = nullptr;
5377    VK_OBJECT obj_struct;
5378    std::unique_lock<std::mutex> lock(global_lock);
5379    bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
5380    if (!skip) {
5381        lock.unlock();
5382        dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
5383        lock.lock();
5384        PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
5385    }
5386}
5387
5388static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
5389                                            VK_OBJECT *obj_struct) {
5390    *qp_state = getQueryPoolNode(dev_data, query_pool);
5391    *obj_struct = {reinterpret_cast<uint64_t &>(query_pool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT};
5392    if (dev_data->instance_data->disabled.destroy_query_pool)
5393        return false;
5394    bool skip = false;
5395    if (*qp_state) {
5396        skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, VALIDATION_ERROR_01012);
5397    }
5398    return skip;
5399}
5400
5401static void PostCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state, VK_OBJECT obj_struct) {
5402    invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
5403    dev_data->queryPoolMap.erase(query_pool);
5404}
5405
5406VKAPI_ATTR void VKAPI_CALL
5407DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5408    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5409    QUERY_POOL_NODE *qp_state = nullptr;
5410    VK_OBJECT obj_struct;
5411    std::unique_lock<std::mutex> lock(global_lock);
5412    bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
5413    if (!skip) {
5414        lock.unlock();
5415        dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
5416        lock.lock();
5417        PostCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
5418    }
5419}

5420static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
5421                                               uint32_t query_count, VkQueryResultFlags flags,
5422                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
    for (auto cmd_buffer : dev_data->globalInFlightCmdBuffers) {
        auto cb = getCBNode(dev_data, cmd_buffer);
        for (auto query_state_pair : cb->queryToStateMap) {
            (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer);
        }
    }
    if (dev_data->instance_data->disabled.get_query_pool_results)
        return false;
    bool skip = false;
    for (uint32_t i = 0; i < query_count; ++i) {
        QueryObject query = {query_pool, first_query + i};
        auto qif_pair = queries_in_flight->find(query);
        auto query_state_pair = dev_data->queryToStateMap.find(query);
        if (query_state_pair != dev_data->queryToStateMap.end()) {
            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
                // Available and in flight
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = getCBNode(dev_data, cmd_buffer);
                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
                    if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                        "Cannot get query results on queryPool 0x%" PRIx64 " with index %u which is in flight.",
                                        (uint64_t)(query_pool), first_query + i);
                    }
                }
            } else if (qif_pair != queries_in_flight->end() && !query_state_pair->second) {
                // Unavailable and in flight
                // TODO : Can there be the same query in use by multiple command buffers in flight?
                bool make_available = false;
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = getCBNode(dev_data, cmd_buffer);
                    make_available |= cb->queryToStateMap[query];
                }
                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                    "Cannot get query results on queryPool 0x%" PRIx64 " with index %u which is unavailable.",
                                    (uint64_t)(query_pool), first_query + i);
                }
            } else if (!query_state_pair->second) {
                // Unavailable and not in flight
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                "Cannot get query results on queryPool 0x%" PRIx64 " with index %u which is unavailable.",
                                (uint64_t)(query_pool), first_query + i);
            }
        } else {
            // Uninitialized: this query was never recorded, so no data has been collected for it
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                            "Cannot get query results on queryPool 0x%" PRIx64
                            " with index %u as data has not been collected for this index.",
                            (uint64_t)(query_pool), first_query + i);
        }
    }
    return skip;
}

static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
                                              uint32_t query_count,
                                              unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
    for (uint32_t i = 0; i < query_count; ++i) {
        QueryObject query = {query_pool, first_query + i};
        auto qif_pair = queries_in_flight->find(query);
        auto query_state_pair = dev_data->queryToStateMap.find(query);
        // For queries that are available and in flight, any events that were waited on before the query was
        // reset go back to the needs-signaled state
        if (query_state_pair != dev_data->queryToStateMap.end() && query_state_pair->second &&
            qif_pair != queries_in_flight->end()) {
            for (auto cmd_buffer : qif_pair->second) {
                auto cb = getCBNode(dev_data, cmd_buffer);
                auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
                if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
                    for (auto event : query_event_pair->second) {
                        dev_data->eventMap[event].needsSignaled = true;
                    }
                }
            }
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                                                   size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result =
        dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
    lock.lock();
    PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
    lock.unlock();
    return result;
}

static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
    bool skip_call = false;
    auto buffer_state = getBufferState(my_data, buffer);
    if (!buffer_state) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
    } else {
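        // in_use is a reference count that stays non-zero while any in-flight command buffer or queue
        // still references this buffer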
        if (buffer_state->in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 (uint64_t)(buffer), __LINE__, VALIDATION_ERROR_00676, "DS",
                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer. %s", (uint64_t)(buffer),
                                 validation_error_map[VALIDATION_ERROR_00676]);
        }
    }
    return skip_call;
}

// Return true if given ranges intersect, else false
// Prereq : For both ranges, range->end - range->start > 0. Violations of this precondition should already
//  have been flagged as errors, so it is not rechecked here
// When one range is linear and the other is non-linear, the comparison is padded out to bufferImageGranularity
// In the padded case, if an alias is encountered then a validation warning is reported and skip_call may be set
//  by the callback function, so the caller should merge in the skip_call value whenever the padded case is possible
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip_call) {
    *skip_call = false;
    auto r1_start = range1->start;
    auto r1_end = range1->end;
    auto r2_start = range2->start;
    auto r2_end = range2->end;
    VkDeviceSize pad_align = 1;
    if (range1->linear != range2->linear) {
        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
    }
    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1)))
        return false;
    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1)))
        return false;

    if (range1->linear != range2->linear) {
        // In linear vs. non-linear case, warn of aliasing
        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
        const char *r1_type_str = range1->image ? "image" : "buffer";
        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
        const char *r2_type_str = range2->image ? "image" : "buffer";
        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
        *skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, 0,
                              MEMTRACK_INVALID_ALIASING, "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
                                                                " which may indicate a bug. For further info refer to the "
                                                                "Buffer-Image Granularity section of the Vulkan specification. "
                                                                "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/"
                                                                "xhtml/vkspec.html#resources-bufferimagegranularity)",
                              r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
    }
    // Ranges intersect
    return true;
}
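// Illustrative example of the masking above (values assumed, not from any real device): with
// bufferImageGranularity = 0x1000, a linear range [0x0, 0xFFF] and a non-linear range [0x1000, 0x1FFF] round
// down to pages 0x0 and 0x1000 and do not intersect, but a non-linear range starting at 0xF00 rounds down to
// page 0x0 and is treated as a potential alias. Note the masking assumes bufferImageGranularity is a power of two.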
// Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
    // Create a local MEMORY_RANGE struct to wrap offset/size
    MEMORY_RANGE range_wrap;
    // Sync linear with range1 so that no granularity padding is applied and the aliasing warning cannot fire
    range_wrap.linear = range1->linear;
    range_wrap.start = offset;
    range_wrap.end = end;
    bool tmp_bool;
    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool);
}
// For given mem_info, set all ranges valid that intersect the [offset, end] range
// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
    bool tmp_bool = false;
    MEMORY_RANGE map_range = {};
    map_range.linear = true;
    map_range.start = offset;
    map_range.end = end;
    for (auto &handle_range_pair : mem_info->bound_ranges) {
        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool)) {
            // TODO : WARN here if tmp_bool true?
            handle_range_pair.second.valid = true;
        }
    }
}
// Object with given handle is being bound to memory with given mem_info struct.
//  Track the newly bound memory range with given memoryOffset
//  Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
//  and non-linear range incorrectly overlap.
// Return true if an error is flagged and the user callback returns "true", otherwise false
// is_image indicates an image object, otherwise handle is for a buffer
// is_linear indicates a buffer or linear image
static bool InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
    bool skip_call = false;
    MEMORY_RANGE range;

    range.image = is_image;
    range.handle = handle;
    range.linear = is_linear;
    range.valid = mem_info->global_valid;
    range.memory = mem_info->mem;
    range.start = memoryOffset;
    range.size = memRequirements.size;
    range.end = memoryOffset + memRequirements.size - 1;
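    // Note ranges are inclusive at both ends: e.g. (illustrative) a 512-byte resource bound at memoryOffset
    // 256 occupies bytes [256, 767]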
    range.aliases.clear();
    // Update Memory aliasing
    // Save aliased ranges so we can copy into the final map entry below. Can't do it in the loop because we don't yet have
    // the final ptr. If we inserted into the map before the loop to get the final ptr, then we might enter the loop when not
    // needed and would check the range against itself
    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
    for (auto &obj_range_pair : mem_info->bound_ranges) {
        auto check_range = &obj_range_pair.second;
        bool intersection_error = false;
        if (rangesIntersect(dev_data, &range, check_range, &intersection_error)) {
            skip_call |= intersection_error;
            range.aliases.insert(check_range);
            tmp_alias_ranges.insert(check_range);
        }
    }
    mem_info->bound_ranges[handle] = std::move(range);
    for (auto tmp_range : tmp_alias_ranges) {
        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
    }
    if (is_image)
        mem_info->bound_images.insert(handle);
    else
        mem_info->bound_buffers.insert(handle);

    return skip_call;
}
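
// The alias bookkeeping above is symmetric: when two bound ranges overlap, each holds a pointer to the other
// in its aliases set, which is what lets RemoveMemoryRange below unlink a dying range from all of its peers.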

static bool InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                   VkMemoryRequirements mem_reqs, bool is_linear) {
    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear);
}

static bool InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                    VkMemoryRequirements mem_reqs) {
    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true);
}

// Remove MEMORY_RANGE struct for given handle from bound_ranges of mem_info
//  is_image indicates if handle is for image or buffer
//  This function will also remove the handle from the appropriate bound-object set
//  (bound_images or bound_buffers) and clean up any aliases for the range being removed.
static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
    auto erase_range = &mem_info->bound_ranges[handle];
    for (auto alias_range : erase_range->aliases) {
        alias_range->aliases.erase(erase_range);
    }
    erase_range->aliases.clear();
    mem_info->bound_ranges.erase(handle);
    if (is_image) {
        mem_info->bound_images.erase(handle);
    } else {
        mem_info->bound_buffers.erase(handle);
    }
}

static void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }

static void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }

static bool PreCallValidateDestroyBuffer(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE **buffer_state,
                                         VK_OBJECT *obj_struct) {
    *buffer_state = getBufferState(dev_data, buffer);
    *obj_struct = {reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT};
    if (dev_data->instance_data->disabled.destroy_buffer)
        return false;
    bool skip = false;
    if (*buffer_state) {
        skip |= validateIdleBuffer(dev_data, buffer);
    }
    return skip;
}

static void PostCallRecordDestroyBuffer(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, buffer_state->cb_bindings, obj_struct);
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        auto mem_info = getMemObjInfo(dev_data, mem_binding);
        if (mem_info) {
            RemoveBufferMemoryRange(reinterpret_cast<uint64_t &>(buffer), mem_info);
        }
    }
    ClearMemoryObjectBindings(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
    dev_data->bufferMap.erase(buffer_state->buffer);
}

VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
                                         const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    BUFFER_STATE *buffer_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
        lock.lock();
        PostCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
    }
}

static bool PreCallValidateDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE **buffer_view_state,
                                             VK_OBJECT *obj_struct) {
    *buffer_view_state = getBufferViewState(dev_data, buffer_view);
    *obj_struct = {reinterpret_cast<uint64_t &>(buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT};
    if (dev_data->instance_data->disabled.destroy_buffer_view)
        return false;
    bool skip = false;
    if (*buffer_view_state) {
        skip |= ValidateObjectNotInUse(dev_data, *buffer_view_state, *obj_struct, VALIDATION_ERROR_00701);
    }
    return skip;
}

static void PostCallRecordDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE *buffer_view_state,
                                            VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, buffer_view_state->cb_bindings, obj_struct);
    dev_data->bufferViewMap.erase(buffer_view);
}

VKAPI_ATTR void VKAPI_CALL
DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    // Validate state before calling down chain, update common data if we'll be calling down chain
    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
        lock.lock();
        PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
    }
}

static bool PreCallValidateDestroyImage(layer_data *dev_data, VkImage image, IMAGE_STATE **image_state, VK_OBJECT *obj_struct) {
    *image_state = getImageState(dev_data, image);
    *obj_struct = {reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT};
    if (dev_data->instance_data->disabled.destroy_image)
        return false;
    bool skip = false;
    if (*image_state) {
        skip |= ValidateObjectNotInUse(dev_data, *image_state, *obj_struct, VALIDATION_ERROR_00743);
    }
    return skip;
}

static void PostCallRecordDestroyImage(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, image_state->cb_bindings, obj_struct);
    // Clean up memory mapping, bindings and range references for image
    for (auto mem_binding : image_state->GetBoundMemory()) {
        auto mem_info = getMemObjInfo(dev_data, mem_binding);
        if (mem_info) {
            RemoveImageMemoryRange(obj_struct.handle, mem_info);
        }
    }
    ClearMemoryObjectBindings(dev_data, obj_struct.handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    // Remove image from imageMap
    dev_data->imageMap.erase(image);

    const auto sub_entry = dev_data->imageSubresourceMap.find(image);
    if (sub_entry != dev_data->imageSubresourceMap.end()) {
        for (const auto &pair : sub_entry->second) {
            dev_data->imageLayoutMap.erase(pair);
        }
        dev_data->imageSubresourceMap.erase(sub_entry);
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    IMAGE_STATE *image_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
        lock.lock();
        PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
    }
}

static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
                                const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
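    // memory_type_bits is a bitmask of acceptable memory type indices, bit n set meaning index n is allowed.
    // Illustrative example (values assumed): memoryTypeBits == 0x6 permits indices 1 and 2, so memory
    // allocated with memoryTypeIndex 0 fails the check below.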
    bool skip_call = false;
    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
        skip_call =
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                    reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, msgCode, "MT",
                    "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
                    "type (0x%X) of this memory object 0x%" PRIx64 ". %s",
                    funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex,
                    reinterpret_cast<const uint64_t &>(mem_info->mem), validation_error_map[msgCode]);
    }
    return skip_call;
}

VKAPI_ATTR VkResult VKAPI_CALL
BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    // Track objects tied to memory
    uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
    bool skip_call = SetMemBinding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
    auto buffer_state = getBufferState(dev_data, buffer);
    if (buffer_state) {
        if (!buffer_state->memory_requirements_checked) {
            // There's no explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
            //  vkBindBufferMemory(), but it is implied: the memory being bound must conform to the VkMemoryRequirements
            //  that vkGetBufferMemoryRequirements() would return
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 buffer_handle, __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                 "vkBindBufferMemory(): Binding memory to buffer 0x%" PRIxLEAST64
                                 " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
                                 buffer_handle);
            // Make the call for them so we can verify the state
            lock.unlock();
            dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, &buffer_state->requirements);
            lock.lock();
        }
        buffer_state->binding.mem = mem;
        buffer_state->binding.offset = memoryOffset;
        buffer_state->binding.size = buffer_state->requirements.size;

        // Track and validate bound memory range information
        auto mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            skip_call |= InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
            skip_call |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, "vkBindBufferMemory()",
                                             VALIDATION_ERROR_00797);
        }

        // Validate memory requirements alignment
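        // e.g. (illustrative values): an alignment requirement of 0x100 permits offsets 0x0, 0x100, 0x200, ...;
        // a memoryOffset of 0x104 would fail this check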
        if (vk_safe_modulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_02174, "DS",
                                 "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
                                 "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                                 ", returned from a call to vkGetBufferMemoryRequirements with that buffer. %s",
                                 memoryOffset, buffer_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_02174]);
        }

        // Validate device limits alignments
        static const VkBufferUsageFlagBits usage_list[3] = {
            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
            VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
        static const char *memory_type[3] = {"texel", "uniform", "storage"};
        static const char *offset_name[3] = {"minTexelBufferOffsetAlignment", "minUniformBufferOffsetAlignment",
                                             "minStorageBufferOffsetAlignment"};
        static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = {VALIDATION_ERROR_00794, VALIDATION_ERROR_00795,
                                                                VALIDATION_ERROR_00796};

        // Cannot be static: these alignment limits must be re-read for each device
        const VkDeviceSize offset_requirement[3] = {
            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment};
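        // e.g. (illustrative): a buffer created with VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT on a device reporting
        // minUniformBufferOffsetAlignment = 0x40 may only be bound at offsets that are multiples of 0x40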
        VkBufferUsageFlags usage = buffer_state->createInfo.usage;

        for (int i = 0; i < 3; i++) {
            if (usage & usage_list[i]) {
                if (vk_safe_modulo(memoryOffset, offset_requirement[i]) != 0) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, msgCode[i], "DS",
                                         "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
                                         "device limit %s 0x%" PRIxLEAST64 ". %s",
                                         memory_type[i], memoryOffset, offset_name[i], offset_requirement[i],
                                         validation_error_map[msgCode[i]]);
                }
            }
        }
    }
    lock.unlock();
    if (!skip_call) {
        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL
GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
    auto buffer_state = getBufferState(dev_data, buffer);
    if (buffer_state) {
        buffer_state->requirements = *pMemoryRequirements;
        buffer_state->memory_requirements_checked = true;
    }
}

VKAPI_ATTR void VKAPI_CALL
GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
    auto image_state = getImageState(dev_data, image);
    if (image_state) {
        image_state->requirements = *pMemoryRequirements;
        image_state->memory_requirements_checked = true;
    }
}

static bool PreCallValidateDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE **image_view_state,
                                            VK_OBJECT *obj_struct) {
    *image_view_state = getImageViewState(dev_data, image_view);
    *obj_struct = {reinterpret_cast<uint64_t &>(image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT};
    if (dev_data->instance_data->disabled.destroy_image_view)
        return false;
    bool skip = false;
    if (*image_view_state) {
        skip |= ValidateObjectNotInUse(dev_data, *image_view_state, *obj_struct, VALIDATION_ERROR_00776);
    }
    return skip;
}

static void PostCallRecordDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE *image_view_state,
                                           VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, image_view_state->cb_bindings, obj_struct);
    dev_data->imageViewMap.erase(image_view);
}

VKAPI_ATTR void VKAPI_CALL
DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    IMAGE_VIEW_STATE *image_view_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
        lock.lock();
        PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    my_data->shaderModuleMap.erase(shaderModule);
    lock.unlock();

    my_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
}

static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
                                           VK_OBJECT *obj_struct) {
    *pipeline_state = getPipelineState(dev_data, pipeline);
    *obj_struct = {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT};
    if (dev_data->instance_data->disabled.destroy_pipeline)
        return false;
    bool skip = false;
    if (*pipeline_state) {
        skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, VALIDATION_ERROR_00555);
    }
    return skip;
}

static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
                                          VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
    dev_data->pipelineMap.erase(pipeline);
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    PIPELINE_STATE *pipeline_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
        lock.lock();
        PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    dev_data->pipelineLayoutMap.erase(pipelineLayout);
    lock.unlock();

    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
}

static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
                                          VK_OBJECT *obj_struct) {
    *sampler_state = getSamplerState(dev_data, sampler);
    *obj_struct = {reinterpret_cast<uint64_t &>(sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT};
    if (dev_data->instance_data->disabled.destroy_sampler)
        return false;
    bool skip = false;
    if (*sampler_state) {
        skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, VALIDATION_ERROR_00837);
    }
    return skip;
}

static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
                                         VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    if (sampler_state)
        invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
    dev_data->samplerMap.erase(sampler);
}

VKAPI_ATTR void VKAPI_CALL
DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    SAMPLER_STATE *sampler_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
        lock.lock();
        PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
    }
}

static void PostCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
    dev_data->descriptorSetLayoutMap.erase(ds_layout);
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
    std::unique_lock<std::mutex> lock(global_lock);
    PostCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
}

static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
                                                 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
    *desc_pool_state = getDescriptorPoolState(dev_data, pool);
    *obj_struct = {reinterpret_cast<uint64_t &>(pool), VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT};
    if (dev_data->instance_data->disabled.destroy_descriptor_pool)
        return false;
    bool skip = false;
    if (*desc_pool_state) {
        skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, VALIDATION_ERROR_00901);
    }
    return skip;
}

static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
                                                DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
    // Free sets that were in this pool
    for (auto ds : desc_pool_state->sets) {
        freeDescriptorSet(dev_data, ds);
    }
    dev_data->descriptorPoolMap.erase(descriptorPool);
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
        lock.lock();
        PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
    }
}

// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result
//  If this is a secondary command buffer, it is only an error if its primary is also in-flight
// This function is only valid at a point when cmdBuffer is being reset or freed
static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
                                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
        // Primary CB or secondary where primary is also in-flight is an error
        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, error_code, "DS",
                        "Attempt to %s command buffer (0x%p) which is in use. %s", action, cb_node->commandBuffer,
                        validation_error_map[error_code]);
        }
    }
    return skip_call;
}

// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;
    for (auto cmd_buffer : pPool->commandBuffers) {
        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
            skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action, error_code);
        }
    }
    return skip_call;
}

static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
    for (auto cmd_buffer : pPool->commandBuffers) {
        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    }
}

VKAPI_ATTR void VKAPI_CALL
FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);

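    // Two passes over pCommandBuffers: first reject the entire free if any command buffer is still in
    // flight, then perform the actual cleanup.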
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        // Verify that the CB is not in flight before it can be freed
        if (cb_node) {
            skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_00096);
        }
    }

    if (skip_call)
        return;

    auto pPool = getCommandPoolNode(dev_data, commandPool);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        // Delete CB information structure, and remove from commandBufferMap
        if (cb_node) {
            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
            // reset prior to delete for data clean-up
            resetCB(dev_data, cb_node->commandBuffer);
            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
            delete cb_node;
        }

        // Remove commandBuffer reference from commandPoolMap
        pPool->commandBuffers.remove(pCommandBuffers[i]);
    }
    lock.unlock();

    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator,
                                                 VkCommandPool *pCommandPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
        if (!dev_data->enabled_features.pipelineStatisticsQuery) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            __LINE__, VALIDATION_ERROR_01006, "DS",
                            "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device "
                            "with VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE. %s",
                            validation_error_map[VALIDATION_ERROR_01006]);
        }
    }

    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    if (!skip) {
        result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
    }
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
        qp_node->createInfo = *pCreateInfo;
    }
    return result;
}

static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE **cp_state) {
    *cp_state = getCommandPoolNode(dev_data, pool);
    if (dev_data->instance_data->disabled.destroy_command_pool)
        return false;
    bool skip = false;
    if (*cp_state) {
        // Verify that command buffers in pool are complete (not in-flight)
        skip |= checkCommandBuffersInFlight(dev_data, *cp_state, "destroy command pool with", VALIDATION_ERROR_00077);
    }
    return skip;
}

static void PostCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE *cp_state) {
    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
    clearCommandBuffersInFlight(dev_data, cp_state);
    for (auto cb : cp_state->commandBuffers) {
        clear_cmd_buf_and_mem_references(dev_data, cb);
        auto cb_node = getCBNode(dev_data, cb);
        // Remove references to this cb_node prior to delete
        // TODO : Need better solution here, resetCB?
        for (auto obj : cb_node->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, cb_node);
        }
        for (auto framebuffer : cb_node->framebuffers) {
            auto fb_state = getFramebufferState(dev_data, framebuffer);
            if (fb_state)
                fb_state->cb_bindings.erase(cb_node);
        }
        dev_data->commandBufferMap.erase(cb); // Remove this command buffer
        delete cb_node;                       // delete CB info structure
    }
    dev_data->commandPoolMap.erase(pool);
}

// Destroy commandPool along with all of the commandBuffers allocated from that pool
VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    COMMAND_POOL_NODE *cp_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool, &cp_state);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
        lock.lock();
        PostCallRecordDestroyCommandPool(dev_data, commandPool, cp_state);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    std::unique_lock<std::mutex> lock(global_lock);
    auto pPool = getCommandPoolNode(dev_data, commandPool);
    skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_00072);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result) {
        lock.lock();
        clearCommandBuffersInFlight(dev_data, pPool);
        for (auto cmdBuffer : pPool->commandBuffers) {
            resetCB(dev_data, cmdBuffer);
        }
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto pFence = getFenceNode(dev_data, pFences[i]);
        if (pFence && pFence->state == FENCE_INFLIGHT) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, VALIDATION_ERROR_00183, "DS",
                                 "Fence 0x%" PRIx64 " is in use. %s", reinterpret_cast<const uint64_t &>(pFences[i]),
                                 validation_error_map[VALIDATION_ERROR_00183]);
        }
    }
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);

    if (result == VK_SUCCESS) {
        lock.lock();
        for (uint32_t i = 0; i < fenceCount; ++i) {
            auto pFence = getFenceNode(dev_data, pFences[i]);
            if (pFence) {
                pFence->state = FENCE_UNSIGNALED;
            }
        }
        lock.unlock();
    }

    return result;
}

// For given cb_nodes, invalidate them and track object causing invalidation
void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
    for (auto cb_node : cb_nodes) {
        if (cb_node->state == CB_RECORDING) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "Invalidating a command buffer that's currently being recorded: 0x%p.", cb_node->commandBuffer);
        }
        cb_node->state = CB_INVALID;
        cb_node->broken_bindings.push_back(obj);
    }
}

static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
                                              FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
    *framebuffer_state = getFramebufferState(dev_data, framebuffer);
    *obj_struct = {reinterpret_cast<uint64_t &>(framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT};
    if (dev_data->instance_data->disabled.destroy_framebuffer)
        return false;
    bool skip = false;
    if (*framebuffer_state) {
        skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, VALIDATION_ERROR_00422);
    }
    return skip;
}

static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
                                             VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
    dev_data->frameBufferMap.erase(framebuffer);
}

VKAPI_ATTR void VKAPI_CALL
DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    FRAMEBUFFER_STATE *framebuffer_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
        lock.lock();
        PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
    }
}

static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
                                             VK_OBJECT *obj_struct) {
    *rp_state = getRenderPassState(dev_data, render_pass);
    *obj_struct = {reinterpret_cast<uint64_t &>(render_pass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT};
    if (dev_data->instance_data->disabled.destroy_renderpass)
        return false;
    bool skip = false;
    if (*rp_state) {
        skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, VALIDATION_ERROR_00393);
    }
    return skip;
}

static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
                                            VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
    dev_data->renderPassMap.erase(render_pass);
}

VKAPI_ATTR void VKAPI_CALL
DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    RENDER_PASS_STATE *rp_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
        lock.lock();
        PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO: Add check for VALIDATION_ERROR_00658
    // TODO: Add check for VALIDATION_ERROR_00666
    // TODO: Add check for VALIDATION_ERROR_00667
    // TODO: Add check for VALIDATION_ERROR_00668
    // TODO: Add check for VALIDATION_ERROR_00669
    VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        // TODO : This doesn't create a deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
        dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_STATE>(new BUFFER_STATE(*pBuffer, pCreateInfo))));
    }
    return result;
}

static bool PreCallValidateCreateBufferView(layer_data *dev_data, const VkBufferViewCreateInfo *pCreateInfo) {
    bool skip_call = false;
    BUFFER_STATE *buffer_state = getBufferState(dev_data, pCreateInfo->buffer);
    // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
    if (buffer_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCreateBufferView()", VALIDATION_ERROR_02522);
        // In order to create a valid buffer view, the buffer must have been created with at least one of the
        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
        skip_call |= ValidateBufferUsageFlags(
            dev_data, buffer_state, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
            VALIDATION_ERROR_00694, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
    }
    return skip_call;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        lock.lock();
        dev_data->bufferViewMap[*pView] = unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
        lock.unlock();
    }
    return result;
}
6477
6478VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
6479                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
6480    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6481
6482    VkResult result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
6483
6484    if (VK_SUCCESS == result) {
6485        std::lock_guard<std::mutex> lock(global_lock);
6486        PostCallRecordCreateImage(&dev_data->imageMap, &dev_data->imageSubresourceMap, &dev_data->imageLayoutMap, pCreateInfo,
6487                                  pImage);
6488    }
6489    return result;
6490}
6491
6492static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
6493    // Expects global_lock to be held by caller
6494
6495    auto image_state = getImageState(dev_data, image);
6496    if (image_state) {
6497        // If the caller used the special values VK_REMAINING_MIP_LEVELS and VK_REMAINING_ARRAY_LAYERS, resolve them now in our
6498        // internal state to the actual values.
6499        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
6500            range->levelCount = image_state->createInfo.mipLevels - range->baseMipLevel;
6501        }
6502
6503        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
6504            range->layerCount = image_state->createInfo.arrayLayers - range->baseArrayLayer;
6505        }
6506    }
6507}
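
// Illustrative example (hypothetical values): for an image created with mipLevels = 10 and
// arrayLayers = 6, the special values are resolved by the arithmetic above as follows.
#if 0
    VkImageSubresourceRange range = {};
    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    range.baseMipLevel = 3;
    range.levelCount = VK_REMAINING_MIP_LEVELS;    // resolved to 10 - 3 = 7
    range.baseArrayLayer = 2;
    range.layerCount = VK_REMAINING_ARRAY_LAYERS;  // resolved to 6 - 2 = 4
#endif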
6508
6509// Return the correct layer/level counts if the caller used the special
6510// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
6511static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6512                                         VkImage image) {
6513    // Expects global_lock to be held by caller
6514
6515    *levels = range.levelCount;
6516    *layers = range.layerCount;
6517    auto image_state = getImageState(dev_data, image);
6518    if (image_state) {
6519        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
6520            *levels = image_state->createInfo.mipLevels - range.baseMipLevel;
6521        }
6522        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
6523            *layers = image_state->createInfo.arrayLayers - range.baseArrayLayer;
6524        }
6525    }
6526}
6527
6528// For the given format verify that the aspect masks make sense
6529static bool ValidateImageAspectMask(layer_data *dev_data, VkImage image, VkFormat format, VkImageAspectFlags aspect_mask,
6530                                    const char *func_name) {
6531    bool skip = false;
6532    if (vk_format_is_color(format)) {
6533        if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
6534            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6535                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6536                            "%s: Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set. %s", func_name,
6537                            validation_error_map[VALIDATION_ERROR_00741]);
6538        } else if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != aspect_mask) {
6539            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6540                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6541                            "%s: Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set. %s", func_name,
6542                            validation_error_map[VALIDATION_ERROR_00741]);
6543        }
6544    } else if (vk_format_is_depth_and_stencil(format)) {
6545        if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0) {
6546            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6547                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE", "%s: Depth/stencil image formats must have "
6548                                                                                        "at least one of VK_IMAGE_ASPECT_DEPTH_BIT "
6549                                                                                        "and VK_IMAGE_ASPECT_STENCIL_BIT set. %s",
6550                            func_name, validation_error_map[VALIDATION_ERROR_00741]);
6551        } else if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != aspect_mask) {
6552            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6553                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6554                            "%s: Combination depth/stencil image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT and "
6555                            "VK_IMAGE_ASPECT_STENCIL_BIT set. %s",
6556                            func_name, validation_error_map[VALIDATION_ERROR_00741]);
6557        }
6558    } else if (vk_format_is_depth_only(format)) {
6559        if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) {
6560            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6561                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6562                            "%s: Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set. %s", func_name,
6563                            validation_error_map[VALIDATION_ERROR_00741]);
6564        } else if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != aspect_mask) {
6565            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6566                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6567                            "%s: Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set. %s", func_name,
6568                            validation_error_map[VALIDATION_ERROR_00741]);
6569        }
6570    } else if (vk_format_is_stencil_only(format)) {
6571        if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT) {
6572            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6573                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6574                            "%s: Stencil-only image formats must have the VK_IMAGE_ASPECT_STENCIL_BIT set. %s", func_name,
6575                            validation_error_map[VALIDATION_ERROR_00741]);
6576        } else if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != aspect_mask) {
6577            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6578                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6579                            "%s: Stencil-only image formats can have only the VK_IMAGE_ASPECT_STENCIL_BIT set. %s", func_name,
6580                            validation_error_map[VALIDATION_ERROR_00741]);
6581        }
6582    }
6583    return skip;
6584}
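
// Quick reference for the checks above (summary only; the Vulkan spec is authoritative):
//   color formats (e.g. VK_FORMAT_R8G8B8A8_UNORM):     exactly VK_IMAGE_ASPECT_COLOR_BIT
//   depth/stencil (e.g. VK_FORMAT_D24_UNORM_S8_UINT):  DEPTH_BIT, STENCIL_BIT, or both, and nothing else
//   depth-only (e.g. VK_FORMAT_D32_SFLOAT):            exactly VK_IMAGE_ASPECT_DEPTH_BIT
//   stencil-only (VK_FORMAT_S8_UINT):                  exactly VK_IMAGE_ASPECT_STENCIL_BIT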
6585
6586static bool PreCallValidateCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *create_info) {
6587    bool skip = false;
6588    IMAGE_STATE *image_state = getImageState(dev_data, create_info->image);
6589    if (image_state) {
6590        skip |= ValidateImageUsageFlags(
6591            dev_data, image_state, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
6592                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
6593            false, -1, "vkCreateImageView()",
6594            "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT]_BIT");
6595        // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
6596        skip |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCreateImageView()", VALIDATION_ERROR_02524);
6597        // Checks imported from image layer
6598        if (create_info->subresourceRange.baseMipLevel >= image_state->createInfo.mipLevels) {
6599            std::stringstream ss;
6600            ss << "vkCreateImageView called with baseMipLevel " << create_info->subresourceRange.baseMipLevel << " for image "
6601               << create_info->image << " that only has " << image_state->createInfo.mipLevels << " mip levels.";
6602            skip |=
6603                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6604                        VALIDATION_ERROR_00768, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00768]);
6605        }
6606        if (create_info->subresourceRange.baseArrayLayer >= image_state->createInfo.arrayLayers) {
6607            std::stringstream ss;
6608            ss << "vkCreateImageView called with baseArrayLayer " << create_info->subresourceRange.baseArrayLayer << " for image "
6609               << create_info->image << " that only has " << image_state->createInfo.arrayLayers << " array layers.";
6610            skip |=
6611                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6612                        VALIDATION_ERROR_00769, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00769]);
6613        }
6614        // TODO: Need new valid usage language for levelCount == 0 & layerCount == 0
6615        if (!create_info->subresourceRange.levelCount) {
6616            std::stringstream ss;
6617            ss << "vkCreateImageView called with 0 in pCreateInfo->subresourceRange.levelCount.";
6618            skip |=
6619                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6620                        VALIDATION_ERROR_00768, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00768]);
6621        }
6622        if (!create_info->subresourceRange.layerCount) {
6623            std::stringstream ss;
6624            ss << "vkCreateImageView called with 0 in pCreateInfo->subresourceRange.layerCount.";
6625            skip |=
6626                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6627                        VALIDATION_ERROR_00769, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00769]);
6628        }
6629
6630        VkImageCreateFlags image_flags = image_state->createInfo.flags;
6631        VkFormat image_format = image_state->createInfo.format;
6632        VkFormat view_format = create_info->format;
6633        VkImageAspectFlags aspect_mask = create_info->subresourceRange.aspectMask;
6634
6635        // Validate VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT state
6636        if (image_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
6637            // Format MUST be compatible (in the same format compatibility class) as the format the image was created with
6638            if (vk_format_get_compatibility_class(image_format) != vk_format_get_compatibility_class(view_format)) {
6639                std::stringstream ss;
6640                ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
6641                   << " is not in the same format compatibility class as image (" << (uint64_t)create_info->image << ")  format "
6642                   << string_VkFormat(image_format) << ".  Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT "
6643                   << "can support ImageViews with differing formats but they must be in the same compatibility class.";
6644                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6645                                VALIDATION_ERROR_02171, "IMAGE", "%s %s", ss.str().c_str(),
6646                                validation_error_map[VALIDATION_ERROR_02171]);
6647            }
6648        } else {
6649            // Format MUST be IDENTICAL to the format the image was created with
6650            if (image_format != view_format) {
6651                std::stringstream ss;
6652                ss << "vkCreateImageView() format " << string_VkFormat(view_format) << " differs from image "
6653                   << (uint64_t)create_info->image << " format " << string_VkFormat(image_format)
6654                   << ".  Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT was set on image creation.";
6655                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6656                                VALIDATION_ERROR_02172, "IMAGE", "%s %s", ss.str().c_str(),
6657                                validation_error_map[VALIDATION_ERROR_02172]);
6658            }
6659        }
6660
6661        // Validate correct image aspect bits for desired formats and format consistency
6662        skip |= ValidateImageAspectMask(dev_data, image_state->image, image_format, aspect_mask, "vkCreateImageView()");
6663    }
6664    return skip;
6665}
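
// Illustrative sketch (hypothetical application-side code): the format rules above in practice,
// assuming 'image' was created with VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT and VK_FORMAT_R8G8B8A8_UNORM.
#if 0
    VkImageViewCreateInfo view_ci = {};
    view_ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    view_ci.image = image;
    view_ci.viewType = VK_IMAGE_VIEW_TYPE_2D;
    view_ci.format = VK_FORMAT_R8G8B8A8_SRGB;  // same 32-bit compatibility class as the image: OK
    // view_ci.format = VK_FORMAT_R16G16B16A16_SFLOAT;  // 64-bit class: triggers VALIDATION_ERROR_02171
    view_ci.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
#endif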
6666
6667static inline void PostCallRecordCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *create_info, VkImageView view) {
6668    dev_data->imageViewMap[view] = unique_ptr<IMAGE_VIEW_STATE>(new IMAGE_VIEW_STATE(view, create_info));
6669    ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[view]->create_info.subresourceRange, create_info->image);
6670}
6671
6672VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6673                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
6674    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6675    std::unique_lock<std::mutex> lock(global_lock);
6676    bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
6677    lock.unlock();
6678    if (skip)
6679        return VK_ERROR_VALIDATION_FAILED_EXT;
6680    VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
6681    if (VK_SUCCESS == result) {
6682        lock.lock();
6683        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
6684        lock.unlock();
6685    }
6686
6687    return result;
6688}
6689
6690VKAPI_ATTR VkResult VKAPI_CALL
6691CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
6692    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6693    VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
6694    if (VK_SUCCESS == result) {
6695        std::lock_guard<std::mutex> lock(global_lock);
6696        auto &fence_node = dev_data->fenceMap[*pFence];
6697        fence_node.fence = *pFence;
6698        fence_node.createInfo = *pCreateInfo;
6699        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
6700    }
6701    return result;
6702}
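
// Note (summary of the state tracking above): a fence created with VK_FENCE_CREATE_SIGNALED_BIT
// starts out signaled, so it is tracked as FENCE_RETIRED and can be waited on without a prior submit.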
6703
6704// TODO handle pipeline caches
6705VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6706                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6707    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6708    VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6709    return result;
6710}
6711
6712VKAPI_ATTR void VKAPI_CALL
6713DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
6714    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6715    dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
6716}
6717
6718VKAPI_ATTR VkResult VKAPI_CALL
6719GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
6720    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6721    VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6722    return result;
6723}
6724
6725VKAPI_ATTR VkResult VKAPI_CALL
6726MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
6727    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6728    VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6729    return result;
6730}
6731
6732// utility function to set collective state for pipeline
6733void set_pipeline_state(PIPELINE_STATE *pPipe) {
6734    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
6735    if (pPipe->graphicsPipelineCI.pColorBlendState) {
6736        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6737            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6738                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6739                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6740                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6741                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6742                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6743                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6744                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6745                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6746                    pPipe->blendConstantsEnabled = true;
6747                }
6748            }
6749        }
6750    }
6751}
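
// Illustrative example (hypothetical values): the enum-range test above matches the four
// constant-based blend factors (CONSTANT_COLOR, ONE_MINUS_CONSTANT_COLOR, CONSTANT_ALPHA,
// ONE_MINUS_CONSTANT_ALPHA); any one of them on an enabled attachment flips the flag.
#if 0
    VkPipelineColorBlendAttachmentState att = {};
    att.blendEnable = VK_TRUE;
    att.srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_ALPHA;  // in range -> blendConstantsEnabled = true
    att.dstColorBlendFactor = VK_BLEND_FACTOR_ONE;             // out of range; a single hit suffices
#endif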
6752
6753static bool PreCallCreateGraphicsPipelines(layer_data *device_data, uint32_t count,
6754                                           const VkGraphicsPipelineCreateInfo *create_infos, vector<PIPELINE_STATE *> &pipe_state) {
6755    bool skip = false;
6756    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
6757
6758    for (uint32_t i = 0; i < count; i++) {
6759        skip |= verifyPipelineCreateState(device_data, pipe_state, i);
6760        if (create_infos[i].pVertexInputState != NULL) {
6761            for (uint32_t j = 0; j < create_infos[i].pVertexInputState->vertexAttributeDescriptionCount; j++) {
6762                VkFormat format = create_infos[i].pVertexInputState->pVertexAttributeDescriptions[j].format;
6763                // Internal call to get format info.  Still goes through layers, could potentially go directly to ICD.
6764                VkFormatProperties properties;
6765                instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &properties);
6766                if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
6767                    skip |= log_msg(
6768                        device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
6769                        __LINE__, VALIDATION_ERROR_01413, "IMAGE",
6770                        "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
6771                        "(%s) is not a supported vertex buffer format. %s",
6772                        i, j, string_VkFormat(format), validation_error_map[VALIDATION_ERROR_01413]);
6773                }
6774            }
6775        }
6776    }
6777    return skip;
6778}
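
// Illustrative note (implementation-dependent): if, say, an attribute uses VK_FORMAT_B8G8R8A8_SRGB
// and the ICD does not report VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT in that format's bufferFeatures,
// the loop above fires VALIDATION_ERROR_01413 for that pCreateInfo/attribute pair.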
6779
6780VKAPI_ATTR VkResult VKAPI_CALL
6781CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6782                        const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6783                        VkPipeline *pPipelines) {
6784    // TODO What to do with pipelineCache?
6785    // The order of operations here is a little convoluted but gets the job done
6786    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
6787    //  2. Create state is then validated (which uses flags setup during shadowing)
6788    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
6789    bool skip = false;
6790    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6791    vector<PIPELINE_STATE *> pipe_state(count);
6792    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6793
6794    uint32_t i = 0;
6795    std::unique_lock<std::mutex> lock(global_lock);
6796
6797    for (i = 0; i < count; i++) {
6798        pipe_state[i] = new PIPELINE_STATE;
6799        pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i]);
6800        pipe_state[i]->render_pass_ci.initialize(getRenderPassState(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
6801        pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6802    }
6803    skip |= PreCallCreateGraphicsPipelines(dev_data, count, pCreateInfos, pipe_state);
6804
6805    if (skip) {
6806        for (i = 0; i < count; i++) {
6807            delete pipe_state[i];
6808            pPipelines[i] = VK_NULL_HANDLE;
6809        }
6810        return VK_ERROR_VALIDATION_FAILED_EXT;
6811    }
6812
6813    lock.unlock();
6814    auto result = dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6815    lock.lock();
6816    for (i = 0; i < count; i++) {
6817        if (pPipelines[i] == VK_NULL_HANDLE) {
6818            delete pipe_state[i];
6819        } else {
6821            pipe_state[i]->pipeline = pPipelines[i];
6822            dev_data->pipelineMap[pipe_state[i]->pipeline] = pipe_state[i];
6823        }
6824    }
6825
6826    return result;
6827}
6828
6829VKAPI_ATTR VkResult VKAPI_CALL
6830CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6831                       const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6832                       VkPipeline *pPipelines) {
6833    bool skip = false;
6834
6835    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6836    vector<PIPELINE_STATE *> pPipeState(count);
6837    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6838
6839    uint32_t i = 0;
6840    std::unique_lock<std::mutex> lock(global_lock);
6841    for (i = 0; i < count; i++) {
6842        // TODO: Verify compute stage bits
6843
6844        // Create and initialize internal tracking data structure
6845        pPipeState[i] = new PIPELINE_STATE;
6846        pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
6847        pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6848
6849        // TODO: Add Compute Pipeline Verification
6850        skip |= !validate_compute_pipeline(dev_data->report_data, pPipeState[i], &dev_data->enabled_features,
6851                                                dev_data->shaderModuleMap);
6852        // skip |= verifyPipelineCreateState(dev_data, pPipeState[i]);
6853    }
6854
6855    if (skip) {
6856        for (i = 0; i < count; i++) {
6857            // Clean up any locally allocated data structures
6858            delete pPipeState[i];
6859            pPipelines[i] = VK_NULL_HANDLE;
6860        }
6861        return VK_ERROR_VALIDATION_FAILED_EXT;
6862    }
6863
6864    lock.unlock();
6865    auto result = dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6866    lock.lock();
6867    for (i = 0; i < count; i++) {
6868        if (pPipelines[i] == VK_NULL_HANDLE) {
6869            delete pPipeState[i];
6870        } else {
6872            pPipeState[i]->pipeline = pPipelines[i];
6873            dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
6874        }
6875    }
6876
6877    return result;
6878}
6879
6880VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6881                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
6882    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6883    VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
6884    if (VK_SUCCESS == result) {
6885        std::lock_guard<std::mutex> lock(global_lock);
6886        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
6887    }
6888    return result;
6889}
6890
6891static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
6892    if (dev_data->instance_data->disabled.create_descriptor_set_layout)
6893        return false;
6894    return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info);
6895}
6896
6897static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
6898                                                    VkDescriptorSetLayout set_layout) {
6899    // TODO: Convert this to unique_ptr to avoid leaks
6900    dev_data->descriptorSetLayoutMap[set_layout] = new cvdescriptorset::DescriptorSetLayout(create_info, set_layout);
6901}
6902
6903VKAPI_ATTR VkResult VKAPI_CALL
6904CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6905                          const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
6906    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6907    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6908    std::unique_lock<std::mutex> lock(global_lock);
6909    bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
6910    if (!skip) {
6911        lock.unlock();
6912        result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6913        if (VK_SUCCESS == result) {
6914            lock.lock();
6915            PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
6916        }
6917    }
6918    return result;
6919}
6920
6921// Used by CreatePipelineLayout and CmdPushConstants.
6922// Note that the index argument is optional and only used by CreatePipelineLayout.
6923static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6924                                      const char *caller_name, uint32_t index = 0) {
6925    if (dev_data->instance_data->disabled.push_constant_range)
6926        return false;
6927    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
6928    bool skip_call = false;
6929    // Check that offset + size don't exceed the max.
6930    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
6931    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
6932        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
6933        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6934            if (offset >= maxPushConstantsSize) {
6935                skip_call |=
6936                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6937                            VALIDATION_ERROR_00877, "DS", "%s call has push constants index %u with offset %u that "
6938                                                          "exceeds this device's maxPushConstantsSize of %u. %s",
6939                            caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00877]);
6940            }
6941            if (size > maxPushConstantsSize - offset) {
6942                skip_call |=
6943                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6944                            VALIDATION_ERROR_00880, "DS", "%s call has push constants index %u with offset %u and size %u that "
6945                                                          "exceeds this device's maxPushConstantsSize of %u. %s",
6946                            caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00880]);
6947            }
6948        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6949            if (offset >= maxPushConstantsSize) {
6950                skip_call |=
6951                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6952                            VALIDATION_ERROR_00991, "DS", "%s call has push constants index %u with offset %u that "
6953                                                          "exceeds this device's maxPushConstantsSize of %u. %s",
6954                            caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00991]);
6955            }
6956            if (size > maxPushConstantsSize - offset) {
6957                skip_call |=
6958                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6959                            VALIDATION_ERROR_00992, "DS", "%s call has push constants index %u with offset %u and size %u that "
6960                                                          "exceeds this device's maxPushConstantsSize of %u. %s",
6961                            caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00992]);
6962            }
6963        } else {
6964            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6965                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6966        }
6967    }
6968    // size needs to be non-zero and a multiple of 4.
6969    if ((size == 0) || ((size & 0x3) != 0)) {
6970        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6971            if (size == 0) {
6972                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
6973                                     __LINE__, VALIDATION_ERROR_00878, "DS", "%s call has push constants index %u with "
6974                                                                             "size %u. Size must be greater than zero. %s",
6975                                     caller_name, index, size, validation_error_map[VALIDATION_ERROR_00878]);
6976            }
6977            if (size & 0x3) {
6978                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
6979                                     __LINE__, VALIDATION_ERROR_00879, "DS", "%s call has push constants index %u with "
6980                                                                             "size %u. Size must be a multiple of 4. %s",
6981                                     caller_name, index, size, validation_error_map[VALIDATION_ERROR_00879]);
6982            }
6983        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6984            if (size == 0) {
6985                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
6986                                     __LINE__, VALIDATION_ERROR_01000, "DS", "%s call has push constants index %u with "
6987                                                                             "size %u. Size must be greater than zero. %s",
6988                                     caller_name, index, size, validation_error_map[VALIDATION_ERROR_01000]);
6989            }
6990            if (size & 0x3) {
6991                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
6992                                     __LINE__, VALIDATION_ERROR_00990, "DS", "%s call has push constants index %u with "
6993                                                                             "size %u. Size must be a multiple of 4. %s",
6994                                     caller_name, index, size, validation_error_map[VALIDATION_ERROR_00990]);
6995            }
6996        } else {
6997            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6998                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6999        }
7000    }
7001    // offset needs to be a multiple of 4.
7002    if ((offset & 0x3) != 0) {
7003        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
7004            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7005                                 VALIDATION_ERROR_02521, "DS", "%s call has push constants index %u with "
7006                                                               "offset %u. Offset must be a multiple of 4. %s",
7007                                 caller_name, index, offset, validation_error_map[VALIDATION_ERROR_02521]);
7008        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
7009            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7010                                 VALIDATION_ERROR_00989, "DS", "%s call has push constants with "
7011                                                               "offset %u. Offset must be a multiple of 4. %s",
7012                                 caller_name, offset, validation_error_map[VALIDATION_ERROR_00989]);
7013        } else {
7014            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7015                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
7016        }
7017    }
7018    return skip_call;
7019}
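
// Worked example (hypothetical device limit maxPushConstantsSize = 128) of the checks above:
//   offset = 120, size = 16  -> rejected: 16 > 128 - 120, the range runs past the limit
//   offset = 0,   size = 6   -> rejected: size is not a multiple of 4
//   offset = 2,   size = 8   -> rejected: offset is not a multiple of 4
//   offset = 64,  size = 64  -> accepted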
7020
7021VKAPI_ATTR VkResult VKAPI_CALL
7022CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
7023                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
7024    bool skip_call = false;
7025    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
7026    // TODO : Add checks for VALIDATION_ERRORS 865-871
7027    // Push Constant Range checks
7028    uint32_t i, j;
7029    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
7030        skip_call |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
7031                                               pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
7032        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
7033            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7034                                 VALIDATION_ERROR_00882, "DS", "vkCreatePipelineLayout() call has no stageFlags set. %s",
7035                                 validation_error_map[VALIDATION_ERROR_00882]);
7036        }
7037    }
7038    if (skip_call)
7039        return VK_ERROR_VALIDATION_FAILED_EXT;
7040
7041    // Each range has been validated individually.  Now check for overlap between ranges.
7042    // There's no explicit Valid Usage language prohibiting overlap, so issue a warning instead of an error.
7043    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
7044        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
7045            const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
7046            const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
7047            const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
7048            const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
7049            if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
7050                skip_call |=
7051                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7052                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
7053                                                                  "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
7054                            i, minA, maxA, j, minB, maxB);
7055            }
7056        }
7057    }
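
    // Worked example (hypothetical values): ranges 0:[0, 16) and 1:[8, 24) overlap on [8, 16),
    // since minA (0) <= minB (8) and maxA (16) > minB (8); such a pair produces the warning above.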
7058
7059    VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
7060    if (VK_SUCCESS == result) {
7061        std::lock_guard<std::mutex> lock(global_lock);
7062        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
7063        plNode.layout = *pPipelineLayout;
7064        plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
7065        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
7066            plNode.set_layouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
7067        }
7068        plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
7069        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
7070            plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
7071        }
7072    }
7073    return result;
7074}
7075
7076VKAPI_ATTR VkResult VKAPI_CALL
7077CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
7078                     VkDescriptorPool *pDescriptorPool) {
7079    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
7080    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
7081    if (VK_SUCCESS == result) {
7082        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
7083                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
7084                    (uint64_t)*pDescriptorPool))
7085            return VK_ERROR_VALIDATION_FAILED_EXT;
7086        DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
7087        if (NULL == pNewNode) {
7088            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
7089                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
7090                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
7091                return VK_ERROR_VALIDATION_FAILED_EXT;
7092        } else {
7093            std::lock_guard<std::mutex> lock(global_lock);
7094            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
7095        }
7096    } else {
7097        // TODO: Determine whether any cleanup is needed if pool creation fails
7098    }
7099    return result;
7100}
7101
7102VKAPI_ATTR VkResult VKAPI_CALL
7103ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
7104    // TODO : Add checks for VALIDATION_ERROR_00928
7105    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
7106    VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
7107    if (VK_SUCCESS == result) {
7108        std::lock_guard<std::mutex> lock(global_lock);
7109        clearDescriptorPool(dev_data, device, descriptorPool, flags);
7110    }
7111    return result;
7112}
7113// Ensure the pool contains enough descriptors and descriptor sets to satisfy
7114// an allocation request. Fills common_data with the total number of descriptors of each type required,
7115// as well as DescriptorSetLayout ptrs used for later update.
7116static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
7117                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
7118    if (dev_data->instance_data->disabled.allocate_descriptor_sets)
7119        return false;
7120    // All state checks for AllocateDescriptorSets are done in a single function
7121    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
7122}
7123// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
7124static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
7125                                                 VkDescriptorSet *pDescriptorSets,
7126                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
7127    // All the updates are contained in a single cvdescriptorset function
7128    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
7129                                                   &dev_data->setMap, dev_data);
7130}
7131
7132VKAPI_ATTR VkResult VKAPI_CALL
7133AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
7134    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
7135    std::unique_lock<std::mutex> lock(global_lock);
7136    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
7137    bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
7138    lock.unlock();
7139
7140    if (skip_call)
7141        return VK_ERROR_VALIDATION_FAILED_EXT;
7142
7143    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
7144
7145    if (VK_SUCCESS == result) {
7146        lock.lock();
7147        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
7148        lock.unlock();
7149    }
7150    return result;
7151}
7152// Verify state before freeing DescriptorSets
7153static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
7154                                              const VkDescriptorSet *descriptor_sets) {
7155    if (dev_data->instance_data->disabled.free_descriptor_sets)
7156        return false;
7157    bool skip_call = false;
7158    // First make sure sets being destroyed are not currently in-use
7159    for (uint32_t i = 0; i < count; ++i)
7160        skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
7161
7162    DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
7163    if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
7164        // Can't Free from a NON_FREE pool
7165        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
7166                             reinterpret_cast<uint64_t &>(pool), __LINE__, VALIDATION_ERROR_00922, "DS",
7167                             "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
7168                             "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
7169                             validation_error_map[VALIDATION_ERROR_00922]);
7170    }
7171    return skip_call;
7172}
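
// Illustrative sketch (hypothetical application-side code): freeing individual sets requires the
// pool to have been created with the flag checked above (otherwise VALIDATION_ERROR_00922 fires).
#if 0
    VkDescriptorPoolCreateInfo pool_ci = {};
    pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;  // permits vkFreeDescriptorSets()
    pool_ci.maxSets = 8;
#endif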
7173// Sets have been removed from the pool so update underlying state
7174static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
7175                                             const VkDescriptorSet *descriptor_sets) {
7176    DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
7177    // Update available descriptor sets in pool
7178    pool_state->availableSets += count;
7179
7180    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
7181    for (uint32_t i = 0; i < count; ++i) {
7182        auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
7183        uint32_t type_index = 0, descriptor_count = 0;
7184        for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
7185            type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
7186            descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
7187            pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
7188        }
7189        freeDescriptorSet(dev_data, descriptor_set);
7190        pool_state->sets.erase(descriptor_set);
7191    }
7192}
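
// Worked example (hypothetical set layout): freeing one set whose bindings hold
// {UNIFORM_BUFFER x2, COMBINED_IMAGE_SAMPLER x4} increments availableSets by 1 and returns
// 2 and 4 descriptors to the corresponding availableDescriptorTypeCount slots.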
7193
7194VKAPI_ATTR VkResult VKAPI_CALL
7195FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
7196    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
7197    // Make sure that no sets being destroyed are in-flight
7198    std::unique_lock<std::mutex> lock(global_lock);
7199    bool skip_call = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
7200    lock.unlock();
7201
7202    if (skip_call)
7203        return VK_ERROR_VALIDATION_FAILED_EXT;
7204    VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
7205    if (VK_SUCCESS == result) {
7206        lock.lock();
7207        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
7208        lock.unlock();
7209    }
7210    return result;
7211}
7212// TODO : This is a proof-of-concept for the core validation architecture.
7213//  Eventually we'll want to break these functions out into separate files, but
7214//  they're kept together here to prove out the design.
7215// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
7216static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
7217                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
7218                                                const VkCopyDescriptorSet *pDescriptorCopies) {
7219    if (dev_data->instance_data->disabled.update_descriptor_sets)
7220        return false;
7221    // First thing to do is perform map look-ups.
7222    // NOTE : UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
7223    //  so we can't do a single map look-up up-front; the look-ups are done individually in the functions below.
7224
7225    // Now make call(s) that validate state, but don't perform state updates in this function.
7226    // Note that we don't yet have a specific DescriptorSet instance here, so we use a helper function in the
7227    //  cvdescriptorset namespace that parses the params and makes calls into the specific class instances.
7228    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
7229                                                         descriptorCopyCount, pDescriptorCopies);
7230}
7231// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
7232static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
7233                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
7234                                               const VkCopyDescriptorSet *pDescriptorCopies) {
7235    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
7236                                                 pDescriptorCopies);
7237}
7238
7239VKAPI_ATTR void VKAPI_CALL
7240UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
7241                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
7242    // Only map look-up at top level is for device-level layer_data
7243    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
7244    std::unique_lock<std::mutex> lock(global_lock);
7245    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
7246                                                         pDescriptorCopies);
7247    lock.unlock();
7248    if (!skip_call) {
7249        dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
7250                                                      pDescriptorCopies);
7251        lock.lock();
7252        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
7253        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
7254                                           pDescriptorCopies);
7255    }
7256}
7257
7258VKAPI_ATTR VkResult VKAPI_CALL
7259AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
7260    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
7261    VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
7262    if (VK_SUCCESS == result) {
7263        std::unique_lock<std::mutex> lock(global_lock);
7264        auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool);
7265
7266        if (pPool) {
7267            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
7268                // Add command buffer to its commandPool map
7269                pPool->commandBuffers.push_back(pCommandBuffer[i]);
7270                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
7271                // Add command buffer to map
7272                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
7273                resetCB(dev_data, pCommandBuffer[i]);
7274                pCB->createInfo = *pCreateInfo;
7275                pCB->device = device;
7276            }
7277        }
7278        lock.unlock();
7279    }
7280    return result;
7281}
7282
7283// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
7284static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
7285    addCommandBufferBinding(&fb_state->cb_bindings,
7286                            {reinterpret_cast<uint64_t &>(fb_state->framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT},
7287                            cb_state);
7288    // The framebuffer's render pass doesn't vary per attachment, so bind it to the cmd buffer once, outside the loop
7289    auto rp_state = getRenderPassState(dev_data, fb_state->createInfo.renderPass);
7290    if (rp_state) {
7291        addCommandBufferBinding(&rp_state->cb_bindings,
7292                                {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state);
7293    }
7294    for (auto attachment : fb_state->attachments) {
7295        auto view_state = attachment.view_state;
7296        if (view_state) {
7297            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
7298        }
7299    }
7300}
7301
7302VKAPI_ATTR VkResult VKAPI_CALL
7303BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
7304    bool skip_call = false;
7305    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7306    std::unique_lock<std::mutex> lock(global_lock);
7307    // Validate command buffer level
7308    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
7309    if (cb_node) {
7310        // Beginning implicitly resets the command buffer, so make sure any fence tracking it has completed, then clear memory references
7311        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
7312            skip_call |=
7313                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7314                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00104, "MEM",
7315                        "Calling vkBeginCommandBuffer() on active command buffer 0x%p before it has completed. "
7316                        "You must check the command buffer's fence before this call. %s",
7317                        commandBuffer, validation_error_map[VALIDATION_ERROR_00104]);
7318        }
7319        clear_cmd_buf_and_mem_references(dev_data, cb_node);
7320        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
7321            // Secondary Command Buffer
7322            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
7323            if (!pInfo) {
7324                skip_call |=
7325                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7326                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00106, "DS",
7327                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info. %s",
7328                            commandBuffer, validation_error_map[VALIDATION_ERROR_00106]);
7329            } else {
7330                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
7331                    // Object_tracker makes sure these objects are valid
7332                    assert(pInfo->renderPass);
7333                    assert(pInfo->framebuffer);
7334                    string errorString = "";
7335                    auto framebuffer = getFramebufferState(dev_data, pInfo->framebuffer);
7336                    if (framebuffer) {
7337                        if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
7338                            !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
7339                                                             getRenderPassState(dev_data, pInfo->renderPass)->createInfo.ptr(),
7340                                                             errorString)) {
7341                            // renderPass that framebuffer was created with must be compatible with local renderPass
7342                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7343                                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7344                                                 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00112, "DS",
7345                                                 "vkBeginCommandBuffer(): Secondary Command "
7346                                                 "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
7347                                                 "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s. %s",
7348                                                 commandBuffer, reinterpret_cast<const uint64_t &>(pInfo->renderPass),
7349                                                 reinterpret_cast<const uint64_t &>(pInfo->framebuffer),
7350                                                 reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass),
7351                                                 errorString.c_str(), validation_error_map[VALIDATION_ERROR_00112]);
7352                        }
7353                        // Connect this framebuffer and its children to this cmdBuffer
7354                        AddFramebufferBinding(dev_data, cb_node, framebuffer);
7355                    }
7356                }
7357                if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
7358                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
7359                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7360                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
7361                                         __LINE__, VALIDATION_ERROR_00107, "DS",
7362                                         "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
7363                                         "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device does not "
7364                                         "support precise occlusion queries. %s",
7365                                         commandBuffer, validation_error_map[VALIDATION_ERROR_00107]);
7366                }
7367            }
7368            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
7369                auto renderPass = getRenderPassState(dev_data, pInfo->renderPass);
7370                if (renderPass) {
7371                    if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
7372                        skip_call |= log_msg(
7373                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7374                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7375                            VALIDATION_ERROR_00111, "DS",
7376                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have a subpass index (%d) "
7377                            "that is less than the number of subpasses (%d). %s",
7378                            commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount,
7379                            validation_error_map[VALIDATION_ERROR_00111]);
7380                    }
7381                }
7382            }
7383        }
7384        if (CB_RECORDING == cb_node->state) {
7385            skip_call |=
7386                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7387                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00103, "DS",
7388                        "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%p"
7389                        ") in the RECORDING state. Must first call vkEndCommandBuffer(). %s",
7390                        commandBuffer, validation_error_map[VALIDATION_ERROR_00103]);
7391        } else if (CB_RECORDED == cb_node->state || (CB_INVALID == cb_node->state && CMD_END == cb_node->last_cmd)) {
7392            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
7393            auto pPool = getCommandPoolNode(dev_data, cmdPool);
7394            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
7395                skip_call |=
7396                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7397                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00105, "DS",
7398                            "Call to vkBeginCommandBuffer() on command buffer (0x%p"
7399                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
7400                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
7401                            commandBuffer, (uint64_t)cmdPool, validation_error_map[VALIDATION_ERROR_00105]);
7402            }
7403            resetCB(dev_data, commandBuffer);
7404        }
7405        // Set updated state here in case implicit reset occurs above
7406        cb_node->state = CB_RECORDING;
7407        cb_node->beginInfo = *pBeginInfo;
7408        if (cb_node->beginInfo.pInheritanceInfo) {
7409            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
7410            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
7411            // If this is a secondary command buffer that inherits state, update the items it should inherit.
7412            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
7413                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
7414                cb_node->activeRenderPass = getRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
7415                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
7416                cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
7417                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
7418            }
7419        }
7420    }
7421    lock.unlock();
7422    if (skip_call) {
7423        return VK_ERROR_VALIDATION_FAILED_EXT;
7424    }
7425    VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
7426
7427    return result;
7428}
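
// Illustrative (not from this file) app-side setup that satisfies the secondary-CB checks above;
// render_pass, framebuffer, and secondary_cb are placeholder handles:
//   VkCommandBufferInheritanceInfo inherit = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO};
//   inherit.renderPass = render_pass;   // must be compatible with the framebuffer's render pass
//   inherit.subpass = 0;                // must be less than the render pass's subpassCount
//   inherit.framebuffer = framebuffer;  // optional; VK_NULL_HANDLE if not known yet
//   VkCommandBufferBeginInfo begin = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//   begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//   begin.pInheritanceInfo = &inherit;
//   vkBeginCommandBuffer(secondary_cb, &begin);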
7429
7430VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
7431    bool skip_call = false;
7432    VkResult result = VK_SUCCESS;
7433    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7434    std::unique_lock<std::mutex> lock(global_lock);
7435    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7436    if (pCB) {
7437        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
7438            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
7439            // This needs spec clarification to update valid usage, see comments in PR:
7440            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
7441            skip_call |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_00123);
7442        }
7443        skip_call |= ValidateCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
7444        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_END);
7445        for (auto query : pCB->activeQueries) {
7446            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7447                                 VALIDATION_ERROR_00124, "DS",
7448                                 "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d. %s",
7449                                 (uint64_t)(query.pool), query.index, validation_error_map[VALIDATION_ERROR_00124]);
7450        }
7451    }
7452    if (!skip_call) {
7453        lock.unlock();
7454        result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
7455        lock.lock();
7456        if (VK_SUCCESS == result) {
7457            pCB->state = CB_RECORDED;
7458            // Reset CB status flags
7459            pCB->status = 0;
7460        }
7461    } else {
7462        result = VK_ERROR_VALIDATION_FAILED_EXT;
7463    }
7464    lock.unlock();
7465    return result;
7466}
7467
7468VKAPI_ATTR VkResult VKAPI_CALL
7469ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
7470    bool skip_call = false;
7471    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7472    std::unique_lock<std::mutex> lock(global_lock);
7473    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7474    VkCommandPool cmdPool = pCB->createInfo.commandPool;
7475    auto pPool = getCommandPoolNode(dev_data, cmdPool);
7476    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
7477        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7478                             (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00093, "DS",
7479                             "Attempt to reset command buffer (0x%p) created from command pool (0x%" PRIxLEAST64
7480                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
7481                             commandBuffer, (uint64_t)cmdPool, validation_error_map[VALIDATION_ERROR_00093]);
7482    }
7483    skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_00092);
7484    lock.unlock();
7485    if (skip_call)
7486        return VK_ERROR_VALIDATION_FAILED_EXT;
7487    VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
7488    if (VK_SUCCESS == result) {
7489        lock.lock();
7490        dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
7491        resetCB(dev_data, commandBuffer);
7492        lock.unlock();
7493    }
7494    return result;
7495}
7496
7497VKAPI_ATTR void VKAPI_CALL
7498CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
7499    bool skip = false;
7500    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7501    std::unique_lock<std::mutex> lock(global_lock);
7502    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
7503    if (cb_state) {
7504        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
7505        UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_BINDPIPELINE);
7506        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (cb_state->activeRenderPass)) {
7507            skip |=
7508                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7509                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
7510                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
7511                        (uint64_t)pipeline, (uint64_t)cb_state->activeRenderPass->renderPass);
7512        }
7513        // TODO: VALIDATION_ERROR_00594 VALIDATION_ERROR_00596
7514
7515        PIPELINE_STATE *pipe_state = getPipelineState(dev_data, pipeline);
7516        if (pipe_state) {
7517            cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
7518            set_cb_pso_status(cb_state, pipe_state);
7519            set_pipeline_state(pipe_state);
7520            addCommandBufferBinding(&pipe_state->cb_bindings,
7521                                    {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT}, cb_state);
7522            if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
7523                // Add binding for child renderpass
7524                auto rp_state = getRenderPassState(dev_data, pipe_state->graphicsPipelineCI.renderPass);
7525                if (rp_state) {
7526                    addCommandBufferBinding(
7527                        &rp_state->cb_bindings,
7528                        {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state);
7529                }
7530            }
7531        } else { // Unknown pipeline: report it and skip the binding updates above, which would dereference a null pipe_state
7532            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7533                            (uint64_t)pipeline, __LINE__, VALIDATION_ERROR_00600, "DS",
7534                            "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist! %s", (uint64_t)(pipeline),
7535                            validation_error_map[VALIDATION_ERROR_00600]);
7536        }
7537    }
7538    lock.unlock();
7539    if (!skip)
7540        dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
7541}
7542
7543VKAPI_ATTR void VKAPI_CALL
7544CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
7545    bool skip_call = false;
7546    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7547    std::unique_lock<std::mutex> lock(global_lock);
7548    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7549    if (pCB) {
7550        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
7551        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE);
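        // Record which viewport slots this command buffer has set dynamically: ((1u << viewportCount) - 1u)
        // yields viewportCount low bits and the shift positions them, e.g. firstViewport=1, viewportCount=2
        // marks slots 1 and 2 (mask 0b110)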
7552        pCB->viewportMask |= ((1u<<viewportCount) - 1u) << firstViewport;
7553    }
7554    lock.unlock();
7555    if (!skip_call)
7556        dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
7557}
7558
7559VKAPI_ATTR void VKAPI_CALL
7560CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
7561    bool skip_call = false;
7562    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7563    std::unique_lock<std::mutex> lock(global_lock);
7564    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7565    if (pCB) {
7566        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
7567        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSCISSORSTATE);
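        // Same bit trick as viewportMask above, marking scissor slots [firstScissor, firstScissor + scissorCount)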
7568        pCB->scissorMask |= ((1u<<scissorCount) - 1u) << firstScissor;
7569    }
7570    lock.unlock();
7571    if (!skip_call)
7572        dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
7573}
7574
7575VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
7576    bool skip_call = false;
7577    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7578    std::unique_lock<std::mutex> lock(global_lock);
7579    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7580    if (pCB) {
7581        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
7582        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE);
7583        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
7584
7585        PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
7586        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
7587            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7588                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, VALIDATION_ERROR_01476, "DS",
7589                                 "vkCmdSetLineWidth() called but the bound pipeline was created without the "
7590                                 "VK_DYNAMIC_STATE_LINE_WIDTH dynamic state, so the call has undefined behavior and may be ignored. %s",
7591                                 validation_error_map[VALIDATION_ERROR_01476]);
7592        } else {
7593            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
7594        }
7595    }
7596    lock.unlock();
7597    if (!skip_call)
7598        dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
7599}
7600
7601VKAPI_ATTR void VKAPI_CALL
7602CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
7603    bool skip_call = false;
7604    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7605    std::unique_lock<std::mutex> lock(global_lock);
7606    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7607    if (pCB) {
7608        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
7609        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE);
7610        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
7611    }
7612    lock.unlock();
7613    if (!skip_call)
7614        dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
7615}
7616
7617VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
7618    bool skip_call = false;
7619    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7620    std::unique_lock<std::mutex> lock(global_lock);
7621    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7622    if (pCB) {
7623        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
7624        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETBLENDSTATE);
7625        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
7626    }
7627    lock.unlock();
7628    if (!skip_call)
7629        dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
7630}
7631
7632VKAPI_ATTR void VKAPI_CALL
7633CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
7634    bool skip_call = false;
7635    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7636    std::unique_lock<std::mutex> lock(global_lock);
7637    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7638    if (pCB) {
7639        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
7640        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE);
7641        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
7642    }
7643    lock.unlock();
7644    if (!skip_call)
7645        dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
7646}
7647
7648VKAPI_ATTR void VKAPI_CALL
7649CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
7650    bool skip_call = false;
7651    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7652    std::unique_lock<std::mutex> lock(global_lock);
7653    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7654    if (pCB) {
7655        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
7656        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE);
7657        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
7658    }
7659    lock.unlock();
7660    if (!skip_call)
7661        dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
7662}
7663
7664VKAPI_ATTR void VKAPI_CALL
7665CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
7666    bool skip_call = false;
7667    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7668    std::unique_lock<std::mutex> lock(global_lock);
7669    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7670    if (pCB) {
7671        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
7672        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE);
7673        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
7674    }
7675    lock.unlock();
7676    if (!skip_call)
7677        dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
7678}
7679
7680VKAPI_ATTR void VKAPI_CALL
7681CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
7682    bool skip_call = false;
7683    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7684    std::unique_lock<std::mutex> lock(global_lock);
7685    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7686    if (pCB) {
7687        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
7688        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE);
7689        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
7690    }
7691    lock.unlock();
7692    if (!skip_call)
7693        dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
7694}
7695
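// Validate vkCmdBindDescriptorSets(): each incoming set must be compatible with the pipeline layout at
// its index, every dynamic descriptor must receive a properly aligned offset from pDynamicOffsets, and
// previously bound sets disturbed by this bind are invalidated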
7696VKAPI_ATTR void VKAPI_CALL
7697CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
7698                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
7699                      const uint32_t *pDynamicOffsets) {
7700    bool skip_call = false;
7701    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7702    std::unique_lock<std::mutex> lock(global_lock);
7703    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7704    if (pCB) {
7705        if (pCB->state == CB_RECORDING) {
7706            // Track total count of dynamic descriptor types to make sure we have an offset for each one
7707            uint32_t totalDynamicDescriptors = 0;
7708            string errorString = "";
7709            uint32_t lastSetIndex = firstSet + setCount - 1;
7710            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
7711                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7712                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
7713            }
7714            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
7715            auto pipeline_layout = getPipelineLayout(dev_data, layout);
7716            for (uint32_t i = 0; i < setCount; i++) {
7717                cvdescriptorset::DescriptorSet *descriptor_set = getSetNode(dev_data, pDescriptorSets[i]);
7718                if (descriptor_set) {
7719                    pCB->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
7720                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = descriptor_set;
7721                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7722                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7723                                         DRAWSTATE_NONE, "DS", "Descriptor Set 0x%" PRIxLEAST64 " bound on pipeline %s",
7724                                         (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
7725                    if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
7726                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7727                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7728                                             DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
7729                                             "Descriptor Set 0x%" PRIxLEAST64
7730                                             " bound but it was never updated. You may want to either update it or not bind it.",
7731                                             (uint64_t)pDescriptorSets[i]);
7732                    }
7733                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
7734                    if (!verify_set_layout_compatibility(dev_data, descriptor_set, pipeline_layout, i + firstSet, errorString)) {
7735                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7736                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7737                                             VALIDATION_ERROR_00974, "DS",
7738                                             "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
7739                                             "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s. %s",
7740                                             i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str(),
7741                                             validation_error_map[VALIDATION_ERROR_00974]);
7742                    }
7743
7744                    auto setDynamicDescriptorCount = descriptor_set->GetDynamicDescriptorCount();
7745
7746                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();
7747
7748                    if (setDynamicDescriptorCount) {
7749                        // First make sure we won't overstep bounds of pDynamicOffsets array
7750                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
7751                            skip_call |=
7752                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7753                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7754                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7755                                        "descriptorSet #%u (0x%" PRIxLEAST64
7756                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7757                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7758                                        i, (uint64_t)pDescriptorSets[i], descriptor_set->GetDynamicDescriptorCount(),
7759                                        (dynamicOffsetCount - totalDynamicDescriptors));
7760                        } else { // Validate and store dynamic offsets with the set
7761                            // Validate Dynamic Offset Minimums
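                            // E.g. with minUniformBufferOffsetAlignment == 0x100, dynamic uniform offsets
                            // 0x0, 0x100, 0x200 pass while 0x80 triggers VALIDATION_ERROR_00978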
7762                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
7763                            for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
7764                                if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7765                                    if (vk_safe_modulo(
7766                                            pDynamicOffsets[cur_dyn_offset],
7767                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
7768                                        skip_call |= log_msg(
7769                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7770                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_00978,
7771                                            "DS", "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
7772                                                  "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
7773                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7774                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
7775                                            validation_error_map[VALIDATION_ERROR_00978]);
7776                                    }
7777                                    cur_dyn_offset++;
7778                                } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7779                                    if (vk_safe_modulo(
7780                                            pDynamicOffsets[cur_dyn_offset],
7781                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
7782                                        skip_call |= log_msg(
7783                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7784                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_00978,
7785                                            "DS", "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
7786                                                  "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
7787                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7788                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment,
7789                                            validation_error_map[VALIDATION_ERROR_00978]);
7790                                    }
7791                                    cur_dyn_offset++;
7792                                }
7793                            }
7794
7795                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
7796                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
7797                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
7798                            // Keep running total of dynamic descriptor count to verify at the end
7799                            totalDynamicDescriptors += setDynamicDescriptorCount;
7800
7801                        }
7802                    }
7803                } else {
7804                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7805                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7806                                         DRAWSTATE_INVALID_SET, "DS", "Attempt to bind descriptor set 0x%" PRIxLEAST64
7807                                         " that doesn't exist!",
7808                                         (uint64_t)pDescriptorSets[i]);
7809                }
7810                skip_call |= ValidateCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7811                UpdateCmdBufferLastCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS);
7812                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7813                if (firstSet > 0) { // Check set #s below the first bound set
7814                    for (uint32_t i = 0; i < firstSet; ++i) {
7815                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7816                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
7817                                                             pipeline_layout, i, errorString)) {
7818                            skip_call |= log_msg(
7819                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7820                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7821                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
7822                                "DescriptorSet 0x%" PRIxLEAST64
7823                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
7824                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7825                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7826                        }
7827                    }
7828                }
7829                // Check if newly last bound set invalidates any remaining bound sets
7830                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
7831                    if (oldFinalBoundSet &&
7832                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, pipeline_layout, lastSetIndex, errorString)) {
7833                        auto old_set = oldFinalBoundSet->GetSet();
7834                        skip_call |=
7835                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7836                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
7837                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
7838                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
7839                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
7840                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
7841                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
7842                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7843                                    lastSetIndex + 1, (uint64_t)layout);
7844                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7845                    }
7846                }
7847            }
7848            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7849            if (totalDynamicDescriptors != dynamicOffsetCount) {
7850                skip_call |=
7851                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7852                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00975, "DS",
7853                            "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7854                            "is %u. It should exactly match the number of dynamic descriptors. %s",
7855                            setCount, totalDynamicDescriptors, dynamicOffsetCount, validation_error_map[VALIDATION_ERROR_00975]);
7856            }
7857        } else {
7858            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7859        }
7860    }
7861    lock.unlock();
7862    if (!skip_call)
7863        dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7864                                                       pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7865}
7866
7867VKAPI_ATTR void VKAPI_CALL
7868CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7869    bool skip_call = false;
7870    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7871    // TODO : Somewhere need to verify that IBs have correct usage state flagged
7872    std::unique_lock<std::mutex> lock(global_lock);
7873
7874    auto buffer_state = getBufferState(dev_data, buffer);
7875    auto cb_node = getCBNode(dev_data, commandBuffer);
7876    if (cb_node && buffer_state) {
7877        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_02543);
7878        std::function<bool()> function = [=]() {
7879            return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
7880        };
7881        cb_node->validate_functions.push_back(function);
7882        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7883        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER);
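        // The bind offset must be a multiple of the index size: 2 bytes for UINT16 indices, 4 for UINT32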
7884        VkDeviceSize offset_align = 0;
7885        switch (indexType) {
7886        case VK_INDEX_TYPE_UINT16:
7887            offset_align = 2;
7888            break;
7889        case VK_INDEX_TYPE_UINT32:
7890            offset_align = 4;
7891            break;
7892        default:
7893            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
7894            break;
7895        }
7896        if (!offset_align || (offset % offset_align)) {
7897            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7898                                 DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7899                                 "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
7900                                 offset, string_VkIndexType(indexType));
7901        }
7902        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7903    } else {
7904        assert(0);
7905    }
7906    lock.unlock();
7907    if (!skip_call)
7908        dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7909}
7910
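// Record the buffers bound to slots [firstBinding, firstBinding + bindingCount) so subsequent draws can
// be associated with the vertex buffers they consume; the tracking vector grows on demand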
7911void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7912    uint32_t end = firstBinding + bindingCount;
7913    if (pCB->currentDrawData.buffers.size() < end) {
7914        pCB->currentDrawData.buffers.resize(end);
7915    }
7916    for (uint32_t i = 0; i < bindingCount; ++i) {
7917        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7918    }
7919}
7920
7921static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7922
7923VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7924                                                uint32_t bindingCount, const VkBuffer *pBuffers,
7925                                                const VkDeviceSize *pOffsets) {
7926    bool skip_call = false;
7927    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7928    // TODO : Somewhere need to verify that VBs have correct usage state flagged
7929    std::unique_lock<std::mutex> lock(global_lock);
7930
7931    auto cb_node = getCBNode(dev_data, commandBuffer);
7932    if (cb_node) {
7933        for (uint32_t i = 0; i < bindingCount; ++i) {
7934            auto buffer_state = getBufferState(dev_data, pBuffers[i]);
7935            assert(buffer_state);
7936            skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_02546);
7937            std::function<bool()> function = [=]() {
7938                return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
7939            };
7940            cb_node->validate_functions.push_back(function);
7941        }
7942        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7943        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER);
7944        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
7945    } else {
7946        skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
7947    }
7948    lock.unlock();
7949    if (!skip_call)
7950        dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7951}
7952
7953// Expects global_lock to be held by caller
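// Queues deferred callbacks that mark every storage image and buffer updated by this CB as having
// valid contents once the command buffer actually executes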
7954static void MarkStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
7955    for (auto imageView : pCB->updateImages) {
7956        auto view_state = getImageViewState(dev_data, imageView);
7957        if (!view_state)
7958            continue;
7959
7960        auto image_state = getImageState(dev_data, view_state->create_info.image);
7961        assert(image_state);
7962        std::function<bool()> function = [=]() {
7963            SetImageMemoryValid(dev_data, image_state, true);
7964            return false;
7965        };
7966        pCB->validate_functions.push_back(function);
7967    }
7968    for (auto buffer : pCB->updateBuffers) {
7969        auto buffer_state = getBufferState(dev_data, buffer);
7970        assert(buffer_state);
7971        std::function<bool()> function = [=]() {
7972            SetBufferMemoryValid(dev_data, buffer_state, true);
7973            return false;
7974        };
7975        pCB->validate_functions.push_back(function);
7976    }
7977}
7978
7979// Generic function to handle validation for all CmdDraw* type functions
7980static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
7981                                CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller,
7982                                UNIQUE_VALIDATION_ERROR_CODE msg_code, UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
7983    bool skip = false;
7984    *cb_state = getCBNode(dev_data, cmd_buffer);
7985    if (*cb_state) {
7986        skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
7987        skip |= ValidateDrawState(dev_data, *cb_state, indexed, bind_point, caller, dynamic_state_msg_code);
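        // Graphics draws must be recorded inside a render pass (outsideRenderPass flags the violation);
        // compute dispatches must be recorded outside one (insideRenderPass flags the converse)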
7988        skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
7989                                                                : insideRenderPass(dev_data, *cb_state, caller, msg_code);
7990    }
7991    return skip;
7992}
7993
7994// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
7995static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
7996                                           CMD_TYPE cmd_type) {
7997    UpdateDrawState(dev_data, cb_state, bind_point);
7998    MarkStoreImagesAndBuffersAsWritten(dev_data, cb_state);
7999    UpdateCmdBufferLastCmd(dev_data, cb_state, cmd_type);
8000}
8001
8002// Generic function to handle state update for all CmdDraw* type functions
8003static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
8004                                   CMD_TYPE cmd_type, DRAW_TYPE draw_type) {
8005    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, cmd_type);
8006    updateResourceTrackingOnDraw(cb_state);
8007    cb_state->drawCount[draw_type]++;
8008}
8009
8010static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
8011                                   GLOBAL_CB_NODE **cb_state, const char *caller) {
8012    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VALIDATION_ERROR_01365,
8013                               VALIDATION_ERROR_02203);
8014}
8015
8016static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
8017    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAW, DRAW);
8018}
8019
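// Each CmdDraw*/CmdDispatch* entry point validates under global_lock, releases it around the down-chain
// call, then re-acquires it to record post-call state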
8020VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
8021                                   uint32_t firstVertex, uint32_t firstInstance) {
8022    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8023    GLOBAL_CB_NODE *cb_state = nullptr;
8024    std::unique_lock<std::mutex> lock(global_lock);
8025    bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
8026    lock.unlock();
8027    if (!skip) {
8028        dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
8029        lock.lock();
8030        PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
8031        lock.unlock();
8032    }
8033}
8034
8035static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
8036                                          VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
8037    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VALIDATION_ERROR_01372,
8038                               VALIDATION_ERROR_02216);
8039}
8040
8041static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
8042    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXED, DRAW_INDEXED);
8043}
8044
8045VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
8046                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
8047                                          uint32_t firstInstance) {
8048    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8049    GLOBAL_CB_NODE *cb_state = nullptr;
8050    std::unique_lock<std::mutex> lock(global_lock);
8051    bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
8052                                              "vkCmdDrawIndexed()");
8053    lock.unlock();
8054    if (!skip) {
8055        dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
8056        lock.lock();
8057        PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
8058        lock.unlock();
8059    }
8060}
8061
8062static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
8063                                           VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
8064                                           const char *caller) {
8065    bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller,
8066                                    VALIDATION_ERROR_01381, VALIDATION_ERROR_02234);
8067    *buffer_state = getBufferState(dev_data, buffer);
8068    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02544);
8069    return skip;
8070}
8071
8072static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
8073                                          BUFFER_STATE *buffer_state) {
8074    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDIRECT, DRAW_INDIRECT);
8075    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
8076}
8077
8078VKAPI_ATTR void VKAPI_CALL
8079CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
8080    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8081    GLOBAL_CB_NODE *cb_state = nullptr;
8082    BUFFER_STATE *buffer_state = nullptr;
8083    std::unique_lock<std::mutex> lock(global_lock);
8084    bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
8085                                               &buffer_state, "vkCmdDrawIndirect()");
8086    lock.unlock();
8087    if (!skip) {
8088        dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
8089        lock.lock();
8090        PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
8091        lock.unlock();
8092    }
8093}
8094
8095static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
8096                                                  VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
8097                                                  BUFFER_STATE **buffer_state, const char *caller) {
8098    bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
8099                                    VALIDATION_ERROR_01393, VALIDATION_ERROR_02272);
8100    *buffer_state = getBufferState(dev_data, buffer);
8101    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02545);
8102    return skip;
8103}
8104
8105static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
8106                                                 BUFFER_STATE *buffer_state) {
8107    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXEDINDIRECT, DRAW_INDEXED_INDIRECT);
8108    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
8109}
8110
8111VKAPI_ATTR void VKAPI_CALL
8112CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
8113    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8114    GLOBAL_CB_NODE *cb_state = nullptr;
8115    BUFFER_STATE *buffer_state = nullptr;
8116    std::unique_lock<std::mutex> lock(global_lock);
8117    bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
8118                                                      &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
8119    lock.unlock();
8120    if (!skip) {
8121        dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
8122        lock.lock();
8123        PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
8124        lock.unlock();
8125    }
8126}
8127
8128static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
8129                                       VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
8130    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VALIDATION_ERROR_01562,
8131                               VALIDATION_ERROR_UNDEFINED);
8132}
8133
8134static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
8135    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCH);
8136}
8137
8138VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
8139    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8140    GLOBAL_CB_NODE *cb_state = nullptr;
8141    std::unique_lock<std::mutex> lock(global_lock);
8142    bool skip =
8143        PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
8144    lock.unlock();
8145    if (!skip) {
8146        dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
8147        lock.lock();
8148        PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
8149        lock.unlock();
8150    }
8151}
8152
8153static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
8154                                               VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
8155                                               BUFFER_STATE **buffer_state, const char *caller) {
8156    bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller,
8157                                    VALIDATION_ERROR_01569, VALIDATION_ERROR_UNDEFINED);
8158    *buffer_state = getBufferState(dev_data, buffer);
8159    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02547);
8160    return skip;
8161}
8162
8163static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
8164                                              BUFFER_STATE *buffer_state) {
8165    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCHINDIRECT);
8166    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
8167}
8168
8169VKAPI_ATTR void VKAPI_CALL
8170CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
8171    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8172    GLOBAL_CB_NODE *cb_state = nullptr;
8173    BUFFER_STATE *buffer_state = nullptr;
8174    std::unique_lock<std::mutex> lock(global_lock);
8175    bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
8176                                                   &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
8177    lock.unlock();
8178    if (!skip) {
8179        dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
8180        lock.lock();
8181        PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
8182        lock.unlock();
8183    }
8184}
8185
VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_buff_state = getBufferState(dev_data, srcBuffer);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && src_buff_state && dst_buff_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_state, "vkCmdCopyBuffer()", VALIDATION_ERROR_02531);
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyBuffer()", VALIDATION_ERROR_02532);
        // Update bindings between buffers and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_state);
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that SRC & DST buffers have correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                              VALIDATION_ERROR_01164, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              VALIDATION_ERROR_01165, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");

        std::function<bool()> function = [=]() {
            return ValidateBufferMemoryIsValid(dev_data, src_buff_state, "vkCmdCopyBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYBUFFER);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer()", VALIDATION_ERROR_01172);
    } else {
        // Param_checker will flag errors on invalid objects, just assert here as debugging aid
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}

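// CmdCopyBuffer above defers part of its work: whether source memory actually holds valid data cannot be known at record
// time, so the lambdas pushed onto cb_node->validate_functions are parked on the command buffer and run at queue submit.
// A minimal sketch of how such deferred checks are replayed (the loop is illustrative; the real walk lives in this
// layer's queue-submit validation):
static inline bool RunDeferredValidation_Sketch(GLOBAL_CB_NODE *cb_node) {
    bool skip = false;
    for (auto &validate : cb_node->validate_functions) {
        skip |= validate();  // each lambda returns true if validation failed
    }
    return skip;
}
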
static bool VerifySourceImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage srcImage,
                                    VkImageSubresourceLayers subLayers, VkImageLayout srcImageLayout,
                                    UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool skip_call = false;

    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(cb_node, srcImage, sub, node)) {
            SetLayout(cb_node, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
            continue;
        }
        if (node.layout != srcImageLayout) {
            // TODO: Improve log message in the next pass
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
                                                                        "and doesn't match the current layout %s.",
                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            // TODO : Can we deal with image node from the top of call tree and avoid map look-up here?
            auto image_state = getImageState(dev_data, srcImage);
            if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                     "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
            }
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 msgCode, "DS", "Layout for input image is %s but can only be TRANSFER_SRC_OPTIMAL or GENERAL. %s",
                                 string_VkImageLayout(srcImageLayout), validation_error_map[msgCode]);
        }
    }
    return skip_call;
}

static bool VerifyDestImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage destImage,
                                  VkImageSubresourceLayers subLayers, VkImageLayout destImageLayout,
                                  UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool skip_call = false;

    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(cb_node, destImage, sub, node)) {
            SetLayout(cb_node, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
            continue;
        }
        if (node.layout != destImageLayout) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose dest layout is %s and "
                                                                        "doesn't match the current layout %s.",
                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            auto image_state = getImageState(dev_data, destImage);
            if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                     "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
            }
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 msgCode, "DS", "Layout for output image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL. %s",
                                 string_VkImageLayout(destImageLayout), validation_error_map[msgCode]);
        }
    }
    return skip_call;
}

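// Both layout helpers above share a first-use pattern: when FindLayout misses, the subresource has not been touched in
// this command buffer yet, so SetLayout seeds the per-command-buffer tracking with the caller-declared layout and the
// check passes by definition; only later touches are compared against the tracked value. A sketch of that rule for a
// single subresource (illustrative wrapper, not a layer API):
static inline bool RecordOrCheckLayout_Sketch(GLOBAL_CB_NODE *cb_node, VkImage image, VkImageSubresource sub,
                                              VkImageLayout declared_layout) {
    IMAGE_CMD_BUF_LAYOUT_NODE node;
    if (!FindLayout(cb_node, image, sub, node)) {
        // First use in this command buffer: trust the caller's declaration
        SetLayout(cb_node, image, sub, IMAGE_CMD_BUF_LAYOUT_NODE(declared_layout, declared_layout));
        return true;
    }
    return node.layout == declared_layout;  // later uses must match what was previously declared
}
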
static bool VerifyClearImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage image, VkImageSubresourceRange range,
                                   VkImageLayout dest_image_layout, const char *func_name) {
    bool skip = false;

    VkImageSubresourceRange resolvedRange = range;
    ResolveRemainingLevelsLayers(dev_data, &resolvedRange, image);

    if (dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
        if (dest_image_layout == VK_IMAGE_LAYOUT_GENERAL) {
            auto image_state = getImageState(dev_data, image);
            if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "%s: Layout for cleared image should be TRANSFER_DST_OPTIMAL instead of GENERAL.", func_name);
            }
        } else {
            UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_01086;
            if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
                error_code = VALIDATION_ERROR_01101;
            } else {
                assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
            }
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            error_code, "DS", "%s: Layout for cleared image is %s but can only be "
                                              "TRANSFER_DST_OPTIMAL or GENERAL. %s",
                            func_name, string_VkImageLayout(dest_image_layout), validation_error_map[error_code]);
        }
    }

    for (uint32_t levelIdx = 0; levelIdx < resolvedRange.levelCount; ++levelIdx) {
        uint32_t level = levelIdx + resolvedRange.baseMipLevel;
        for (uint32_t layerIdx = 0; layerIdx < resolvedRange.layerCount; ++layerIdx) {
            uint32_t layer = layerIdx + resolvedRange.baseArrayLayer;
            VkImageSubresource sub = {resolvedRange.aspectMask, level, layer};
            IMAGE_CMD_BUF_LAYOUT_NODE node;
            if (!FindLayout(cb_node, image, sub, node)) {
                SetLayout(cb_node, image, sub, IMAGE_CMD_BUF_LAYOUT_NODE(dest_image_layout, dest_image_layout));
                continue;
            }
            if (node.layout != dest_image_layout) {
                UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_01085;
                if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
                    error_code = VALIDATION_ERROR_01100;
                } else {
                    assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
                }
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, error_code, "DS", "%s: Cannot clear an image whose layout is %s and "
                                                        "doesn't match the current layout %s. %s",
                            func_name, string_VkImageLayout(dest_image_layout), string_VkImageLayout(node.layout),
                            validation_error_map[error_code]);
            }
        }
    }

    return skip;
}

// Test if two VkExtent3D structs are equivalent
static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
    bool result = true;
    if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
        (extent->depth != other_extent->depth)) {
        result = false;
    }
    return result;
}

// Returns the image extent of a specific subresource.
static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_STATE *img, const VkImageSubresourceLayers *subresource) {
    const uint32_t mip = subresource->mipLevel;
    VkExtent3D extent = img->createInfo.extent;
    extent.width = std::max(1U, extent.width >> mip);
    extent.height = std::max(1U, extent.height >> mip);
    extent.depth = std::max(1U, extent.depth >> mip);
    return extent;
}

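// Worked example of the mip scaling above (hypothetical values): for an image created 16x16x1, mip 0 is 16x16x1, mip 2 is
// 4x4x1, and mip 5 clamps to 1x1x1, because each dimension is right-shifted once per level but never drops below 1.
static inline VkExtent3D MipExtent_Sketch(VkExtent3D base, uint32_t mip) {
    base.width = std::max(1U, base.width >> mip);
    base.height = std::max(1U, base.height >> mip);
    base.depth = std::max(1U, base.depth >> mip);
    return base;  // {16, 16, 1} with mip == 2 yields {4, 4, 1}
}
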
// Test if the extent argument has all dimensions set to 0.
static inline bool IsExtentZero(const VkExtent3D *extent) {
    return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
}

// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
static inline VkExtent3D GetScaledItg(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img) {
    // Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
    VkExtent3D granularity = {0, 0, 0};
    auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
    if (pPool) {
        granularity = dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
        if (vk_format_is_compressed(img->createInfo.format)) {
            auto block_size = vk_format_compressed_block_size(img->createInfo.format);
            granularity.width *= block_size.width;
            granularity.height *= block_size.height;
        }
    }
    return granularity;
}

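// Worked example of the scaling above (hypothetical numbers): a queue family reporting a minImageTransferGranularity of
// (1, 1, 1), used with a BC1-style compressed format whose texel block is 4x4, yields an effective granularity of
// (4, 4, 1), since copies on compressed images must land on block boundaries. Depth is left unscaled because Vulkan
// compressed texel blocks have a depth of 1.
static inline VkExtent3D ScaleGranularityByBlock_Sketch(VkExtent3D granularity, VkExtent3D block_size) {
    granularity.width *= block_size.width;    // e.g. 1 * 4 = 4
    granularity.height *= block_size.height;  // e.g. 1 * 4 = 4
    return granularity;
}
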
// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
    bool valid = true;
    if ((vk_safe_modulo(extent->depth, granularity->depth) != 0) || (vk_safe_modulo(extent->width, granularity->width) != 0) ||
        (vk_safe_modulo(extent->height, granularity->height) != 0)) {
        valid = false;
    }
    return valid;
}

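// IsExtentAligned leans on vk_safe_modulo, which returns 0 when the divisor is 0, so a (0, 0, 0) granularity always
// reports "aligned" here; callers special-case zero granularity themselves (see CheckItgOffset and CheckItgExtent below).
// A hypothetical example of the predicate:
static inline bool IsExtentAligned_Example() {
    VkExtent3D aligned = {8, 4, 1};      // every dimension divides evenly
    VkExtent3D misaligned = {8, 5, 1};   // 5 is not a multiple of 4
    VkExtent3D granularity = {4, 4, 1};  // hypothetical queue family granularity
    return IsExtentAligned(&aligned, &granularity) && !IsExtentAligned(&misaligned, &granularity);  // true
}
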
// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
static inline bool CheckItgOffset(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
                                  const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    VkExtent3D offset_extent = {};
    offset_extent.width = static_cast<uint32_t>(abs(offset->x));
    offset_extent.height = static_cast<uint32_t>(abs(offset->y));
    offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
    if (IsExtentZero(granularity)) {
        // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
        if (IsExtentZero(&offset_extent) == false) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) "
                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
                            function, i, member, offset->x, offset->y, offset->z);
        }
    } else {
        // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
        // integer multiples of the image transfer granularity.
        if (IsExtentAligned(&offset_extent, granularity) == false) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer "
                            "multiples of this command buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
                            function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
                            granularity->depth);
        }
    }
    return skip;
}

// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
static inline bool CheckItgExtent(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
                                  const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
                                  const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    if (IsExtentZero(granularity)) {
        // If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
        // subresource extent.
        if (IsExtentEqual(extent, subresource_extent) == false) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
                            function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
                            subresource_extent->height, subresource_extent->depth);
        }
    } else {
        // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even
        // integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
        // subresource extent dimensions.
        VkExtent3D offset_extent_sum = {};
        offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
        offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
        offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
        if ((IsExtentAligned(extent, granularity) == false) && (IsExtentEqual(&offset_extent_sum, subresource_extent) == false)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command buffer's "
                        "queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
                        "extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
                        function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
                        granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
                        subresource_extent->width, subresource_extent->height, subresource_extent->depth);
        }
    }
    return skip;
}

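// The non-zero-granularity branch above boils down to a two-part rule, distilled here as a standalone predicate
// (illustrative only; the real check also emits the log message): with granularity (4, 4, 1) on a 16x16x1 subresource,
// extent (8, 8, 1) passes because every dimension is a granularity multiple, extent (7, 16, 1) at offset (9, 0, 0) passes
// because offset + extent reaches the subresource edge, and extent (7, 8, 1) at offset (0, 0, 0) fails both tests.
static inline bool ExtentSatisfiesGranularity_Sketch(VkExtent3D extent, VkOffset3D offset, VkExtent3D granularity,
                                                     VkExtent3D subresource_extent) {
    if (IsExtentAligned(&extent, &granularity)) return true;  // every dimension is a granularity multiple
    VkExtent3D sum = {static_cast<uint32_t>(abs(offset.x)) + extent.width,
                      static_cast<uint32_t>(abs(offset.y)) + extent.height,
                      static_cast<uint32_t>(abs(offset.z)) + extent.depth};
    return IsExtentEqual(&sum, &subresource_extent);  // or offset + extent reaches the subresource edge
}
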
// Check a uint32_t width or stride value against a queue family's Image Transfer Granularity width value
static inline bool CheckItgInt(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const uint32_t value,
                               const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    if (vk_safe_modulo(value, granularity) != 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (%d) must be an even integer multiple of this command buffer's queue family image "
                        "transfer granularity width (%d).",
                        function, i, member, value, granularity);
    }
    return skip;
}

// Check a VkDeviceSize value against a queue family's Image Transfer Granularity width value
static inline bool CheckItgSize(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkDeviceSize value,
                                const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    if (vk_safe_modulo(value, granularity) != 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (%" PRIdLEAST64
                        ") must be an even integer multiple of this command buffer's queue family image transfer "
                        "granularity width (%d).",
                        function, i, member, value, granularity);
    }
    return skip;
}

// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
static inline bool ValidateCopyImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
                                                                    const IMAGE_STATE *img, const VkImageCopy *region,
                                                                    const uint32_t i, const char *function) {
    bool skip = false;
    VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
    skip |= CheckItgOffset(dev_data, cb_node, &region->srcOffset, &granularity, i, function, "srcOffset");
    skip |= CheckItgOffset(dev_data, cb_node, &region->dstOffset, &granularity, i, function, "dstOffset");
    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->dstSubresource);
    skip |= CheckItgExtent(dev_data, cb_node, &region->extent, &region->dstOffset, &granularity, &subresource_extent, i, function,
                           "extent");
    return skip;
}

// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
static inline bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
                                                                          const IMAGE_STATE *img, const VkBufferImageCopy *region,
                                                                          const uint32_t i, const char *function) {
    bool skip = false;
    if (vk_format_is_compressed(img->createInfo.format) == true) {
        // TODO: Add granularity checking for compressed formats

        // bufferRowLength must be a multiple of the compressed texel block width
        // bufferImageHeight must be a multiple of the compressed texel block height
        // all members of imageOffset must be a multiple of the corresponding dimensions of the compressed texel block
        // bufferOffset must be a multiple of the compressed texel block size in bytes
        // imageExtent.width must be a multiple of the compressed texel block width or (imageExtent.width + imageOffset.x)
        //     must equal the image subresource width
        // imageExtent.height must be a multiple of the compressed texel block height or (imageExtent.height + imageOffset.y)
        //     must equal the image subresource height
        // imageExtent.depth must be a multiple of the compressed texel block depth or (imageExtent.depth + imageOffset.z)
        //     must equal the image subresource depth
    } else {
        VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
        skip |= CheckItgSize(dev_data, cb_node, region->bufferOffset, granularity.width, i, function, "bufferOffset");
        skip |= CheckItgInt(dev_data, cb_node, region->bufferRowLength, granularity.width, i, function, "bufferRowLength");
        skip |= CheckItgInt(dev_data, cb_node, region->bufferImageHeight, granularity.width, i, function, "bufferImageHeight");
        skip |= CheckItgOffset(dev_data, cb_node, &region->imageOffset, &granularity, i, function, "imageOffset");
        VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource);
        skip |= CheckItgExtent(dev_data, cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent, i,
                               function, "imageExtent");
    }
    return skip;
}

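// The TODO list above spells out the compressed-format rules. A sketch of how the first two might be phrased once
// implemented (illustrative only, not the layer's implementation; it assumes the block size comes from
// vk_format_compressed_block_size as in GetScaledItg, and the real checks would also need error codes and log_msg
// plumbing):
static inline bool CompressedBufferDimsOk_Sketch(const IMAGE_STATE *img, const VkBufferImageCopy *region) {
    auto block_size = vk_format_compressed_block_size(img->createInfo.format);
    // bufferRowLength must be a multiple of the block width and bufferImageHeight a multiple of the block height;
    // 0 means tightly packed and is trivially a multiple under vk_safe_modulo.
    return (vk_safe_modulo(region->bufferRowLength, block_size.width) == 0) &&
           (vk_safe_modulo(region->bufferImageHeight, block_size.height) == 0);
}
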
VKAPI_ATTR void VKAPI_CALL
CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_image_state = getImageState(dev_data, srcImage);
    auto dst_image_state = getImageState(dev_data, dstImage);
    if (cb_node && src_image_state && dst_image_state) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImage()", VALIDATION_ERROR_02533);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyImage()", VALIDATION_ERROR_02534);
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
        // Validate that SRC & DST images have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                             VALIDATION_ERROR_01178, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                             VALIDATION_ERROR_01181, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImage()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage()", VALIDATION_ERROR_01194);
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].srcSubresource, srcImageLayout,
                                                 VALIDATION_ERROR_01180);
            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].dstSubresource, dstImageLayout,
                                               VALIDATION_ERROR_01183);
            skip_call |= ValidateCopyImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i], i,
                                                                          "vkCmdCopyImage()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                              pRegions);
}

// Validate that an image's sampleCount matches the requirement for a specific API call
static inline bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
                                            const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool skip = false;
    if (image_state->createInfo.samples != sample_count) {
        skip =
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                    reinterpret_cast<uint64_t &>(image_state->image), __LINE__, msgCode, "DS",
                    "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s. %s", location,
                    reinterpret_cast<uint64_t &>(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
                    string_VkSampleCountFlagBits(sample_count), validation_error_map[msgCode]);
    }
    return skip;
}

VKAPI_ATTR void VKAPI_CALL
CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_image_state = getImageState(dev_data, srcImage);
    auto dst_image_state = getImageState(dev_data, dstImage);
    if (cb_node && src_image_state && dst_image_state) {
        skip_call |= ValidateImageSampleCount(dev_data, src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage",
                                              VALIDATION_ERROR_02194);
        skip_call |= ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage",
                                              VALIDATION_ERROR_02195);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdBlitImage()", VALIDATION_ERROR_02539);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdBlitImage()", VALIDATION_ERROR_02540);
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
        // Validate that SRC & DST images have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                             VALIDATION_ERROR_02182, "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                             VALIDATION_ERROR_02186, "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdBlitImage()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BLITIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBlitImage()", VALIDATION_ERROR_01300);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                              pRegions, filter);
}

VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
                                                VkImage dstImage, VkImageLayout dstImageLayout,
                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_buff_state = getBufferState(dev_data, srcBuffer);
    auto dst_image_state = getImageState(dev_data, dstImage);
    if (cb_node && src_buff_state && dst_image_state) {
        skip_call |= ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT,
                                              "vkCmdCopyBufferToImage(): dstImage", VALIDATION_ERROR_01232);
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_state, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_02535);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_02536);
        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_state);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
        skip_call |=
            ValidateBufferUsageFlags(dev_data, src_buff_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, VALIDATION_ERROR_01230,
                                     "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                             VALIDATION_ERROR_01231, "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, dst_image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() { return ValidateBufferMemoryIsValid(dev_data, src_buff_state, "vkCmdCopyBufferToImage()"); };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_01242);
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].imageSubresource, dstImageLayout,
                                               VALIDATION_ERROR_01234);
            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i], i,
                                                                                "vkCmdCopyBufferToImage()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
}

VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_image_state = getImageState(dev_data, srcImage);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && src_image_state && dst_buff_state) {
        skip_call |= ValidateImageSampleCount(dev_data, src_image_state, VK_SAMPLE_COUNT_1_BIT,
                                              "vkCmdCopyImageToBuffer(): srcImage", VALIDATION_ERROR_01249);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_02537);
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_02538);
        // Update bindings between buffer/image and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that SRC image & DST buffer have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                             VALIDATION_ERROR_01248, "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |=
            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01252,
                                     "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImageToBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_01260);
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].imageSubresource, srcImageLayout,
                                                 VALIDATION_ERROR_01251);
            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, src_image_state, &pRegions[i], i,
                                                                                "vkCmdCopyImageToBuffer()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
}

VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && dst_buff_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_02530);
        // Update bindings between buffer and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that DST buffer has correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              VALIDATION_ERROR_01146, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_UPDATEBUFFER);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()", VALIDATION_ERROR_01155);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}

VKAPI_ATTR void VKAPI_CALL
CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && dst_buff_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdFillBuffer()", VALIDATION_ERROR_02529);
        // Update bindings between buffer and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that DST buffer has correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              VALIDATION_ERROR_01137, "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_FILLBUFFER);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdFillBuffer()", VALIDATION_ERROR_01142);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}

// Returns true if sub_rect is entirely contained within rect
static inline bool ContainsRect(VkRect2D rect, VkRect2D sub_rect) {
    if ((sub_rect.offset.x < rect.offset.x) ||
        (sub_rect.offset.x + sub_rect.extent.width > rect.offset.x + rect.extent.width) ||
        (sub_rect.offset.y < rect.offset.y) ||
        (sub_rect.offset.y + sub_rect.extent.height > rect.offset.y + rect.extent.height))
        return false;
    return true;
}

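// Hypothetical example: with a render area at offset (0, 0) and extent 100x100, a clear rect at (10, 10) with extent
// 50x50 is contained, while one at (60, 60) with extent 50x50 is rejected because 60 + 50 exceeds 100.
static inline bool ContainsRect_Example() {
    VkRect2D render_area = {{0, 0}, {100, 100}};
    VkRect2D inside = {{10, 10}, {50, 50}};
    VkRect2D overhang = {{60, 60}, {50, 50}};
    return ContainsRect(render_area, inside) && !ContainsRect(render_area, overhang);  // true
}
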
VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
                                               const VkClearRect *pRects) {
    bool skip = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node) {
        skip |= ValidateCmd(dev_data, cb_node, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_CLEARATTACHMENTS);
        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
        if (!hasDrawCmd(cb_node) && (cb_node->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (cb_node->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
            // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
            // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
            // call CmdClearAttachments. Otherwise this seems more like a performance warning.
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t &>(commandBuffer), __LINE__,
                            DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
                            "vkCmdClearAttachments() issued on command buffer object 0x%p prior to any Draw Cmds."
                            " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
                            commandBuffer);
        }
        skip |= outsideRenderPass(dev_data, cb_node, "vkCmdClearAttachments()", VALIDATION_ERROR_01122);
    }

    // Validate that attachment is in reference list of active subpass
    if (cb_node && cb_node->activeRenderPass) {
        const VkRenderPassCreateInfo *renderpass_create_info = cb_node->activeRenderPass->createInfo.ptr();
        const VkSubpassDescription *subpass_desc = &renderpass_create_info->pSubpasses[cb_node->activeSubpass];
        auto framebuffer = getFramebufferState(dev_data, cb_node->activeFramebuffer);

        for (uint32_t i = 0; i < attachmentCount; i++) {
            auto clear_desc = &pAttachments[i];
            VkImageView image_view = VK_NULL_HANDLE;

            if (clear_desc->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
                if (clear_desc->colorAttachment >= subpass_desc->colorAttachmentCount) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_01114, "DS",
                        "vkCmdClearAttachments() color attachment index %d out of range for active subpass %d. %s",
                        clear_desc->colorAttachment, cb_node->activeSubpass, validation_error_map[VALIDATION_ERROR_01114]);
                } else if (subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment == VK_ATTACHMENT_UNUSED) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
                                    DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                                    "vkCmdClearAttachments() color attachment index %d is VK_ATTACHMENT_UNUSED; ignored.",
                                    clear_desc->colorAttachment);
                } else {
                    image_view = framebuffer->createInfo
                                     .pAttachments[subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment];
                }
            } else if (clear_desc->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                // Either the active subpass has no depth/stencil attachment, or it is marked VK_ATTACHMENT_UNUSED
                if (!subpass_desc->pDepthStencilAttachment ||
                    (subpass_desc->pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED)) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
                                DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                                "vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored");
                } else {
                    image_view = framebuffer->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment];
                }
            }

            if (image_view) {
                auto image_view_state = getImageViewState(dev_data, image_view);
                for (uint32_t j = 0; j < rectCount; j++) {
                    // The rectangular region specified by a given element of pRects must be contained within the render area of
                    // the current render pass instance
                    if (false == ContainsRect(cb_node->activeRenderPassBeginInfo.renderArea, pRects[j].rect)) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, VALIDATION_ERROR_01115, "DS",
                                        "vkCmdClearAttachments(): The area defined by pRects[%d] is not contained in the area of "
                                        "the current render pass instance. %s",
                                        j, validation_error_map[VALIDATION_ERROR_01115]);
                    }
                    // The layers specified by a given element of pRects must be contained within every attachment that
                    // pAttachments refers to
                    auto attachment_base_array_layer = image_view_state->create_info.subresourceRange.baseArrayLayer;
                    auto attachment_layer_count = image_view_state->create_info.subresourceRange.layerCount;
                    if ((pRects[j].baseArrayLayer < attachment_base_array_layer) ||
                        (pRects[j].layerCount > attachment_layer_count)) {
                        skip |=
                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, __LINE__, VALIDATION_ERROR_01116, "DS",
                                    "vkCmdClearAttachments(): The layers defined in pRects[%d] are not contained in the layers of "
                                    "pAttachment[%d]. %s",
                                    j, i, validation_error_map[VALIDATION_ERROR_01116]);
                    }
                }
            }
        }
    }
    lock.unlock();
    if (!skip)
        dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}

VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto image_state = getImageState(dev_data, image);
    if (cb_node && image_state) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCmdClearColorImage()", VALIDATION_ERROR_02527);
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearColorImage()", VALIDATION_ERROR_01096);
        // Only check the ranges once the command buffer and image are known-valid; the layout helper dereferences both
        for (uint32_t i = 0; i < rangeCount; ++i) {
            skip_call |= VerifyClearImageLayout(dev_data, cb_node, image, pRanges[i], imageLayout, "vkCmdClearColorImage()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}

VKAPI_ATTR void VKAPI_CALL
CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                          const VkImageSubresourceRange *pRanges) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto image_state = getImageState(dev_data, image);
    if (cb_node && image_state) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCmdClearDepthStencilImage()", VALIDATION_ERROR_02528);
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearDepthStencilImage()", VALIDATION_ERROR_01111);
        // Only check the ranges once the command buffer and image are known-valid; the layout helper dereferences both
        for (uint32_t i = 0; i < rangeCount; ++i) {
            skip_call |= VerifyClearImageLayout(dev_data, cb_node, image, pRanges[i], imageLayout, "vkCmdClearDepthStencilImage()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
}

VKAPI_ATTR void VKAPI_CALL
CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_image_state = getImageState(dev_data, srcImage);
    auto dst_image_state = getImageState(dev_data, dstImage);
    if (cb_node && src_image_state && dst_image_state) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdResolveImage()", VALIDATION_ERROR_02541);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdResolveImage()", VALIDATION_ERROR_02542);
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdResolveImage()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_RESOLVEIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdResolveImage()", VALIDATION_ERROR_01335);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                                 pRegions);
}

bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->eventToStageMap[event] = stageMask;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
    }
    return false;
}

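// setEventStageMask is not invoked for the queue-side map at record time; CmdSetEvent/CmdResetEvent below bind it with
// the queue argument left open and park the callable on pCB->eventUpdates, to be invoked with the actual queue at submit.
// A sketch of that replay (the loop is illustrative; the real invocation happens in this layer's queue-submit handling):
static inline bool ReplayEventUpdates_Sketch(VkQueue queue, GLOBAL_CB_NODE *pCB) {
    bool skip = false;
    for (auto &event_update : pCB->eventUpdates) {
        skip |= event_update(queue);  // e.g. setEventStageMask bound to a specific command buffer/event/stageMask
    }
    return skip;
}
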
VKAPI_ATTR void VKAPI_CALL
CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETEVENT);
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_00238);
        auto event_state = getEventNode(dev_data, event);
        if (event_state) {
            addCommandBufferBinding(&event_state->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
            event_state->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
}

9098VKAPI_ATTR void VKAPI_CALL
9099CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
9100    bool skip_call = false;
9101    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9102    std::unique_lock<std::mutex> lock(global_lock);
9103    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9104    if (pCB) {
9105        skip_call |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
9106        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_RESETEVENT);
9107        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_00249);
9108        auto event_state = getEventNode(dev_data, event);
9109        if (event_state) {
9110            addCommandBufferBinding(&event_state->cb_bindings,
9111                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
9112            event_state->cb_bindings.insert(pCB);
9113        }
9114        pCB->events.push_back(event);
9115        if (!pCB->waitedEvents.count(event)) {
9116            pCB->writeEventsBeforeWait.push_back(event);
9117        }
9118        std::function<bool(VkQueue)> eventUpdate =
9119            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
9120        pCB->eventUpdates.push_back(eventUpdate);
9121    }
9122    lock.unlock();
9123    if (!skip_call)
9124        dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
9125}
9126
9127static bool TransitionImageAspectLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkImageMemoryBarrier *mem_barrier,
9128                                        uint32_t level, uint32_t layer, VkImageAspectFlags aspect)
9129{
9130    if (!(mem_barrier->subresourceRange.aspectMask & aspect)) {
9131        return false;
9132    }
9133    VkImageSubresource sub = {aspect, level, layer};
9134    IMAGE_CMD_BUF_LAYOUT_NODE node;
9135    if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
9136        SetLayout(pCB, mem_barrier->image, sub,
9137                  IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
9138        return false;
9139    }
9140    bool skip = false;
9141    if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
9142        // TODO: Set memory invalid which is in mem_tracker currently
9143    } else if (node.layout != mem_barrier->oldLayout) {
9144        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9145                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9146                        "Cannot transition the layout of aspect %d from %s when the current layout is %s.",
9147                        aspect, string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
9148    }
9149    SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
9150    return skip;
9151}
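
// Illustrative walkthrough of the logic above: the first barrier recorded for a given
// (image, level, layer, aspect) in a command buffer only seeds the tracked layout via SetLayout
// and cannot fail. A later barrier in the same command buffer whose oldLayout neither matches
// the tracked layout nor is VK_IMAGE_LAYOUT_UNDEFINED triggers DRAWSTATE_INVALID_IMAGE_LAYOUT.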
9152
9153// TODO: Separate validation and layout state updates
9154static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
9155                                   const VkImageMemoryBarrier *pImgMemBarriers) {
9156    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9157    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9158    bool skip = false;
9159    uint32_t levelCount = 0;
9160    uint32_t layerCount = 0;
9161
9162    for (uint32_t i = 0; i < memBarrierCount; ++i) {
9163        auto mem_barrier = &pImgMemBarriers[i];
9164        if (!mem_barrier)
9165            continue;
9166        // TODO: Do not iterate over every possibility - consolidate where
9167        // possible
9168        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
9169
9170        for (uint32_t j = 0; j < levelCount; j++) {
9171            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
9172            for (uint32_t k = 0; k < layerCount; k++) {
9173                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
9174                skip |= TransitionImageAspectLayout(dev_data, pCB, mem_barrier, level, layer, VK_IMAGE_ASPECT_COLOR_BIT);
9175                skip |= TransitionImageAspectLayout(dev_data, pCB, mem_barrier, level, layer, VK_IMAGE_ASPECT_DEPTH_BIT);
9176                skip |= TransitionImageAspectLayout(dev_data, pCB, mem_barrier, level, layer, VK_IMAGE_ASPECT_STENCIL_BIT);
9177                skip |= TransitionImageAspectLayout(dev_data, pCB, mem_barrier, level, layer, VK_IMAGE_ASPECT_METADATA_BIT);
9178            }
9179        }
9180    }
9181    return skip;
9182}
9183
9184// Print readable FlagBits in FlagMask
9185static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
9186    std::string result;
9187    std::string separator;
9188
9189    if (accessMask == 0) {
9190        result = "[None]";
9191    } else {
9192        result = "[";
9193        for (uint32_t i = 0; i < 32; i++) {
9194            if (accessMask & (1u << i)) {
9195                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
9196                separator = " | ";
9197            }
9198        }
9199        result = result + "]";
9200    }
9201    return result;
9202}
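
// Usage sketch (illustrative): bits are emitted in ascending bit-position order, so
//     string_VkAccessFlags(VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT)
// yields "[VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT]", and
//     string_VkAccessFlags(0)
// yields "[None]".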
9203
9204// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
9205// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
9206// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
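// Illustrative example of the rule, using the COLOR_ATTACHMENT_OPTIMAL case from
// ValidateMaskBitsFromLayouts below (required_bit = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT):
//     WRITE | READ          -> accepted (required bit plus an optional bit)
//     READ alone            -> warning: required bit missing
//     WRITE | TRANSFER_READ -> warning: TRANSFER_READ falls outside required | optional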
9207static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
9208                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
9209                             const char *type) {
9210    bool skip_call = false;
9211
9212    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
9213        if (accessMask & ~(required_bit | optional_bits)) {
9214            // TODO: Verify against Valid Use
9215            skip_call |=
9216                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9217                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
9218                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
9219        }
9220    } else {
9221        if (!required_bit) {
9222            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9223                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
9224                                                                  "%s when layout is %s, unless the app has previously added a "
9225                                                                  "barrier for this transition.",
9226                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
9227                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
9228        } else {
9229            std::string opt_bits;
9230            if (optional_bits != 0) {
9231                std::stringstream ss;
9232                ss << optional_bits;
9233                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
9234            }
9235            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9236                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
9237                                                                  "layout is %s, unless the app has previously added a barrier for "
9238                                                                  "this transition.",
9239                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
9240                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
9241        }
9242    }
9243    return skip_call;
9244}
9245
9246static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
9247                                        const VkImageLayout &layout, const char *type) {
9248    bool skip_call = false;
9249    switch (layout) {
9250    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
9251        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
9252                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
9253        break;
9254    }
9255    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
9256        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
9257                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
9258        break;
9259    }
9260    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
9261        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
9262        break;
9263    }
9264    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
9265        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
9266                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
9267                                      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
9268        break;
9269    }
9270    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
9271        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
9272                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
9273        break;
9274    }
9275    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
9276        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
9277        break;
9278    }
9279    case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: {
9280        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_MEMORY_READ_BIT, 0, type);
9281        break;
9282    }
9283    case VK_IMAGE_LAYOUT_UNDEFINED: {
9284        if (accessMask != 0) {
9285            // TODO: Verify against Valid Use section spec
9286            skip_call |=
9287                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9288                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
9289                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
9290        }
9291        break;
9292    }
9293    case VK_IMAGE_LAYOUT_GENERAL:
9294    default: { break; }
9295    }
9296    return skip_call;
9297}
9298
9299static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
9300                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
9301                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
9302                             const VkImageMemoryBarrier *pImageMemBarriers) {
9303    bool skip = false;
9304    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9305    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9306    if (pCB->activeRenderPass && memBarrierCount) {
9307        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
9308            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9309                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
9310                                                             "with no self dependency specified.",
9311                            funcName, pCB->activeSubpass);
9312        }
9313    }
9314    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
9315        auto mem_barrier = &pImageMemBarriers[i];
9316        auto image_data = getImageState(dev_data, mem_barrier->image);
9317        if (image_data) {
9318            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
9319            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
9320            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
9321                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
9322                // be VK_QUEUE_FAMILY_IGNORED
9323                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
9324                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9325                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
9326                                    "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
9327                                    "VK_SHARING_MODE_CONCURRENT. Src and dst "
9328                                    "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
9329                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
9330                }
9331            } else {
9332                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
9333                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
9334                // or both be a valid queue family
9335                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
9336                    (src_q_f_index != dst_q_f_index)) {
9337                    skip |=
9338                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9339                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
9340                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
9341                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
9342                                                                     "must be.",
9343                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
9344                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
9345                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
9346                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
9347                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9348                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
9349                                    "%s: Image 0x%" PRIx64 " was created with sharingMode "
9350                                    "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
9351                                    " or dstQueueFamilyIndex %d is greater than or equal to the " PRINTF_SIZE_T_SPECIFIER
9352                                    " queueFamilies created for this device.",
9353                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index, dst_q_f_index,
9354                                    dev_data->phys_dev_properties.queue_family_properties.size());
9355                }
9356            }
9357        }
9358
9359        if (mem_barrier) {
9360            if (mem_barrier->oldLayout != mem_barrier->newLayout) {
9361                skip |=
9362                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
9363                skip |=
9364                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
9365            }
9366            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
9367                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9368                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
9369                                                                 "PREINITIALIZED.",
9370                                funcName);
9371            }
9372            auto image_data = getImageState(dev_data, mem_barrier->image);
9373            VkFormat format = VK_FORMAT_UNDEFINED;
9374            uint32_t arrayLayers = 0, mipLevels = 0;
9375            bool imageFound = false;
9376            if (image_data) {
9377                format = image_data->createInfo.format;
9378                arrayLayers = image_data->createInfo.arrayLayers;
9379                mipLevels = image_data->createInfo.mipLevels;
9380                imageFound = true;
9381            } else if (dev_data->device_extensions.wsi_enabled) {
9382                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
9383                if (imageswap_data) {
9384                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
9385                    if (swapchain_data) {
9386                        format = swapchain_data->createInfo.imageFormat;
9387                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
9388                        mipLevels = 1;
9389                        imageFound = true;
9390                    }
9391                }
9392            }
9393            if (imageFound) {
9394                auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
9395                skip |= ValidateImageAspectMask(dev_data, mem_barrier->image, format, aspect_mask, funcName);
9396                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
9397                                     ? 1
9398                                     : mem_barrier->subresourceRange.layerCount;
9399                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
9400                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9401                                    __LINE__, DRAWSTATE_INVALID_BARRIER, "DS", "%s: The sum of the subresource's "
9402                                                                               "baseArrayLayer (%d) and layerCount (%d) must be "
9403                                                                               "less than or equal to the total number of layers (%d).",
9404                                    funcName, mem_barrier->subresourceRange.baseArrayLayer,
9405                                    mem_barrier->subresourceRange.layerCount, arrayLayers);
9406                }
9407                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
9408                                     ? 1
9409                                     : mem_barrier->subresourceRange.levelCount;
9410                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
9411                    skip |= log_msg(
9412                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9413                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: The sum of the subresource's baseMipLevel "
9414                                                         "(%d) and levelCount (%d) must be less than or equal to "
9415                                                         "the total number of levels (%d).",
9416                        funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount, mipLevels);
9417                }
9418            }
9419        }
9420    }
9421    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
9422        auto mem_barrier = &pBufferMemBarriers[i];
9423        if (pCB->activeRenderPass) {
9424            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9425                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
9426        }
9427        if (!mem_barrier)
9428            continue;
9429
9430        // Validate buffer barrier queue family indices
9431        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
9432             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
9433            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
9434             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
9435            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9436                            DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
9437                            "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater than or "
9438                            "equal to the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
9439                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
9440                            dev_data->phys_dev_properties.queue_family_properties.size());
9441        }
9442
9443        auto buffer_state = getBufferState(dev_data, mem_barrier->buffer);
9444        if (buffer_state) {
9445            auto buffer_size = buffer_state->requirements.size;
9446            if (mem_barrier->offset >= buffer_size) {
9447                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9448                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64
9449                                                                 " which is not less than total size 0x%" PRIx64 ".",
9450                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
9451                                reinterpret_cast<const uint64_t &>(mem_barrier->offset),
9452                                reinterpret_cast<const uint64_t &>(buffer_size));
9453            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
9454                skip |= log_msg(
9455                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9456                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
9457                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
9458                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
9459                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
9460                    reinterpret_cast<const uint64_t &>(buffer_size));
9461            }
9462        }
9463    }
9464    return skip;
9465}
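
// Worked example for the buffer-barrier range checks above (illustrative sizes): for a buffer
// whose memory requirements report size 256,
//     offset = 256                       -> error: offset is not less than the total size
//     offset = 128, size = 192           -> error: 128 + 192 = 320 > 256
//     offset = 128, size = VK_WHOLE_SIZE -> accepted by these checks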
9466
9467bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
9468    bool skip_call = false;
9469    VkPipelineStageFlags stageMask = 0;
9470    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
9471    for (uint32_t i = 0; i < eventCount; ++i) {
9472        auto event = pCB->events[firstEventIndex + i];
9473        auto queue_data = dev_data->queueMap.find(queue);
9474        if (queue_data == dev_data->queueMap.end())
9475            return false;
9476        auto event_data = queue_data->second.eventToStageMap.find(event);
9477        if (event_data != queue_data->second.eventToStageMap.end()) {
9478            stageMask |= event_data->second;
9479        } else {
9480            auto global_event_data = getEventNode(dev_data, event);
9481            if (!global_event_data) {
9482                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
9483                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
9484                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
9485                                     reinterpret_cast<const uint64_t &>(event));
9486            } else {
9487                stageMask |= global_event_data->stageMask;
9488            }
9489        }
9490    }
9491    // TODO: Need to validate that host_bit is only set if set event is called
9492    // but set event can be called at any time.
9493    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
9494        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9495                             VALIDATION_ERROR_00254, "DS", "Submitting command buffer with call to vkCmdWaitEvents "
9496                                                           "using srcStageMask 0x%X which must be the bitwise "
9497                                                           "OR of the stageMask parameters used in calls to "
9498                                                           "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
9499                                                           "used with vkSetEvent but instead is 0x%X. %s",
9500                             sourceStageMask, stageMask, validation_error_map[VALIDATION_ERROR_00254]);
9501    }
9502    return skip_call;
9503}
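
// Illustrative example: if the waited events were set with stageMasks TOP_OF_PIPE and TRANSFER,
// the accumulated stageMask is (TOP_OF_PIPE | TRANSFER), so a vkCmdWaitEvents srcStageMask equal
// to that value passes. (TOP_OF_PIPE | TRANSFER | HOST) is also accepted, covering events that
// may instead be signaled from the host with vkSetEvent.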
9504
9505// Note that we only check bits that HAVE required queueflags -- don't care entries are skipped
9506static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
9507    {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
9508    {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
9509    {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
9510    {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
9511    {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
9512    {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
9513    {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
9514    {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
9515    {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
9516    {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
9517    {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
9518    {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
9519    {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
9520    {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
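
// Illustrative reading of the table above: VK_PIPELINE_STAGE_VERTEX_SHADER_BIT maps to
// VK_QUEUE_GRAPHICS_BIT, so using that stage on a queue family reporting only
// VK_QUEUE_TRANSFER_BIT fails the compatibility check below. Stages absent from the table and
// from stage_flag_bit_array (e.g. TOP_OF_PIPE, BOTTOM_OF_PIPE, HOST) are "don't care" entries
// and are never flagged.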
9521
9522static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
9523                                                            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
9524                                                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
9525                                                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
9526                                                            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
9527                                                            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
9528                                                            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
9529                                                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
9530                                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
9531                                                            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
9532                                                            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
9533                                                            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
9534                                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
9535                                                            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
9536
9537bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
9538                                      VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
9539                                      UNIQUE_VALIDATION_ERROR_CODE error_code) {
9540    bool skip = false;
9541    // Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags
9542    for (const auto &item : stage_flag_bit_array) {
9543        if (stage_mask & item) {
9544            if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
9545                skip |=
9546                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9547                            reinterpret_cast<uint64_t &>(command_buffer), __LINE__, error_code, "DL",
9548                            "%s(): %s flag %s is not compatible with the queue family properties of this "
9549                            "command buffer. %s",
9550                            function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)),
9551                            validation_error_map[error_code]);
9552            }
9553        }
9554    }
9555    return skip;
9556}
9557
9558bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE *cb_state,
9559                                                VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
9560                                                const char *function, UNIQUE_VALIDATION_ERROR_CODE error_code) {
9561    bool skip = false;
9562    uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
9563    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
9564    auto physical_device_state = getPhysicalDeviceState(instance_data, dev_data->physical_device);
9565
9566    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
9567    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
9568    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
9569
9570    if (queue_family_index < physical_device_state->queue_family_properties.size()) {
9571        VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
9572
9573        if ((source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
9574            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
9575                                                     function, "srcStageMask", error_code);
9576        }
9577        if ((dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
9578            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
9579                                                     function, "dstStageMask", error_code);
9580        }
9581    }
9582    return skip;
9583}
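
// Illustrative example: recording vkCmdPipelineBarrier with srcStageMask =
// VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT into a command buffer allocated from a compute-only
// queue family is flagged here, since the table maps FRAGMENT_SHADER to VK_QUEUE_GRAPHICS_BIT.
// A mask containing VK_PIPELINE_STAGE_ALL_COMMANDS_BIT bypasses the per-bit check entirely.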
9584
9585VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
9586                                         VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
9587                                         uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
9588                                         uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
9589                                         uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
9590    bool skip = false;
9591    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9592    std::unique_lock<std::mutex> lock(global_lock);
9593    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
9594    if (cb_state) {
9595        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, "vkCmdWaitEvents",
9596                                                           VALIDATION_ERROR_02510);
9597        auto first_event_index = cb_state->events.size();
9598        for (uint32_t i = 0; i < eventCount; ++i) {
9599            auto event_state = getEventNode(dev_data, pEvents[i]);
9600            if (event_state) {
9601                addCommandBufferBinding(&event_state->cb_bindings,
9602                                        {reinterpret_cast<const uint64_t &>(pEvents[i]), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT},
9603                                        cb_state);
9604                event_state->cb_bindings.insert(cb_state);
9605            }
9606            cb_state->waitedEvents.insert(pEvents[i]);
9607            cb_state->events.push_back(pEvents[i]);
9608        }
9609        std::function<bool(VkQueue)> event_update =
9610            std::bind(validateEventStageMask, std::placeholders::_1, cb_state, eventCount, first_event_index, sourceStageMask);
9611        cb_state->eventUpdates.push_back(event_update);
9612        if (cb_state->state == CB_RECORDING) {
9613            skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
9614            UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_WAITEVENTS);
9615        } else {
9616            skip |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
9617        }
9618        skip |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
9619        skip |= ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
9620                                 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
9621    }
9622    lock.unlock();
9623    if (!skip)
9624        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
9625                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
9626                                               imageMemoryBarrierCount, pImageMemoryBarriers);
9627}
9628
9629VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
9630                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
9631                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
9632                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
9633                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
9634    bool skip = false;
9635    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9636    std::unique_lock<std::mutex> lock(global_lock);
9637    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
9638    if (cb_state) {
9639        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, srcStageMask, dstStageMask, "vkCmdPipelineBarrier",
9640                                                           VALIDATION_ERROR_02513);
9641        skip |= ValidateCmd(dev_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
9642        UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_PIPELINEBARRIER);
9643        skip |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
9644        skip |= ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers,
9645                                 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
9646    }
9647    lock.unlock();
9648    if (!skip)
9649        dev_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
9650                                                    pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
9651                                                    imageMemoryBarrierCount, pImageMemoryBarriers);
9652}
9653
9654bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
9655    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9656    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9657    if (pCB) {
9658        pCB->queryToStateMap[object] = value;
9659    }
9660    auto queue_data = dev_data->queueMap.find(queue);
9661    if (queue_data != dev_data->queueMap.end()) {
9662        queue_data->second.queryToStateMap[object] = value;
9663    }
9664    return false;
9665}
9666
9667VKAPI_ATTR void VKAPI_CALL
9668CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
9669    bool skip_call = false;
9670    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9671    std::unique_lock<std::mutex> lock(global_lock);
9672    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9673    if (pCB) {
9674        QueryObject query = {queryPool, slot};
9675        pCB->activeQueries.insert(query);
9676        if (!pCB->startedQueries.count(query)) {
9677            pCB->startedQueries.insert(query);
9678        }
9679        skip_call |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
9680        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_BEGINQUERY);
9681        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9682                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9683    }
9684    lock.unlock();
9685    if (!skip_call)
9686        dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
9687}
9688
9689VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
9690    bool skip_call = false;
9691    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9692    std::unique_lock<std::mutex> lock(global_lock);
9693    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9694    if (pCB) {
9695        QueryObject query = {queryPool, slot};
9696        if (!pCB->activeQueries.count(query)) {
9697            skip_call |=
9698                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9699                        VALIDATION_ERROR_01041, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d. %s",
9700                        (uint64_t)(queryPool), slot, validation_error_map[VALIDATION_ERROR_01041]);
9701        } else {
9702            pCB->activeQueries.erase(query);
9703        }
9704        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9705        pCB->queryUpdates.push_back(queryUpdate);
9706        if (pCB->state == CB_RECORDING) {
9707            skip_call |= ValidateCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
9708            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_ENDQUERY);
9709        } else {
9710            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
9711        }
9712        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9713                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9714    }
9715    lock.unlock();
9716    if (!skip_call)
9717        dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
9718}
9719
9720VKAPI_ATTR void VKAPI_CALL
9721CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
9722    bool skip_call = false;
9723    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9724    std::unique_lock<std::mutex> lock(global_lock);
9725    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9726    if (pCB) {
9727        for (uint32_t i = 0; i < queryCount; i++) {
9728            QueryObject query = {queryPool, firstQuery + i};
9729            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
9730            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
9731            pCB->queryUpdates.push_back(queryUpdate);
9732        }
9733        if (pCB->state == CB_RECORDING) {
9734            skip_call |= ValidateCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
9735            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_RESETQUERYPOOL);
9736        } else {
9737            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
9738        }
9739        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool()", VALIDATION_ERROR_01025);
9740        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9741                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9742    }
9743    lock.unlock();
9744    if (!skip_call)
9745        dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
9746}
9747
9748bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
9749    bool skip_call = false;
9750    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
9751    auto queue_data = dev_data->queueMap.find(queue);
9752    if (queue_data == dev_data->queueMap.end())
9753        return false;
9754    for (uint32_t i = 0; i < queryCount; i++) {
9755        QueryObject query = {queryPool, firstQuery + i};
9756        auto query_data = queue_data->second.queryToStateMap.find(query);
9757        bool fail = false;
9758        if (query_data != queue_data->second.queryToStateMap.end()) {
9759            if (!query_data->second) {
9760                fail = true;
9761            }
9762        } else {
9763            auto global_query_data = dev_data->queryToStateMap.find(query);
9764            if (global_query_data != dev_data->queryToStateMap.end()) {
9765                if (!global_query_data->second) {
9766                    fail = true;
9767                }
9768            } else {
9769                fail = true;
9770            }
9771        }
9772        if (fail) {
9773            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9774                                 DRAWSTATE_INVALID_QUERY, "DS",
9775                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
9776                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
9777        }
9778    }
9779    return skip_call;
9780}
9781
9782VKAPI_ATTR void VKAPI_CALL
9783CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
9784                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
9785    bool skip_call = false;
9786    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9787    std::unique_lock<std::mutex> lock(global_lock);
9788
9789    auto cb_node = getCBNode(dev_data, commandBuffer);
9790    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
9791    if (cb_node && dst_buff_state) {
9792        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_02526);
9793        // Update bindings between buffer and cmd buffer
9794        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
9795        // Validate that DST buffer has correct usage flags set
9796        skip_call |=
9797            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01066,
9798                                     "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
9799        std::function<bool()> function = [=]() {
9800            SetBufferMemoryValid(dev_data, dst_buff_state, true);
9801            return false;
9802        };
9803        cb_node->validate_functions.push_back(function);
9804        std::function<bool(VkQueue)> queryUpdate =
9805            std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
9806        cb_node->queryUpdates.push_back(queryUpdate);
9807        if (cb_node->state == CB_RECORDING) {
9808            skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
9809            UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS);
9810        } else {
9811            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
9812        }
9813        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_01074);
9814        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9815                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, cb_node);
9816    } else {
9817        assert(0);
9818    }
9819    lock.unlock();
9820    if (!skip_call)
9821        dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
9822                                                         stride, flags);
9823}
9824
9825VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
9826                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
9827                                            const void *pValues) {
9828    bool skip_call = false;
9829    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9830    std::unique_lock<std::mutex> lock(global_lock);
9831    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9832    if (pCB) {
9833        if (pCB->state == CB_RECORDING) {
9834            skip_call |= ValidateCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
9835            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_PUSHCONSTANTS);
9836        } else {
9837            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
9838        }
9839    }
9840    skip_call |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
9841    if (0 == stageFlags) {
9842        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9843                             VALIDATION_ERROR_00996, "DS", "vkCmdPushConstants() call has no stageFlags set. %s",
9844                             validation_error_map[VALIDATION_ERROR_00996]);
9845    }
9846
9847    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
9848    auto pipeline_layout = getPipelineLayout(dev_data, layout);
9849    // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
9850    // contained in the pipeline ranges.
9851    // Build a {start, end} span list for ranges with matching stage flags.
9852    const auto &ranges = pipeline_layout->push_constant_ranges;
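    // Worked example (illustrative values): ranges {offset = 0, size = 16} and
    // {offset = 12, size = 20} with matching stageFlags coalesce into the single span [0, 32),
    // so a push of offset = 8, size = 20 is accepted even though neither range alone contains it.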
9853    struct span {
9854        uint32_t start;
9855        uint32_t end;
9856    };
9857    std::vector<span> spans;
9858    spans.reserve(ranges.size());
9859    for (const auto &iter : ranges) {
9860        if (iter.stageFlags == stageFlags) {
9861            spans.push_back({iter.offset, iter.offset + iter.size});
9862        }
9863    }
9864    if (spans.size() == 0) {
9865        // There were no ranges that matched the stageFlags.
9866        skip_call |=
9867            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9868                    VALIDATION_ERROR_00988, "DS", "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
9869                                                  "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ". %s",
9870                    (uint32_t)stageFlags, (uint64_t)layout, validation_error_map[VALIDATION_ERROR_00988]);
9871    } else {
9872        // Sort span list by start value.
9873        struct comparer {
9874            bool operator()(struct span i, struct span j) { return i.start < j.start; }
9875        } my_comparer;
9876        std::sort(spans.begin(), spans.end(), my_comparer);
9877
9878        // Examine two spans at a time.
9879        std::vector<span>::iterator current = spans.begin();
9880        std::vector<span>::iterator next = current + 1;
9881        while (next != spans.end()) {
9882            if (current->end < next->start) {
9883                // There is a gap; cannot coalesce. Move to the next two spans.
9884                ++current;
9885                ++next;
9886            } else {
9887                // Coalesce the two spans.  The start of the next span
9888                // is within the current span, so pick the larger of
9889                // the end values to extend the current span.
9890                // Then delete the next span and set next to the span after it.
9891                current->end = max(current->end, next->end);
9892                next = spans.erase(next);
9893            }
9894        }
9895
9896        // Now we can check if the incoming range is within any of the spans.
9897        bool contained_in_a_range = false;
9898        for (uint32_t i = 0; i < spans.size(); ++i) {
9899            if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
9900                contained_in_a_range = true;
9901                break;
9902            }
9903        }
9904        if (!contained_in_a_range) {
9905            skip_call |= log_msg(
9906                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9907                VALIDATION_ERROR_00988, "DS", "vkCmdPushConstants() Push constant range [%d, %d) "
9908                                              "with stageFlags = 0x%" PRIx32 " "
9909                                              "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ". %s",
9910                offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout, validation_error_map[VALIDATION_ERROR_00988]);
9911        }
9912    }
9913    lock.unlock();
9914    if (!skip_call)
9915        dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
9916}
9917
9918VKAPI_ATTR void VKAPI_CALL
9919CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
9920    bool skip_call = false;
9921    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9922    std::unique_lock<std::mutex> lock(global_lock);
9923    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9924    if (pCB) {
9925        QueryObject query = {queryPool, slot};
9926        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9927        pCB->queryUpdates.push_back(queryUpdate);
9928        if (pCB->state == CB_RECORDING) {
9929            skip_call |= ValidateCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
9930            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_WRITETIMESTAMP);
9931        } else {
9932            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
9933        }
9934    }
9935    lock.unlock();
9936    if (!skip_call)
9937        dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
9938}
9939
9940static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
9941                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
9942                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
9943    bool skip_call = false;
9944
9945    for (uint32_t attach = 0; attach < count; attach++) {
9946        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
9947            // Attachment counts are verified elsewhere, but prevent an invalid access
9948            if (attachments[attach].attachment < fbci->attachmentCount) {
9949                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
9950                auto view_state = getImageViewState(dev_data, *image_view);
9951                if (view_state) {
9952                    auto image_state = getImageState(dev_data, view_state->create_info.image);
9953                    if (image_state != nullptr) {
9954                        if ((image_state->createInfo.usage & usage_flag) == 0) {
9955                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9956                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, error_code, "DS",
                                                 "vkCreateFramebuffer(): Framebuffer attachment %u was created from an image "
                                                 "whose usage flags do not include the required %s bit. %s",
9959                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag),
9960                                                 validation_error_map[error_code]);
9961                        }
9962                    }
9963                }
9964            }
9965        }
9966    }
9967    return skip_call;
9968}
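
// Illustrative usage (a sketch; names follow the calls made from ValidateFramebufferCreateInfo below):
//     skip_call |= MatchUsage(dev_data, subpass.colorAttachmentCount, subpass.pColorAttachments,
//                             fbci, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_00405);
// Any framebuffer attachment referenced as a color attachment whose backing image was created
// without VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT is flagged with the supplied error code.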
9969
9970// Validate VkFramebufferCreateInfo which includes:
9971// 1. attachmentCount equals renderPass attachmentCount
9972// 2. corresponding framebuffer and renderpass attachments have matching formats
9973// 3. corresponding framebuffer and renderpass attachments have matching sample counts
9974// 4. fb attachments only have a single mip level
9975// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
9977// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
9978// 8. fb dimensions are within physical device limits
9979static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
9980    bool skip_call = false;
9981
9982    auto rp_state = getRenderPassState(dev_data, pCreateInfo->renderPass);
9983    if (rp_state) {
9984        const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
9985        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
9986            skip_call |= log_msg(
9987                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9988                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00404, "DS",
9989                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
9990                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer. %s",
9991                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass),
9992                validation_error_map[VALIDATION_ERROR_00404]);
9993        } else {
9994            // attachmentCounts match, so make sure corresponding attachment details line up
9995            const VkImageView *image_views = pCreateInfo->pAttachments;
9996            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = getImageViewState(dev_data, image_views[i]);
                if (!view_state) {
                    // Handle validity is verified elsewhere; guard against a null view state before use
                    continue;
                }
                auto &ivci = view_state->create_info;
9999                if (ivci.format != rpci->pAttachments[i].format) {
10000                    skip_call |= log_msg(
10001                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
10002                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00408, "DS",
10003                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
10004                        "the format of "
10005                        "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
10006                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
10007                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_00408]);
10008                }
10009                const VkImageCreateInfo *ici = &getImageState(dev_data, ivci.image)->createInfo;
10010                if (ici->samples != rpci->pAttachments[i].samples) {
10011                    skip_call |= log_msg(
10012                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
10013                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00409, "DS",
10014                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
10015                        "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
10016                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
10017                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_00409]);
10018                }
10019                // Verify that view only has a single mip level
10020                if (ivci.subresourceRange.levelCount != 1) {
10021                    skip_call |=
10022                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
10023                                VALIDATION_ERROR_00411, "DS",
10024                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
                                "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer. %s",
10026                                i, ivci.subresourceRange.levelCount, validation_error_map[VALIDATION_ERROR_00411]);
10027                }
10028                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
10029                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
10030                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
10031                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
10032                    (mip_height < pCreateInfo->height)) {
10033                    skip_call |=
10034                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
10035                                DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
10036                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
10037                                "than the corresponding "
10038                                "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
10039                                "dimensions for "
10040                                "attachment #%u, framebuffer:\n"
10041                                "width: %u, %u\n"
10042                                "height: %u, %u\n"
10043                                "layerCount: %u, %u\n",
10044                                i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
10045                                pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
10046                }
10047                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
10048                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
10049                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
10050                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
10051                    skip_call |= log_msg(
10052                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
10053                        VALIDATION_ERROR_00412, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identity swizzle. All framebuffer "
10055                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
10056                        "r swizzle = %s\n"
10057                        "g swizzle = %s\n"
10058                        "b swizzle = %s\n"
10059                        "a swizzle = %s\n"
10060                        "%s",
10061                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
10062                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a),
10063                        validation_error_map[VALIDATION_ERROR_00412]);
10064                }
10065            }
10066        }
10067        // Verify correct attachment usage flags
10068        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
10069            // Verify input attachments:
10070            skip_call |=
10071                MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
10072                           pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_00407);
10073            // Verify color attachments:
10074            skip_call |=
10075                MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
10076                           pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_00405);
10077            // Verify depth/stencil attachments:
10078            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
10079                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
10080                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_00406);
10081            }
10082        }
10083    }
10084    // Verify FB dimensions are within physical device limits
10085    if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
10086        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
10087                             VALIDATION_ERROR_00413, "DS",
10088                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. "
10089                             "Requested width: %u, device max: %u\n"
10090                             "%s",
10091                             pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
10092                             validation_error_map[VALIDATION_ERROR_00413]);
10093    }
10094    if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
10095        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
10096                             VALIDATION_ERROR_00414, "DS",
10097                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. "
10098                             "Requested height: %u, device max: %u\n"
10099                             "%s",
10100                             pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
10101                             validation_error_map[VALIDATION_ERROR_00414]);
10102    }
10103    if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
10104        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
10105                             VALIDATION_ERROR_00415, "DS",
10106                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. "
10107                             "Requested layers: %u, device max: %u\n"
10108                             "%s",
10109                             pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers,
10110                             validation_error_map[VALIDATION_ERROR_00415]);
10111    }
10112    return skip_call;
10113}
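
// Worked example for check #5 above (illustrative): a 256x256 image viewed with baseMipLevel = 2
// provides a 64x64 attachment (mip_width = 256 >> 2), so creating a framebuffer with width or
// height greater than 64 from that view triggers the DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO
// message above.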
10114
// Validate VkFramebufferCreateInfo state prior to calling down the chain to create the Framebuffer object
//  Return true if an error is encountered and the callback requests skipping the call down the chain;
//  false indicates that the call down the chain should proceed
10118static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    // TODO : Verify that the renderPass the FB is created with is compatible with the FB
10120    bool skip_call = false;
10121    skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
10122    return skip_call;
10123}
10124
10125// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
10126static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
10127    // Shadow create info and store in map
10128    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
10129        new FRAMEBUFFER_STATE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));
10130
10131    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
10132        VkImageView view = pCreateInfo->pAttachments[i];
10133        auto view_state = getImageViewState(dev_data, view);
10134        if (!view_state) {
10135            continue;
10136        }
10137        MT_FB_ATTACHMENT_INFO fb_info;
10138        fb_info.mem = getImageState(dev_data, view_state->create_info.image)->binding.mem;
10139        fb_info.view_state = view_state;
10140        fb_info.image = view_state->create_info.image;
10141        fb_state->attachments.push_back(fb_info);
10142    }
10143    dev_data->frameBufferMap[fb] = std::move(fb_state);
10144}
10145
10146VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
10147                                                 const VkAllocationCallbacks *pAllocator,
10148                                                 VkFramebuffer *pFramebuffer) {
10149    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10150    std::unique_lock<std::mutex> lock(global_lock);
10151    bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
10152    lock.unlock();
10153
10154    if (skip_call)
10155        return VK_ERROR_VALIDATION_FAILED_EXT;
10156
10157    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
10158
10159    if (VK_SUCCESS == result) {
10160        lock.lock();
10161        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
10162        lock.unlock();
10163    }
10164    return result;
10165}
10166
10167static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
10168                           std::unordered_set<uint32_t> &processed_nodes) {
10169    // If we have already checked this node we have not found a dependency path so return false.
10170    if (processed_nodes.count(index))
10171        return false;
10172    processed_nodes.insert(index);
10173    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists, return true; otherwise recurse on the previous nodes.
10175    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
10176        for (auto elem : node.prev) {
10177            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
10178                return true;
10179        }
10180    } else {
10181        return true;
10182    }
10183    return false;
10184}
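
// Illustrative example: with explicit dependencies 0 -> 1 and 1 -> 2, subpass_to_node[2].prev is {1}
// and subpass_to_node[1].prev is {0}, so FindDependency(2, 0, ...) returns true via the recursion on
// prev even though no direct 0 -> 2 dependency was specified.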
10185
10186static bool CheckDependencyExists(const layer_data *dev_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
10187                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
10188    bool result = true;
10189    // Loop through all subpasses that share the same attachment and make sure a dependency exists
10190    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
10191        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
10192            continue;
10193        const DAGNode &node = subpass_to_node[subpass];
10194        // Check for a specified dependency between the two nodes. If one exists we are done.
10195        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
10196        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
10197        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no explicit dependency exists, an implicit one still might. If neither does, report an error.
10199            std::unordered_set<uint32_t> processed_nodes;
10200            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
10201                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
10202                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10203                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
10204                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
10205                                     dependent_subpasses[k]);
10206                result = false;
10207            }
10208        }
10209    }
10210    return result;
10211}
10212
10213static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
10214                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
10215    const DAGNode &node = subpass_to_node[index];
10216    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
10217    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
10218    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10219        if (attachment == subpass.pColorAttachments[j].attachment)
10220            return true;
10221    }
10222    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
10223        if (attachment == subpass.pDepthStencilAttachment->attachment)
10224            return true;
10225    }
10226    bool result = false;
10227    // Loop through previous nodes and see if any of them write to the attachment.
10228    for (auto elem : node.prev) {
10229        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
10230    }
    // If the attachment was written to by a previous node then this node needs to preserve it.
    if (result && depth > 0) {
10234        bool has_preserved = false;
10235        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
10236            if (subpass.pPreserveAttachments[j] == attachment) {
10237                has_preserved = true;
10238                break;
10239            }
10240        }
10241        if (!has_preserved) {
10242            skip_call |=
10243                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10244                        DRAWSTATE_INVALID_RENDERPASS, "DS",
10245                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
10246        }
10247    }
10248    return result;
10249}
10250
10251template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    // Treat the ranges as half-open intervals [offset, offset + size): they overlap exactly when each
    // one starts before the other ends. (The previous strict-inequality test missed identical ranges
    // and full containment.)
    return ((offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1)));
10254}
10255
10256bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
10257    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
10258            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
10259}
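
// Worked example (illustrative): mip ranges (base 0, count 2) and (base 1, count 2) overlap because
// 0 < 1 + 2 and 1 < 0 + 2, while (base 0, count 1) and (base 1, count 1) do not, since the first
// ends exactly where the second begins.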
10260
10261static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
10262                                 RENDER_PASS_STATE const *renderPass) {
10263    bool skip_call = false;
10264    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
10265    auto const pCreateInfo = renderPass->createInfo.ptr();
10266    auto const & subpass_to_node = renderPass->subpassToNode;
10267    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
10268    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
10269    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
10270    // Find overlapping attachments
10271    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
10272        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
10273            VkImageView viewi = pFramebufferInfo->pAttachments[i];
10274            VkImageView viewj = pFramebufferInfo->pAttachments[j];
10275            if (viewi == viewj) {
10276                overlapping_attachments[i].push_back(j);
10277                overlapping_attachments[j].push_back(i);
10278                continue;
10279            }
10280            auto view_state_i = getImageViewState(dev_data, viewi);
10281            auto view_state_j = getImageViewState(dev_data, viewj);
10282            if (!view_state_i || !view_state_j) {
10283                continue;
10284            }
10285            auto view_ci_i = view_state_i->create_info;
10286            auto view_ci_j = view_state_j->create_info;
10287            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
10288                overlapping_attachments[i].push_back(j);
10289                overlapping_attachments[j].push_back(i);
10290                continue;
10291            }
10292            auto image_data_i = getImageState(dev_data, view_ci_i.image);
10293            auto image_data_j = getImageState(dev_data, view_ci_j.image);
10294            if (!image_data_i || !image_data_j) {
10295                continue;
10296            }
10297            if (image_data_i->binding.mem == image_data_j->binding.mem &&
10298                isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
10299                                   image_data_j->binding.size)) {
10300                overlapping_attachments[i].push_back(j);
10301                overlapping_attachments[j].push_back(i);
10302            }
10303        }
10304    }
10305    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
10306        uint32_t attachment = i;
10307        for (auto other_attachment : overlapping_attachments[i]) {
10308            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
10309                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10310                                     __LINE__, VALIDATION_ERROR_00324, "DS", "Attachment %d aliases attachment %d but doesn't "
10311                                                                             "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
10312                                     attachment, other_attachment, validation_error_map[VALIDATION_ERROR_00324]);
10313            }
10314            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
10315                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10316                                     __LINE__, VALIDATION_ERROR_00324, "DS", "Attachment %d aliases attachment %d but doesn't "
10317                                                                             "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
10318                                     other_attachment, attachment, validation_error_map[VALIDATION_ERROR_00324]);
10319            }
10320        }
10321    }
    // For each attachment, find the subpasses that use it.
10323    unordered_set<uint32_t> attachmentIndices;
10324    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10325        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
10326        attachmentIndices.clear();
10327        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10328            uint32_t attachment = subpass.pInputAttachments[j].attachment;
10329            if (attachment == VK_ATTACHMENT_UNUSED)
10330                continue;
10331            input_attachment_to_subpass[attachment].push_back(i);
10332            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
10333                input_attachment_to_subpass[overlapping_attachment].push_back(i);
10334            }
10335        }
10336        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10337            uint32_t attachment = subpass.pColorAttachments[j].attachment;
10338            if (attachment == VK_ATTACHMENT_UNUSED)
10339                continue;
10340            output_attachment_to_subpass[attachment].push_back(i);
10341            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
10342                output_attachment_to_subpass[overlapping_attachment].push_back(i);
10343            }
10344            attachmentIndices.insert(attachment);
10345        }
10346        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
10347            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
10348            output_attachment_to_subpass[attachment].push_back(i);
10349            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
10350                output_attachment_to_subpass[overlapping_attachment].push_back(i);
10351            }
10352
10353            if (attachmentIndices.count(attachment)) {
10354                skip_call |=
10355                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10356                            DRAWSTATE_INVALID_RENDERPASS, "DS",
10357                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
10358            }
10359        }
10360    }
    // If a dependency is needed, make sure one exists
10362    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10363        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
10364        // If the attachment is an input then all subpasses that output must have a dependency relationship
10365        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10366            uint32_t attachment = subpass.pInputAttachments[j].attachment;
10367            if (attachment == VK_ATTACHMENT_UNUSED)
10368                continue;
10369            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
10370        }
10371        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
10372        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10373            uint32_t attachment = subpass.pColorAttachments[j].attachment;
10374            if (attachment == VK_ATTACHMENT_UNUSED)
10375                continue;
10376            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
10377            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
10378        }
10379        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
10380            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
10381            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
10382            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
10383        }
10384    }
    // Loop through implicit dependencies: if this pass reads an attachment, make sure it is preserved in every pass
    // between the one that wrote it and this one.
10387    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10388        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
10389        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10390            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
10391        }
10392    }
10393    return skip_call;
10394}
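
// Illustrative example: if two framebuffer attachments are views of the same image with overlapping
// subresource ranges (or their images share overlapping bound memory ranges), both
// VkAttachmentDescriptions must set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT, or
// VALIDATION_ERROR_00324 is reported above.
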
// ValidateLayoutVsAttachmentDescription is a general function for validating state associated with the
// VkAttachmentDescription structs used by the subpasses of a renderpass. The initial check ensures that
// attachments whose first layout is READ_ONLY don't use CLEAR as their loadOp.
10398static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
10399                                                  const uint32_t attachment,
10400                                                  const VkAttachmentDescription &attachment_description) {
10401    bool skip_call = false;
10402    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
10403    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
10404        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
10405            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
10406            skip_call |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                 0, __LINE__, VALIDATION_ERROR_02351, "DS",
10408                                 "Cannot clear attachment %d with invalid first layout %s. %s", attachment,
10409                                 string_VkImageLayout(first_layout), validation_error_map[VALIDATION_ERROR_02351]);
10410        }
10411    }
10412    return skip_call;
10413}
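
// Illustrative example: an attachment whose first-use layout in the render pass is
// VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL but whose description sets
// loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR triggers VALIDATION_ERROR_02351 above.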
10414
10415static bool ValidateLayouts(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
10416    bool skip = false;
10417
10418    // Track when we're observing the first use of an attachment
10419    std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
10420    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10421        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
10422        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10423            auto attach_index = subpass.pColorAttachments[j].attachment;
10424            if (attach_index == VK_ATTACHMENT_UNUSED)
10425                continue;
10426
10427            switch (subpass.pColorAttachments[j].layout) {
10428            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
10429                // This is ideal.
10430                break;
10431
10432            case VK_IMAGE_LAYOUT_GENERAL:
10433                // May not be optimal; TODO: reconsider this warning based on other constraints?
10434                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
10435                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10436                                "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
10437                break;
10438
10439            default:
10440                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
10441                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10442                                "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
10443                                string_VkImageLayout(subpass.pColorAttachments[j].layout));
10444            }
10445
10446            if (attach_first_use[attach_index]) {
10447                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pColorAttachments[j].layout,
10448                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
10449            }
10450            attach_first_use[attach_index] = false;
10451        }
10452        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
10453            switch (subpass.pDepthStencilAttachment->layout) {
10454            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
10455            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
10456                // These are ideal.
10457                break;
10458
10459            case VK_IMAGE_LAYOUT_GENERAL:
10460                // May not be optimal; TODO: reconsider this warning based on other constraints? GENERAL can be better than doing
10461                // a bunch of transitions.
10462                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
10463                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10464                                "GENERAL layout for depth attachment may not give optimal performance.");
10465                break;
10466
10467            default:
10468                // No other layouts are acceptable
10469                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
10470                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10471                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
10472                                "DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.",
10473                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
10474            }
10475
10476            auto attach_index = subpass.pDepthStencilAttachment->attachment;
10477            if (attach_first_use[attach_index]) {
10478                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pDepthStencilAttachment->layout,
10479                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
10480            }
10481            attach_first_use[attach_index] = false;
10482        }
10483        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10484            auto attach_index = subpass.pInputAttachments[j].attachment;
10485            if (attach_index == VK_ATTACHMENT_UNUSED)
10486                continue;
10487
10488            switch (subpass.pInputAttachments[j].layout) {
10489            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
10490            case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
10491                // These are ideal.
10492                break;
10493
10494            case VK_IMAGE_LAYOUT_GENERAL:
10495                // May not be optimal. TODO: reconsider this warning based on other constraints.
10496                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
10497                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10498                                "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
10499                break;
10500
10501            default:
10502                // No other layouts are acceptable
10503                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10504                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10505                                "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
10506                                string_VkImageLayout(subpass.pInputAttachments[j].layout));
10507            }
10508
10509            if (attach_first_use[attach_index]) {
10510                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pInputAttachments[j].layout,
10511                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
10512            }
10513            attach_first_use[attach_index] = false;
10514        }
10515    }
10516    return skip;
10517}
10518
10519static bool CreatePassDAG(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
10520                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
10521    bool skip_call = false;
10522    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10523        DAGNode &subpass_node = subpass_to_node[i];
10524        subpass_node.pass = i;
10525    }
10526    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
10527        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
10528        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
10529            if (dependency.srcSubpass == dependency.dstSubpass) {
10530                skip_call |=
10531                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10532                            DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
10533            }
10534        } else if (dependency.srcSubpass > dependency.dstSubpass) {
10535            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10536                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
10538        } else if (dependency.srcSubpass == dependency.dstSubpass) {
10539            has_self_dependency[dependency.srcSubpass] = true;
10540        } else {
10541            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
10542            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
10543        }
10544    }
10545    return skip_call;
10546}
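
// Illustrative example: a VkSubpassDependency with srcSubpass = 0 and dstSubpass = 1 adds 0 to
// subpass_to_node[1].prev and 1 to subpass_to_node[0].next; srcSubpass == dstSubpass (with neither
// being VK_SUBPASS_EXTERNAL) records a self-dependency instead of a DAG edge.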
10547
10549VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
10550                                                  const VkAllocationCallbacks *pAllocator,
10551                                                  VkShaderModule *pShaderModule) {
10552    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10553    bool skip_call = false;
10554
10555    // Use SPIRV-Tools validator to try and catch any issues with the module itself
10556    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
10557    spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
10558    spv_diagnostic diag = nullptr;
10559
10560    auto result = spvValidate(ctx, &binary, &diag);
10561    if (result != SPV_SUCCESS) {
10562        skip_call |=
10563            log_msg(dev_data->report_data, result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
10564                    VkDebugReportObjectTypeEXT(0), 0, __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
10565                    "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)");
10566    }
10567
10568    spvDiagnosticDestroy(diag);
10569    spvContextDestroy(ctx);
10570
10571    if (skip_call)
10572        return VK_ERROR_VALIDATION_FAILED_EXT;
10573
10574    VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
10575
10576    if (res == VK_SUCCESS) {
10577        std::lock_guard<std::mutex> lock(global_lock);
10578        dev_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
10579    }
10580    return res;
10581}
10582
10583static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
10584    bool skip_call = false;
10585    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
10586        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10587                             VALIDATION_ERROR_00325, "DS",
10588                             "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d. %s",
10589                             type, attachment, attachment_count, validation_error_map[VALIDATION_ERROR_00325]);
10590    }
10591    return skip_call;
10592}
10593
10594static bool IsPowerOfTwo(unsigned x) {
10595    return x && !(x & (x-1));
10596}
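
// Why a power-of-two test detects mismatched sample counts (illustrative): sample_count below ORs
// together VkSampleCountFlagBits values, each of which is a single bit. Attachments that all share
// one sample count leave a single bit set; mixing VK_SAMPLE_COUNT_1_BIT and VK_SAMPLE_COUNT_4_BIT
// yields 0x5, which IsPowerOfTwo rejects.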
10597
10598static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
10599    bool skip_call = false;
10600    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10601        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
10602        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
10603            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10604                                 VALIDATION_ERROR_00347, "DS",
10605                                 "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS. %s",
10606                                 i, validation_error_map[VALIDATION_ERROR_00347]);
10607        }
10608        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
10609            uint32_t attachment = subpass.pPreserveAttachments[j];
10610            if (attachment == VK_ATTACHMENT_UNUSED) {
10611                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10612                                     __LINE__, VALIDATION_ERROR_00356, "DS",
10613                                     "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED. %s", j,
10614                                     validation_error_map[VALIDATION_ERROR_00356]);
10615            } else {
10616                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
10617            }
10618        }
10619
10620        auto subpass_performs_resolve = subpass.pResolveAttachments && std::any_of(
10621            subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
10622            [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
10623
10624        unsigned sample_count = 0;
10625
10626        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10627            uint32_t attachment;
10628            if (subpass.pResolveAttachments) {
10629                attachment = subpass.pResolveAttachments[j].attachment;
10630                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
10631
10632                if (!skip_call && attachment != VK_ATTACHMENT_UNUSED &&
10633                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
10634                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
10635                                         __LINE__, VALIDATION_ERROR_00352, "DS",
10636                                         "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
10637                                         "which must have VK_SAMPLE_COUNT_1_BIT but has %s. %s",
10638                                         i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
10639                                         validation_error_map[VALIDATION_ERROR_00352]);
10640                }
10641            }
10642            attachment = subpass.pColorAttachments[j].attachment;
10643            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
10644
10645            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
10646                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
10647
10648                if (subpass_performs_resolve &&
10649                    pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
10650                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
10651                                         __LINE__, VALIDATION_ERROR_00351, "DS",
10652                                         "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
10653                                         "which has VK_SAMPLE_COUNT_1_BIT. %s",
10654                                         i, attachment, validation_error_map[VALIDATION_ERROR_00351]);
10655                }
10656            }
10657        }
10658
10659        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
10660            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
10661            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
10662
10663            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
10664                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
10665            }
10666        }
10667
10668        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10669            uint32_t attachment = subpass.pInputAttachments[j].attachment;
10670            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
10671        }
10672
10673        if (sample_count && !IsPowerOfTwo(sample_count)) {
10674            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
10675                                 VALIDATION_ERROR_00337, "DS", "CreateRenderPass:  Subpass %u attempts to render to "
10676                                                               "attachments with inconsistent sample counts. %s",
10677                                 i, validation_error_map[VALIDATION_ERROR_00337]);
10678        }
10679    }
10680    return skip_call;
10681}
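
// Illustrative example of the resolve rules above: a subpass that resolves a 4-sample color
// attachment must point its pResolveAttachments entry at a VK_SAMPLE_COUNT_1_BIT attachment
// (VALIDATION_ERROR_00352), and the color attachment being resolved must itself be multisampled
// rather than VK_SAMPLE_COUNT_1_BIT (VALIDATION_ERROR_00351).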
10682
10683VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
10684                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
10685    bool skip_call = false;
10686    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10687
10688    std::unique_lock<std::mutex> lock(global_lock);
10689
10690    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
10691    //       ValidateLayouts.
10692    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
10693    if (!skip_call) {
10694        skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
10695    }
10696    lock.unlock();
10697
10698    if (skip_call) {
10699        return VK_ERROR_VALIDATION_FAILED_EXT;
10700    }
10701
10702    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
10703
10704    if (VK_SUCCESS == result) {
10705        lock.lock();
10706
10707        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
10708        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
10709        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
10710
10711        auto render_pass = unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo));
10712        render_pass->renderPass = *pRenderPass;
10713        render_pass->hasSelfDependency = has_self_dependency;
10714        render_pass->subpassToNode = subpass_to_node;
10715
10716        // TODO: Maybe fill list and then copy instead of locking
10717        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
10718        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
10719        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10720            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
10721            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10722                uint32_t attachment = subpass.pColorAttachments[j].attachment;
10723                if (!attachment_first_read.count(attachment)) {
10724                    attachment_first_read.insert(std::make_pair(attachment, false));
10725                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
10726                }
10727            }
10728            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
10729                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
10730                if (!attachment_first_read.count(attachment)) {
10731                    attachment_first_read.insert(std::make_pair(attachment, false));
10732                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
10733                }
10734            }
10735            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10736                uint32_t attachment = subpass.pInputAttachments[j].attachment;
10737                if (!attachment_first_read.count(attachment)) {
10738                    attachment_first_read.insert(std::make_pair(attachment, true));
10739                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
10740                }
10741            }
10742        }
10743
10744        dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
10745    }
10746    return result;
10747}
10748
10749static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
10750    bool skip_call = false;
10751    auto const pRenderPassInfo = getRenderPassState(dev_data, pRenderPassBegin->renderPass)->createInfo.ptr();
10752    auto const & framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo;
10753    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
10754        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10755                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
10756                                                                 "with a different number of attachments.");
10757    }
10758    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
10759        const VkImageView &image_view = framebufferInfo.pAttachments[i];
10760        auto view_state = getImageViewState(dev_data, image_view);
10761        assert(view_state);
10762        const VkImage &image = view_state->create_info.image;
10763        const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
10764        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
10765                                             pRenderPassInfo->pAttachments[i].initialLayout};
10766        // TODO: Do not iterate over every possibility - consolidate where possible
10767        for (uint32_t j = 0; j < subRange.levelCount; j++) {
10768            uint32_t level = subRange.baseMipLevel + j;
10769            for (uint32_t k = 0; k < subRange.layerCount; k++) {
10770                uint32_t layer = subRange.baseArrayLayer + k;
10771                VkImageSubresource sub = {subRange.aspectMask, level, layer};
10772                IMAGE_CMD_BUF_LAYOUT_NODE node;
10773                if (!FindLayout(pCB, image, sub, node)) {
10774                    SetLayout(pCB, image, sub, newNode);
10775                    continue;
10776                }
10777                if (newNode.layout != VK_IMAGE_LAYOUT_UNDEFINED &&
10778                    newNode.layout != node.layout) {
10779                    skip_call |=
10780                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10781                                DRAWSTATE_INVALID_RENDERPASS, "DS",
10782                                "You cannot start a render pass using attachment %u "
10783                                "where the render pass initial layout is %s and the previous "
10784                                "known layout of the attachment is %s. The layouts must match, or "
10785                                "the render pass initial layout for the attachment must be "
10786                                "VK_IMAGE_LAYOUT_UNDEFINED",
10787                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
10788                }
10789            }
10790        }
10791    }
10792    return skip_call;
10793}
10794
10795static void TransitionAttachmentRefLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
10796                                          VkAttachmentReference ref) {
10797    if (ref.attachment != VK_ATTACHMENT_UNUSED) {
10798        auto image_view = pFramebuffer->createInfo.pAttachments[ref.attachment];
10799        SetLayout(dev_data, pCB, image_view, ref.layout);
10800    }
10801}
10802
10803static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
10804                                     const int subpass_index) {
10805    auto renderPass = getRenderPassState(dev_data, pRenderPassBegin->renderPass);
10806    if (!renderPass)
10807        return;
10808
10809    auto framebuffer = getFramebufferState(dev_data, pRenderPassBegin->framebuffer);
10810    if (!framebuffer)
10811        return;
10812
10813    auto const &subpass = renderPass->createInfo.pSubpasses[subpass_index];
10814    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10815        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pInputAttachments[j]);
10816    }
10817    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10818        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pColorAttachments[j]);
10819    }
10820    if (subpass.pDepthStencilAttachment) {
10821        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, *subpass.pDepthStencilAttachment);
10822    }
10823}
10824
10825static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name,
10826                                         UNIQUE_VALIDATION_ERROR_CODE error_code) {
10827    bool skip_call = false;
10828    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
10829        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10830                             error_code, "DS", "Cannot execute command %s on a secondary command buffer. %s", cmd_name.c_str(),
10831                             validation_error_map[error_code]);
10832    }
10833    return skip_call;
10834}
10835
10836static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
10837    auto renderPass = getRenderPassState(dev_data, pRenderPassBegin->renderPass);
10838    if (!renderPass)
10839        return;
10840
10841    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->createInfo.ptr();
10842    auto framebuffer = getFramebufferState(dev_data, pRenderPassBegin->framebuffer);
10843    if (!framebuffer)
10844        return;
10845
10846    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
10847        auto image_view = framebuffer->createInfo.pAttachments[i];
10848        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
10849    }
10850}
10851
10852static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
10853    bool skip_call = false;
10854    const safe_VkFramebufferCreateInfo *pFramebufferInfo =
10855        &getFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
10856    if (pRenderPassBegin->renderArea.offset.x < 0 ||
10857        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
10858        pRenderPassBegin->renderArea.offset.y < 0 ||
10859        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
10860        skip_call |= static_cast<bool>(log_msg(
10861            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10862            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
10863            "Cannot execute a render pass with renderArea not within the bounds of the "
10864            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
10865            "height %d.",
10866            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
10867            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
10868    }
10869    return skip_call;
10870}
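
// Worked example (illustrative): for an 800x600 framebuffer, renderArea {offset = {0, 0}, extent = {800, 600}} passes the
// check above, while {offset = {16, 16}, extent = {800, 600}} fails because 16 + 800 > 800 (and 16 + 600 > 600). The test is
// offset + extent against the framebuffer's width/height, and any negative offset is rejected outright.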
10871
10872// If this is a stencil-only format, the stencil[Load|Store]Op flag must be checked; if it is a depth or color attachment, the
10873// [load|store]Op flag must be checked. Combined depth/stencil formats match on either op.
10874// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
10875template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
10876    if (color_depth_op != op && stencil_op != op) {
10877        return false;
10878    }
10879    bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
10880    bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;
10881
10882    return ((check_color_depth_load_op && (color_depth_op == op)) ||
10883            (check_stencil_load_op && (stencil_op == op)));
10884}
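
// Usage example (illustrative, values hypothetical): for a combined depth/stencil format such as VK_FORMAT_D24_UNORM_S8_UINT
// with loadOp = VK_ATTACHMENT_LOAD_OP_LOAD and stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
//     FormatSpecificLoadAndStoreOpSettings(format, loadOp, stencilLoadOp, VK_ATTACHMENT_LOAD_OP_CLEAR) // returns true
// because the stencil aspect is checked even though the depth aspect is loaded. For a pure color format like
// VK_FORMAT_R8G8B8A8_UNORM only the color_depth_op parameter can produce a match, since the format has no stencil aspect.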
10885
10886VKAPI_ATTR void VKAPI_CALL
10887CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
10888    bool skip_call = false;
10889    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10890    std::unique_lock<std::mutex> lock(global_lock);
10891    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
10892    auto renderPass = pRenderPassBegin ? getRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
10893    auto framebuffer = pRenderPassBegin ? getFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
10894    if (cb_node) {
10895        if (renderPass) {
10896            uint32_t clear_op_size = 0; // One past the highest attachment index using LOAD_OP_CLEAR; pClearValues needs this many entries
10897            cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
10898            for (uint32_t i = 0; i < renderPass->createInfo.attachmentCount; ++i) {
10899                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
10900                auto pAttachment = &renderPass->createInfo.pAttachments[i];
10901                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10902                                                         pAttachment->stencilLoadOp,
10903                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
10904                    clear_op_size = static_cast<uint32_t>(i) + 1;
10905                    std::function<bool()> function = [=]() {
10906                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true);
10907                        return false;
10908                    };
10909                    cb_node->validate_functions.push_back(function);
10910                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10911                                                                pAttachment->stencilLoadOp,
10912                                                                VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
10913                    std::function<bool()> function = [=]() {
10914                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false);
10915                        return false;
10916                    };
10917                    cb_node->validate_functions.push_back(function);
10918                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10919                                                                pAttachment->stencilLoadOp,
10920                                                                VK_ATTACHMENT_LOAD_OP_LOAD)) {
10921                    std::function<bool()> function = [=]() {
10922                        return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image),
10923                                                          "vkCmdBeginRenderPass()");
10924                    };
10925                    cb_node->validate_functions.push_back(function);
10926                }
10927                if (renderPass->attachment_first_read[i]) {
10928                    std::function<bool()> function = [=]() {
10929                        return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image),
10930                                                          "vkCmdBeginRenderPass()");
10931                    };
10932                    cb_node->validate_functions.push_back(function);
10933                }
10934            }
10935            if (clear_op_size > pRenderPassBegin->clearValueCount) {
10936                skip_call |= log_msg(
10937                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
10938                    reinterpret_cast<uint64_t &>(renderPass), __LINE__, VALIDATION_ERROR_00442,
10939                    "DS", "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there must "
10940                          "be at least %u entries in the pClearValues array to cover every attachment in renderPass "
10941                          "0x%" PRIx64 " up to and including the highest-indexed one that uses VK_ATTACHMENT_LOAD_OP_CLEAR (%u entries). "
10942                          "Note that the pClearValues array is indexed by attachment number, so even if some pClearValues entries "
10943                          "between 0 and %u correspond to attachments that aren't cleared, they will be ignored. %s",
10944                    pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass), clear_op_size,
10945                    clear_op_size - 1, validation_error_map[VALIDATION_ERROR_00442]);
10946            }
10947            if (clear_op_size < pRenderPassBegin->clearValueCount) {
10948                skip_call |= log_msg(
10949                    dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
10950                    reinterpret_cast<uint64_t &>(renderPass), __LINE__, DRAWSTATE_RENDERPASS_TOO_MANY_CLEAR_VALUES, "DS",
10951                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but only the first %u "
10952                    "entries in the pClearValues array are used. No attachment in renderPass 0x%" PRIx64
10953                    " with index %u or higher uses VK_ATTACHMENT_LOAD_OP_CLEAR - the extra pClearValues entries are ignored.",
10954                    pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass), clear_op_size);
10955            }
10956            skip_call |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
10957            skip_call |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin);
10958            skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_00440);
10959            skip_call |= ValidateDependencies(dev_data, framebuffer, renderPass);
10960            skip_call |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass", VALIDATION_ERROR_00441);
10961            skip_call |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
10962            UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BEGINRENDERPASS);
10963            cb_node->activeRenderPass = renderPass;
10964            // This is a shallow copy as that is all that is needed for now
10965            cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
10966            cb_node->activeSubpass = 0;
10967            cb_node->activeSubpassContents = contents;
10968            cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
10969            // Connect this framebuffer and its children to this cmdBuffer
10970            AddFramebufferBinding(dev_data, cb_node, framebuffer);
10971            // transition attachments to the correct layouts for the first subpass
10972            TransitionSubpassLayouts(dev_data, cb_node, &cb_node->activeRenderPassBeginInfo, cb_node->activeSubpass);
10973        }
10974    }
10975    lock.unlock();
10976    if (!skip_call) {
10977        dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
10978    }
10979}
10980
10981VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
10982    bool skip_call = false;
10983    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10984    std::unique_lock<std::mutex> lock(global_lock);
10985    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10986    if (pCB) {
10987        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass", VALIDATION_ERROR_00459);
10988        skip_call |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
10989        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_NEXTSUBPASS);
10990        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_00458);
10991
10992        auto subpassCount = pCB->activeRenderPass ? pCB->activeRenderPass->createInfo.subpassCount : 0; // null outside render pass
10993        if (subpassCount && (pCB->activeSubpass == subpassCount - 1)) {
10994            skip_call |= log_msg(
10995                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10996                reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00453, "DS",
10997                "vkCmdNextSubpass(): Attempted to advance beyond final subpass. %s", validation_error_map[VALIDATION_ERROR_00453]);
10998        }
10999    }
11000    lock.unlock();
11001
11002    if (skip_call)
11003        return;
11004
11005    dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
11006
11007    if (pCB) {
11008        lock.lock();
11009        pCB->activeSubpass++;
11010        pCB->activeSubpassContents = contents;
11011        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
11012    }
11013}
11014
11015VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
11016    bool skip_call = false;
11017    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
11018    std::unique_lock<std::mutex> lock(global_lock);
11019    auto pCB = getCBNode(dev_data, commandBuffer);
11020    if (pCB) {
11021        RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
11022        auto framebuffer = getFramebufferState(dev_data, pCB->activeFramebuffer);
11023        if (rp_state) {
11024            if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
11025                skip_call |= log_msg(
11026                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11027                    reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00460, "DS",
11028                    "vkCmdEndRenderPass(): Called before reaching final subpass. %s", validation_error_map[VALIDATION_ERROR_00460]);
11029            }
11030
11031            for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
11032                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
11033                auto pAttachment = &rp_state->createInfo.pAttachments[i];
11034                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
11035                                                         pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_STORE)) {
11036                    std::function<bool()> function = [=]() {
11037                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true);
11038                        return false;
11039                    };
11040                    pCB->validate_functions.push_back(function);
11041                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
11042                                                                pAttachment->stencilStoreOp,
11043                                                                VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
11044                    std::function<bool()> function = [=]() {
11045                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false);
11046                        return false;
11047                    };
11048                    pCB->validate_functions.push_back(function);
11049                }
11050            }
11051        }
11052        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_00464);
11053        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass", VALIDATION_ERROR_00465);
11054        skip_call |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
11055        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_ENDRENDERPASS);
11056    }
11057    lock.unlock();
11058
11059    if (skip_call)
11060        return;
11061
11062    dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
11063
11064    if (pCB) {
11065        lock.lock();
11066        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
11067        pCB->activeRenderPass = nullptr;
11068        pCB->activeSubpass = 0;
11069        pCB->activeFramebuffer = VK_NULL_HANDLE;
11070    }
11071}
11072
11073static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
11074                                        uint32_t secondaryAttach, const char *msg) {
11075    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11076                   VALIDATION_ERROR_02059, "DS",
11077                   "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64 " which has a render pass "
11078                   "that is not compatible with the Primary Cmd Buffer's current render pass. "
11079                   "Attachment %u is not compatible with %u: %s. %s",
11080                   reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg,
11081                   validation_error_map[VALIDATION_ERROR_02059]);
11082}
11083
11084static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
11085                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
11086                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
11087                                            uint32_t secondaryAttach, bool is_multi) {
11088    bool skip_call = false;
11089    if (primaryPassCI->attachmentCount <= primaryAttach) {
11090        primaryAttach = VK_ATTACHMENT_UNUSED;
11091    }
11092    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
11093        secondaryAttach = VK_ATTACHMENT_UNUSED;
11094    }
11095    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
11096        return skip_call;
11097    }
11098    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
11099        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
11100                                                 "The first is unused while the second is not.");
11101        return skip_call;
11102    }
11103    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
11104        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
11105                                                 "The second is unused while the first is not.");
11106        return skip_call;
11107    }
11108    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
11109        skip_call |=
11110            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
11111    }
11112    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
11113        skip_call |=
11114            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
11115    }
11116    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
11117        skip_call |=
11118            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
11119    }
11120    return skip_call;
11121}
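
// Compatibility example (illustrative): attachments are compatible here when they agree on format and sample count (and, for
// multi-subpass render passes, on flags). A primary attachment of VK_FORMAT_B8G8R8A8_UNORM at VK_SAMPLE_COUNT_1_BIT is
// therefore compatible with an identically described secondary attachment, but not with one declared as
// VK_FORMAT_R8G8B8A8_UNORM or VK_SAMPLE_COUNT_4_BIT; each mismatch is reported separately above.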
11122
11123static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
11124                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
11125                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
11126    bool skip_call = false;
11127    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
11128    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
11129    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
11130    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
11131        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
11132        if (i < primary_desc.inputAttachmentCount) {
11133            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
11134        }
11135        if (i < secondary_desc.inputAttachmentCount) {
11136            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
11137        }
11138        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
11139                                                     secondaryPassCI, secondary_input_attach, is_multi);
11140    }
11141    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
11142    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
11143        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
11144        if (i < primary_desc.colorAttachmentCount) {
11145            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
11146        }
11147        if (i < secondary_desc.colorAttachmentCount) {
11148            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
11149        }
11150        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
11151                                                     secondaryPassCI, secondary_color_attach, is_multi);
11152        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
11153        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
11154            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
11155        }
11156        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
11157            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
11158        }
11159        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach,
11160                                                     secondaryBuffer, secondaryPassCI, secondary_resolve_attach, is_multi);
11161    }
11162    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
11163    if (primary_desc.pDepthStencilAttachment) {
11164        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
11165    }
11166    if (secondary_desc.pDepthStencilAttachment) {
11167        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
11168    }
11169    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach,
11170                                                 secondaryBuffer, secondaryPassCI, secondary_depthstencil_attach, is_multi);
11171    return skip_call;
11172}
11173
11174// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
11175//  This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
11176//  will then feed into this function
11177static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
11178                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
11179                                            VkRenderPassCreateInfo const *secondaryPassCI) {
11180    bool skip_call = false;
11181
11182    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
11183        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11184                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
11185                             "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
11186                             " with a subpassCount of %u, which is incompatible with the primary Cmd Buffer 0x%" PRIx64
11187                             " that has a subpassCount of %u.",
11188                             reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount,
11189                             reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount);
11190    } else {
11191        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
11192            skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
11193                                                      primaryPassCI->subpassCount > 1);
11194        }
11195    }
11196    return skip_call;
11197}
11198
11199static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
11200                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
11201    bool skip_call = false;
11202    if (!pSubCB->beginInfo.pInheritanceInfo) {
11203        return skip_call;
11204    }
11205    VkFramebuffer primary_fb = pCB->activeFramebuffer;
11206    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
11207    if (secondary_fb != VK_NULL_HANDLE) {
11208        if (primary_fb != secondary_fb) {
11209            skip_call |= log_msg(
11210                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11211                VALIDATION_ERROR_02060, "DS",
11212                "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64 " which has a framebuffer 0x%" PRIx64
11213                " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ". %s",
11214                reinterpret_cast<uint64_t &>(secondaryBuffer), reinterpret_cast<uint64_t &>(secondary_fb),
11215                reinterpret_cast<uint64_t &>(primary_fb), validation_error_map[VALIDATION_ERROR_02060]);
11216        }
11217        auto fb = getFramebufferState(dev_data, secondary_fb);
11218        if (!fb) {
11219            skip_call |=
11220                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11221                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
11222                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
11223                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
11224            return skip_call;
11225        }
11226        auto cb_renderpass = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
11227        if (cb_renderpass && (cb_renderpass->renderPass != fb->createInfo.renderPass)) {
11228            skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
11229                                                         cb_renderpass->createInfo.ptr());
11230        }
11231    }
11232    return skip_call;
11233}
11234
11235static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
11236    bool skip_call = false;
11237    unordered_set<int> activeTypes;
11238    for (auto queryObject : pCB->activeQueries) {
11239        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
11240        if (queryPoolData != dev_data->queryPoolMap.end()) {
11241            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
11242                pSubCB->beginInfo.pInheritanceInfo) {
11243                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
11244                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
11245                    skip_call |=
11246                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11247                                VALIDATION_ERROR_02065, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
11248                                                              "which has invalid active query pool 0x%" PRIx64
11249                                                              ". Pipeline statistics are being queried, so every statistics bit "
11250                                                              "inherited by the command buffer must be enabled on the queryPool. %s",
11251                                pCB->commandBuffer, reinterpret_cast<const uint64_t &>(queryPoolData->first),
11252                                validation_error_map[VALIDATION_ERROR_02065]);
11253                }
11254            }
11255            activeTypes.insert(queryPoolData->second.createInfo.queryType);
11256        }
11257    }
11258    for (auto queryObject : pSubCB->startedQueries) {
11259        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
11260        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
11261            skip_call |=
11262                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11263                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
11264                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
11265                        "which has invalid active query pool 0x%" PRIx64 " of type %d, but a query of that type has been started on "
11266                        "secondary Cmd Buffer 0x%p.",
11267                        pCB->commandBuffer, reinterpret_cast<const uint64_t &>(queryPoolData->first),
11268                        queryPoolData->second.createInfo.queryType, pSubCB->commandBuffer);
11269        }
11270    }
11271
11272    auto primary_pool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
11273    auto secondary_pool = getCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
11274    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
11275        skip_call |=
11276            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11277                    reinterpret_cast<uint64_t>(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
11278                    "vkCmdExecuteCommands(): Primary command buffer 0x%p"
11279                    " created in queue family %d has secondary command buffer 0x%p created in queue family %d.",
11280                    pCB->commandBuffer, primary_pool->queueFamilyIndex, pSubCB->commandBuffer, secondary_pool->queueFamilyIndex);
11281    }
11282
11283    return skip_call;
11284}
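
// Pipeline-statistics example (illustrative): the subset check above requires the secondary buffer's inherited
// pipelineStatistics bits to be covered by the pool's creation flags. If the pool was created with
// VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT | VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT, a secondary
// buffer inheriting only INPUT_ASSEMBLY_VERTICES_BIT passes, while one that also requests
// FRAGMENT_SHADER_INVOCATIONS_BIT fails, because (cmdBufStatistics & poolStatistics) != cmdBufStatistics.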
11285
11286VKAPI_ATTR void VKAPI_CALL
11287CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
11288    bool skip_call = false;
11289    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
11290    std::unique_lock<std::mutex> lock(global_lock);
11291    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
11292    if (pCB) {
11293        GLOBAL_CB_NODE *pSubCB = NULL;
11294        for (uint32_t i = 0; i < commandBuffersCount; i++) {
11295            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
11296            assert(pSubCB);
11297            if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
11298                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
11299                                     __LINE__, VALIDATION_ERROR_00153, "DS",
11300                                     "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
11301                                     "array. All cmd buffers in pCommandBuffers array must be secondary. %s",
11302                                     pCommandBuffers[i], i, validation_error_map[VALIDATION_ERROR_00153]);
11303            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
11304                auto secondary_rp_state = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
11305                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
11306                    skip_call |= log_msg(
11307                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11308                        (uint64_t)pCommandBuffers[i], __LINE__, VALIDATION_ERROR_02057, "DS",
11309                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
11310                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set. %s",
11311                        pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass,
11312                        validation_error_map[VALIDATION_ERROR_02057]);
11313                } else {
11314                    // Make sure render pass is compatible with parent command buffer pass if has continue
11315                    if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
11316                        skip_call |=
11317                            validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
11318                                                            pCommandBuffers[i], secondary_rp_state->createInfo.ptr());
11319                    }
11320                    //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
11321                    skip_call |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
11322                }
11323                string errorString = "";
11324                // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
11325                if ((pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) &&
11326                    !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
11327                                                     secondary_rp_state->createInfo.ptr(), errorString)) {
11328                    skip_call |= log_msg(
11329                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11330                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
11331                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
11332                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
11333                        pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, commandBuffer,
11334                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
11335                }
11336            }
11337            // TODO(mlentine): Move more logic into this method
11338            skip_call |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
11339            skip_call |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()");
11340            // Secondary cmdBuffers are considered pending execution
11341            // from the time they are recorded
11342            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
11343                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
11344                    skip_call |=
11345                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11346                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)(pCB->commandBuffer), __LINE__,
11347                                VALIDATION_ERROR_00154, "DS", "Attempt to simultaneously execute command buffer 0x%p"
11348                                                              " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set! %s",
11349                                pCB->commandBuffer, validation_error_map[VALIDATION_ERROR_00154]);
11350                }
11351                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
11352                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
11353                    skip_call |= log_msg(
11354                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11355                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
11356                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) "
11357                        "does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
11358                        "(0x%p) to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
11359                        "set, even though it does.",
11360                        pCommandBuffers[i], pCB->commandBuffer);
11361                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
11362                }
11363            }
11364            if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
11365                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11366                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(pCommandBuffers[i]),
11367                                     __LINE__, VALIDATION_ERROR_02062, "DS", "vkCmdExecuteCommands(): Secondary Command Buffer "
11368                                                                             "(0x%p) cannot be submitted with a query in "
11369                                                                             "flight and inherited queries not "
11370                                                                             "supported on this device. %s",
11371                                     pCommandBuffers[i], validation_error_map[VALIDATION_ERROR_02062]);
11372            }
11373            // Propagate layout transitions to the primary cmd buffer
11374            for (auto ilm_entry : pSubCB->imageLayoutMap) {
11375                SetLayout(pCB, ilm_entry.first, ilm_entry.second);
11376            }
11377            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
11378            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
11379            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
11380            for (auto &function : pSubCB->queryUpdates) {
11381                pCB->queryUpdates.push_back(function);
11382            }
11383        }
11384        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands", VALIDATION_ERROR_00163);
11385        skip_call |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
11386        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_EXECUTECOMMANDS);
11387    }
11388    lock.unlock();
11389    if (!skip_call)
11390        dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
11391}
11392
11393// For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL
11394static bool ValidateMapImageLayouts(VkDevice device, DEVICE_MEM_INFO const *mem_info, VkDeviceSize offset,
11395                                    VkDeviceSize end_offset) {
11396    bool skip_call = false;
11397    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11398    // Iterate over all bound image ranges and verify that for any that overlap the
11399    //  map ranges, the layouts are VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL
11400    // TODO : This can be optimized if we store ranges based on starting address and early exit when we pass our range
11401    for (auto image_handle : mem_info->bound_images) {
11402        auto img_it = mem_info->bound_ranges.find(image_handle);
11403        if (img_it != mem_info->bound_ranges.end()) {
11404            if (rangesIntersect(dev_data, &img_it->second, offset, end_offset)) {
11405                std::vector<VkImageLayout> layouts;
11406                if (FindLayouts(dev_data, VkImage(image_handle), layouts)) {
11407                    for (auto layout : layouts) {
11408                        if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
11409                            skip_call |=
11410                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
11411                                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
11412                                                                                        "GENERAL or PREINITIALIZED are supported.",
11413                                        string_VkImageLayout(layout));
11414                        }
11415                    }
11416                }
11417            }
11418        }
11419    }
11420    return skip_call;
11421}
11422
11423VKAPI_ATTR VkResult VKAPI_CALL
11424MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
11425    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11426
11427    bool skip_call = false;
11428    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11429    std::unique_lock<std::mutex> lock(global_lock);
11430    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
11431    if (mem_info) {
11432        // TODO : This could be more fine-grained to track just the region that is valid
11433        mem_info->global_valid = true;
11434        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
11435        skip_call |= ValidateMapImageLayouts(device, mem_info, offset, end_offset);
11436        // TODO : Do we need to create new "bound_range" for the mapped range?
11437        SetMemRangesValid(dev_data, mem_info, offset, end_offset);
11438        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
11439             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
11440            skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11441                                (uint64_t)mem, __LINE__, VALIDATION_ERROR_00629, "MEM",
11442                                "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64 ". %s",
11443                                (uint64_t)mem, validation_error_map[VALIDATION_ERROR_00629]);
11444        }
11445    }
11446    skip_call |= ValidateMapMemRange(dev_data, mem, offset, size);
11447    lock.unlock();
11448
11449    if (!skip_call) {
11450        result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
11451        if (VK_SUCCESS == result) {
11452            lock.lock();
11453            // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
11454            storeMemRanges(dev_data, mem, offset, size);
11455            initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
11456            lock.unlock();
11457        }
11458    }
11459    return result;
11460}
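
// Application-side sketch (illustrative, not validated code): the checks above correspond to the usage pattern
//     void *data = nullptr;
//     // memory must come from a memory type with VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
//     VkResult res = vkMapMemory(device, memory, 0 /*offset*/, VK_WHOLE_SIZE, 0 /*flags*/, &data);
// Mapping a non-HOST_VISIBLE allocation, or a range overlapping an image that is not in the GENERAL or PREINITIALIZED
// layout, is what triggers the errors reported here.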
11461
11462VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
11463    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11464    bool skip_call = false;
11465
11466    std::unique_lock<std::mutex> lock(global_lock);
11467    skip_call |= deleteMemRanges(dev_data, mem);
11468    lock.unlock();
11469    if (!skip_call) {
11470        dev_data->dispatch_table.UnmapMemory(device, mem);
11471    }
11472}
11473
11474static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
11475                                   const VkMappedMemoryRange *pMemRanges) {
11476    bool skip = false;
11477    for (uint32_t i = 0; i < memRangeCount; ++i) {
11478        auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory);
11479        if (mem_info) {
11480            if (pMemRanges[i].size == VK_WHOLE_SIZE) {
11481                if (mem_info->mem_range.offset > pMemRanges[i].offset) {
11482                    skip |=
11483                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11484                                (uint64_t)pMemRanges[i].memory, __LINE__, VALIDATION_ERROR_00643, "MEM",
11485                                "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than the Memory Object's offset "
11486                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
11487                                funcName, static_cast<size_t>(pMemRanges[i].offset),
11488                                static_cast<size_t>(mem_info->mem_range.offset), validation_error_map[VALIDATION_ERROR_00643]);
11489                }
11490            } else {
11491                const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
11492                                              ? mem_info->alloc_info.allocationSize
11493                                              : (mem_info->mem_range.offset + mem_info->mem_range.size);
11494                if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
11495                    (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
11496                    skip |=
11497                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11498                                (uint64_t)pMemRanges[i].memory, __LINE__, VALIDATION_ERROR_00642, "MEM",
11499                                "%s: Flush/Invalidate range end (" PRINTF_SIZE_T_SPECIFIER "), i.e. offset (" PRINTF_SIZE_T_SPECIFIER
11500                                ") plus size, exceeds the Memory Object's upper-bound "
11501                                "(" PRINTF_SIZE_T_SPECIFIER "). %s",
11502                                funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
11503                                static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end),
11504                                validation_error_map[VALIDATION_ERROR_00642]);
11505                }
11506            }
11507        }
11508    }
11509    return skip;
11510}
11511
11512static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
11513                                                     const VkMappedMemoryRange *mem_ranges) {
11514    bool skip = false;
11515    for (uint32_t i = 0; i < mem_range_count; ++i) {
11516        auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
11517        if (mem_info) {
11518            if (mem_info->shadow_copy) {
11519                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
11520                                        ? mem_info->mem_range.size
11521                                        : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
11522                char *data = static_cast<char *>(mem_info->shadow_copy);
11523                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
11524                    if (data[j] != NoncoherentMemoryFillValue) {
11525                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11526                                        VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem_ranges[i].memory, __LINE__,
11527                                        MEMTRACK_INVALID_MAP, "MEM", "Memory underflow was detected on mem obj 0x%" PRIxLEAST64,
11528                                        (uint64_t)mem_ranges[i].memory);
11529                    }
11530                }
11531                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
11532                    if (data[j] != NoncoherentMemoryFillValue) {
11533                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11534                                        VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem_ranges[i].memory, __LINE__,
11535                                        MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
11536                                        (uint64_t)mem_ranges[i].memory);
11537                    }
11538                }
11539                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
11540            }
11541        }
11542    }
11543    return skip;
11544}
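
// Shadow-copy layout note (illustrative): for non-coherent mappings the layer hands the application a padded shadow buffer
// rather than the raw driver pointer, laid out as
//     [ shadow_pad_size fill bytes | size user-data bytes | shadow_pad_size fill bytes ]
// Both pad regions are pre-filled with NoncoherentMemoryFillValue; any byte that no longer matches means the app wrote before
// the mapped offset (underflow) or past offset + size (overflow), which is what the two loops above detect before the user
// data is copied back to p_driver_data.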
11545
11546static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
11547    for (uint32_t i = 0; i < mem_range_count; ++i) {
11548        auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
11549        if (mem_info && mem_info->shadow_copy) {
11550            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
11551                                    ? mem_info->mem_range.size
11552                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
11553            char *data = static_cast<char *>(mem_info->shadow_copy);
11554            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
11555        }
11556    }
11557}
11558
11559static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
11560                                                  const VkMappedMemoryRange *mem_ranges) {
11561    bool skip = false;
11562    for (uint32_t i = 0; i < mem_range_count; ++i) {
11563        uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
11564        if (vk_safe_modulo(mem_ranges[i].offset, atom_size) != 0) {
11565            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
11566                            __LINE__, VALIDATION_ERROR_00644, "MEM",
11567                            "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
11568                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
11569                            func_name, i, mem_ranges[i].offset, atom_size, validation_error_map[VALIDATION_ERROR_00644]);
11570        }
11571        if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (vk_safe_modulo(mem_ranges[i].size, atom_size) != 0)) {
11572            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
11573                            __LINE__, VALIDATION_ERROR_00645, "MEM",
11574                            "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
11575                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
11576                            func_name, i, mem_ranges[i].size, atom_size, validation_error_map[VALIDATION_ERROR_00645]);
11577        }
11578    }
11579    return skip;
11580}
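
// Alignment example (illustrative, atom size hypothetical): with nonCoherentAtomSize = 64, an offset of 128 and a size of 256
// both pass (128 % 64 == 0, 256 % 64 == 0), while an offset of 100 fails because vk_safe_modulo(100, 64) == 36. A size of
// VK_WHOLE_SIZE is exempt from the size check, as coded above.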
11581
11582static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
11583                                                   const VkMappedMemoryRange *mem_ranges) {
11584    bool skip = false;
11585    std::lock_guard<std::mutex> lock(global_lock);
11586    skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
11587    skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
11588    return skip;
11589}
11590
11591VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
11592                                                       const VkMappedMemoryRange *pMemRanges) {
11593    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11594    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11595
11596    if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
11597        result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
11598    }
11599    return result;
11600}
11601
11602static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
11603                                                        const VkMappedMemoryRange *mem_ranges) {
11604    bool skip = false;
11605    std::lock_guard<std::mutex> lock(global_lock);
11606    skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
11607    return skip;
11608}
11609
11610static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
11611                                                       const VkMappedMemoryRange *mem_ranges) {
11612    std::lock_guard<std::mutex> lock(global_lock);
11613    // Update our shadow copy with modified driver data
11614    CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
11615}
11616
11617VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
11618                                                            const VkMappedMemoryRange *pMemRanges) {
11619    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11620    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11621
11622    if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
11623        result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
11624        if (result == VK_SUCCESS) {
11625            PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
11626        }
11627    }
11628    return result;
11629}
11630
11631VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
11632    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11633    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11634    bool skip_call = false;
11635    std::unique_lock<std::mutex> lock(global_lock);
11636    auto image_state = getImageState(dev_data, image);
11637    if (image_state) {
11638        // Track objects tied to memory
11639        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
11640        skip_call = SetMemBinding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
11641        if (!image_state->memory_requirements_checked) {
11642            // There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
11643            //  BindImageMemory but it's implied in that memory being bound must conform with VkMemoryRequirements from
11644            //  vkGetImageMemoryRequirements()
11645            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11646                                 image_handle, __LINE__, DRAWSTATE_INVALID_IMAGE, "DS",
11647                                 "vkBindImageMemory(): Binding memory to image 0x%" PRIxLEAST64
11648                                 " but vkGetImageMemoryRequirements() has not been called on that image.",
11649                                 image_handle);
11650            // Make the call for them so we can verify the state
11651            lock.unlock();
11652            dev_data->dispatch_table.GetImageMemoryRequirements(device, image, &image_state->requirements);
11653            lock.lock();
11654        }
11655
11656        // Track and validate bound memory range information
11657        auto mem_info = getMemObjInfo(dev_data, mem);
11658        if (mem_info) {
11659            skip_call |= InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
11660                                                image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
11661            skip_call |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, "vkBindImageMemory()",
11662                                             VALIDATION_ERROR_00806);
11663        }
11664
11665        lock.unlock();
11666        if (!skip_call) {
11667            result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
11668            lock.lock();
11669            image_state->binding.mem = mem;
11670            image_state->binding.offset = memoryOffset;
11671            image_state->binding.size = image_state->requirements.size;
11672            lock.unlock();
11673        }
11674    } else {
11675        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11676                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
11677                "vkBindImageMemory: Cannot find image 0x%" PRIx64 ", has it already been deleted?",
11678                reinterpret_cast<const uint64_t &>(image));
11679    }
11680    return result;
11681}
11682
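// vkSetEvent: a host signal takes effect immediately, so besides updating the event's own state the
// layer ORs VK_PIPELINE_STAGE_HOST_BIT into every queue's cached stage mask for this event. Setting an
// event still in use by a submitted command buffer is flagged as a forward-progress error.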
11683VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
11684    bool skip_call = false;
11685    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11686    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11687    std::unique_lock<std::mutex> lock(global_lock);
11688    auto event_state = getEventNode(dev_data, event);
11689    if (event_state) {
11690        event_state->needsSignaled = false;
11691        event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
11692        if (event_state->write_in_use) {
11693            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
11694                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11695                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
11696                                 reinterpret_cast<const uint64_t &>(event));
11697        }
11698    }
11699    lock.unlock();
11700    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
11701    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
11702    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
11703    for (auto queue_data : dev_data->queueMap) {
11704        auto event_entry = queue_data.second.eventToStageMap.find(event);
11705        if (event_entry != queue_data.second.eventToStageMap.end()) {
11706            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
11707        }
11708    }
11709    if (!skip_call)
11710        result = dev_data->dispatch_table.SetEvent(device, event);
11711    return result;
11712}
11713
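// vkQueueBindSparse is treated like a queue submission with no command buffers: sparse buffer/image
// bindings are recorded via SetSparseMemBinding, and each VkBindSparseInfo contributes one entry to
// pQueue->submissions so the usual semaphore-wait/signal and fence bookkeeping (completion proofs)
// applies. The fence is attached only to the last batch, matching vkQueueSubmit behavior.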
11714VKAPI_ATTR VkResult VKAPI_CALL
11715QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
11716    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
11717    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11718    bool skip_call = false;
11719    std::unique_lock<std::mutex> lock(global_lock);
11720    auto pFence = getFenceNode(dev_data, fence);
11721    auto pQueue = getQueueState(dev_data, queue);
11722
11723    // First verify that fence is not in use
11724    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
11725
11726    if (pFence) {
11727        SubmitFence(pQueue, pFence, bindInfoCount);
11728    }
11729
11730    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
11731        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
11732        // Track objects tied to memory
11733        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
11734            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
11735                auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
11736                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
11737                                        (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
11738                                        "vkQueueBindSparse"))
11739                    skip_call = true;
11740            }
11741        }
11742        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
11743            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
11744                auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
11745                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
11746                                        (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11747                                        "vkQueueBindSparse"))
11748                    skip_call = true;
11749            }
11750        }
11751        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
11752            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
11753                auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
11754                // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
11755                VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
11756                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
11757                                        (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11758                                        "vkQueueBindSparse"))
11759                    skip_call = true;
11760            }
11761        }
11762
11763        std::vector<SEMAPHORE_WAIT> semaphore_waits;
11764        std::vector<VkSemaphore> semaphore_signals;
11765        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
11766            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
11767            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11768            if (pSemaphore) {
11769                if (pSemaphore->signaled) {
11770                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
11771                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
11772                        pSemaphore->in_use.fetch_add(1);
11773                    }
11774                    pSemaphore->signaler.first = VK_NULL_HANDLE;
11775                    pSemaphore->signaled = false;
11776                } else {
11777                    skip_call |= log_msg(
11778                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11779                        reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11780                        "vkQueueBindSparse: Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
11781                        queue, reinterpret_cast<const uint64_t &>(semaphore));
11782                }
11783            }
11784        }
11785        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
11786            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
11787            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11788            if (pSemaphore) {
11789                if (pSemaphore->signaled) {
11790                    skip_call |=
11791                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11792                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11793                                "vkQueueBindSparse: Queue 0x%p is signaling semaphore 0x%" PRIx64
11794                                ", but that semaphore is already signaled.",
11795                                queue, reinterpret_cast<const uint64_t &>(semaphore));
11796                }
11797                else {
11798                    pSemaphore->signaler.first = queue;
11799                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
11800                    pSemaphore->signaled = true;
11801                    pSemaphore->in_use.fetch_add(1);
11802                    semaphore_signals.push_back(semaphore);
11803                }
11804            }
11805        }
11806
11807        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11808                                         semaphore_waits,
11809                                         semaphore_signals,
11810                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
11811    }
11812
11813    if (pFence && !bindInfoCount) {
11814        // No work to do, just dropping a fence in the queue by itself.
11815        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11816                                         std::vector<SEMAPHORE_WAIT>(),
11817                                         std::vector<VkSemaphore>(),
11818                                         fence);
11819    }
11820
11821    lock.unlock();
11822
11823    if (!skip_call)
11824        return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
11825
11826    return result;
11827}
11828
11829VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
11830                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
11831    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11832    VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
11833    if (result == VK_SUCCESS) {
11834        std::lock_guard<std::mutex> lock(global_lock);
11835        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
11836        sNode->signaler.first = VK_NULL_HANDLE;
11837        sNode->signaler.second = 0;
11838        sNode->signaled = false;
11839    }
11840    return result;
11841}
11842
11843VKAPI_ATTR VkResult VKAPI_CALL
11844CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
11845    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11846    VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
11847    if (result == VK_SUCCESS) {
11848        std::lock_guard<std::mutex> lock(global_lock);
11849        dev_data->eventMap[*pEvent].needsSignaled = false;
11850        dev_data->eventMap[*pEvent].write_in_use = 0;
11851        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
11852    }
11853    return result;
11854}
11855
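// Swapchain creation is validated against the data the app should already have queried: surface
// ownership/oldSwapchain consistency, VkSurfaceCapabilitiesKHR (image count, extent, preTransform,
// compositeAlpha, array layers, usage), surface formats, and present modes. The expected app-side
// sequence, sketched for illustration (error handling omitted):
//
//     VkSurfaceCapabilitiesKHR caps;
//     vkGetPhysicalDeviceSurfaceCapabilitiesKHR(gpu, surface, &caps);
//     uint32_t image_count = caps.minImageCount + 1;
//     if (caps.maxImageCount > 0 && image_count > caps.maxImageCount) image_count = caps.maxImageCount;
//     // ...also query vkGetPhysicalDeviceSurfaceFormatsKHR and
//     // vkGetPhysicalDeviceSurfacePresentModesKHR before filling VkSwapchainCreateInfoKHR...
//
// Skipping those queries trips the DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY checks below.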
11856static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
11857                                              VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
11858                                              SWAPCHAIN_NODE *old_swapchain_state) {
11859    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
11860
11861    // TODO: revisit this. some of these rules are being relaxed.
11862    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
11863        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11864                    reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
11865                    "%s: surface has an existing swapchain other than oldSwapchain", func_name))
11866            return true;
11867    }
11868    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
11869        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11870                    reinterpret_cast<uint64_t const &>(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE,
11871                    "DS", "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
11872            return true;
11873    }
11874    auto physical_device_state = getPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
11875    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
11876        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11877                    reinterpret_cast<uint64_t>(dev_data->physical_device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
11878                    "%s: surface capabilities not retrieved for this physical device", func_name))
11879            return true;
11880    } else { // have valid capabilities
11881        auto &capabilities = physical_device_state->surfaceCapabilities;
11882        // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
11883        if (pCreateInfo->minImageCount < capabilities.minImageCount) {
11884            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11885                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02331, "DS",
11886                        "%s called with minImageCount = %d, which is outside the bounds returned "
11887                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
11888                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
11889                        validation_error_map[VALIDATION_ERROR_02331]))
11890                return true;
11891        }
11892
11893        if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
11894            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11895                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02332, "DS",
11896                        "%s called with minImageCount = %d, which is outside the bounds returned "
11897                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
11898                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
11899                        validation_error_map[VALIDATION_ERROR_02332]))
11900                return true;
11901        }
11902
11903        // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
11904        if ((capabilities.currentExtent.width == kSurfaceSizeFromSwapchain) &&
11905            ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
11906             (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
11907             (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
11908             (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height))) {
11909            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11910                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02334, "DS",
11911                        "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
11912                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
11913                        "maxImageExtent = (%d,%d). %s",
11914                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
11915                        capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
11916                        capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height,
11917                        validation_error_map[VALIDATION_ERROR_02334]))
11918                return true;
11919        }
11920        if ((capabilities.currentExtent.width != kSurfaceSizeFromSwapchain) &&
11921            ((pCreateInfo->imageExtent.width != capabilities.currentExtent.width) ||
11922             (pCreateInfo->imageExtent.height != capabilities.currentExtent.height))) {
11923            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11924                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02334, "DS",
11925                        "%s called with imageExtent = (%d,%d), which is not equal to the currentExtent = (%d,%d) returned by "
11926                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(). %s",
11927                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
11928                        capabilities.currentExtent.width, capabilities.currentExtent.height,
11929                        validation_error_map[VALIDATION_ERROR_02334]))
11930                return true;
11931        }
11932        // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
11933        // VkSurfaceCapabilitiesKHR::supportedTransforms.
11934        if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
11935            !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
11936            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
11937            // it up a little at a time, and then log it:
11938            std::string errorString = "";
11939            char str[1024];
11940            // Here's the first part of the message:
11941            sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s).  Supported values are:\n", func_name,
11942                    string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
11943            errorString += str;
11944            for (uint32_t i = 0; i < 32; i++) {
11945                // Build up the rest of the message:
11946                if ((1u << i) & capabilities.supportedTransforms) {
11947                    const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1u << i));
11948                    sprintf(str, "    %s\n", newStr);
11949                    errorString += str;
11950                }
11951            }
11952            // Log the message that we've built up:
11953            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11954                        reinterpret_cast<uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_02339, "DS", "%s. %s",
11955                        errorString.c_str(), validation_error_map[VALIDATION_ERROR_02339]))
11956                return true;
11957        }
11958
11959        // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
11960        // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
11961        if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
11962            !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
11963            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
11964            // it up a little at a time, and then log it:
11965            std::string errorString = "";
11966            char str[1024];
11967            // Here's the first part of the message:
11968            sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s).  Supported values are:\n",
11969                    func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
11970            errorString += str;
11971            for (uint32_t i = 0; i < 32; i++) {
11972                // Build up the rest of the message:
11973                if ((1u << i) & capabilities.supportedCompositeAlpha) {
11974                    const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1u << i));
11975                    sprintf(str, "    %s\n", newStr);
11976                    errorString += str;
11977                }
11978            }
11979            // Log the message that we've built up:
11980            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11981                        reinterpret_cast<uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_02340, "DS", "%s. %s",
11982                        errorString.c_str(), validation_error_map[VALIDATION_ERROR_02340]))
11983                return true;
11984        }
11985        // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
11986        if ((pCreateInfo->imageArrayLayers < 1) || (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers)) {
11987            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11988                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02335, "DS",
11989                        "%s called with a non-supported imageArrayLayers (i.e. %d).  Minimum value is 1, maximum value is %d. %s",
11990                        func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers,
11991                        validation_error_map[VALIDATION_ERROR_02335]))
11992                return true;
11993        }
11994        // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
11995        if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
11996            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11997                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02336, "DS",
11998                        "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x).  Supported flag bits are 0x%08x. %s",
11999                        func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags,
12000                        validation_error_map[VALIDATION_ERROR_02336]))
12001                return true;
12002        }
12003    }
12004
12005    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
12006    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
12007        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
12008                    reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
12009                    "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
12010            return true;
12011    } else {
12012        // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
12013        bool foundFormat = false;
12014        bool foundColorSpace = false;
12015        bool foundMatch = false;
12016        for (auto const &format : physical_device_state->surface_formats) {
12017            if (pCreateInfo->imageFormat == format.format) {
12018                // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
12019                foundFormat = true;
12020                if (pCreateInfo->imageColorSpace == format.colorSpace) {
12021                    foundMatch = true;
12022                    break;
12023                }
12024            } else {
12025                if (pCreateInfo->imageColorSpace == format.colorSpace) {
12026                    foundColorSpace = true;
12027                }
12028            }
12029        }
12030        if (!foundMatch) {
12031            if (!foundFormat) {
12032                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
12033                            reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02333, "DS",
12034                            "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d). %s",
12035                            func_name, pCreateInfo->imageFormat, validation_error_map[VALIDATION_ERROR_02333]))
12036                    return true;
12037            }
12038            if (!foundColorSpace) {
12039                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
12040                            reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02333, "DS",
12041                            "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d). %s",
12042                            func_name, pCreateInfo->imageColorSpace, validation_error_map[VALIDATION_ERROR_02333]))
12043                    return true;
12044            }
12045        }
12046    }
12047
12048    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
12049    if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
12050        // FIFO is required to always be supported
12051        if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
12052            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
12053                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
12054                        "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
12055                return true;
12056        }
12057    } else {
12058        // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
12059        bool foundMatch = std::find(physical_device_state->present_modes.begin(),
12060                                    physical_device_state->present_modes.end(),
12061                                    pCreateInfo->presentMode) != physical_device_state->present_modes.end();
12062        if (!foundMatch) {
12063            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
12064                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02341, "DS",
12065                        "%s called with a non-supported presentMode (i.e. %s). %s", func_name,
12066                        string_VkPresentModeKHR(pCreateInfo->presentMode), validation_error_map[VALIDATION_ERROR_02341]))
12067                return true;
12068        }
12069    }
12070
12071    return false;
12072}
12073
12074static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
12075                                             VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
12076                                             SWAPCHAIN_NODE *old_swapchain_state) {
12077    if (VK_SUCCESS == result) {
12078        std::lock_guard<std::mutex> lock(global_lock);
12079        auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
12080        surface_state->swapchain = swapchain_state.get();
12081        dev_data->device_extensions.swapchainMap[*pSwapchain] = std::move(swapchain_state);
12082    } else {
12083        surface_state->swapchain = nullptr;
12084    }
12085    // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
12086    if (old_swapchain_state) {
12087        old_swapchain_state->replaced = true;
12088    }
12089    surface_state->old_swapchain = old_swapchain_state;
12090    return;
12091}
12092
12093VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
12094                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
12095    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
12096    auto surface_state = getSurfaceState(dev_data->instance_data, pCreateInfo->surface);
12097    auto old_swapchain_state = getSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
12098
12099    if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
12100        return VK_ERROR_VALIDATION_FAILED_EXT;
12101    }
12102
12103    VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
12104
12105    PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
12106
12107    return result;
12108}
12109
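// Destroying a swapchain tears down all per-image layer state for its images (layout tracking,
// subresource maps, memory-object bindings, IMAGE_STATE entries) and unlinks the node from its
// surface before erasing it from the swapchain map.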
12110VKAPI_ATTR void VKAPI_CALL
12111DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
12112    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
12113    bool skip_call = false;
12114
12115    std::unique_lock<std::mutex> lock(global_lock);
12116    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
12117    if (swapchain_data) {
12118        if (swapchain_data->images.size() > 0) {
12119            for (auto swapchain_image : swapchain_data->images) {
12120                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
12121                if (image_sub != dev_data->imageSubresourceMap.end()) {
12122                    for (auto imgsubpair : image_sub->second) {
12123                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
12124                        if (image_item != dev_data->imageLayoutMap.end()) {
12125                            dev_data->imageLayoutMap.erase(image_item);
12126                        }
12127                    }
12128                    dev_data->imageSubresourceMap.erase(image_sub);
12129                }
12130                skip_call |=
12131                    ClearMemoryObjectBindings(dev_data, (uint64_t)swapchain_image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
12132                dev_data->imageMap.erase(swapchain_image);
12133            }
12134        }
12135
12136        auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
12137        if (surface_state) {
12138            if (surface_state->swapchain == swapchain_data)
12139                surface_state->swapchain = nullptr;
12140            if (surface_state->old_swapchain == swapchain_data)
12141                surface_state->old_swapchain = nullptr;
12142        }
12143
12144        dev_data->device_extensions.swapchainMap.erase(swapchain);
12145    }
12146    lock.unlock();
12147    if (!skip_call)
12148        dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
12149}
12150
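// Swapchain images are created by the WSI rather than vkCreateImage, so this wrapper synthesizes
// IMAGE_STATE entries for them, seeding a partial VkImageCreateInfo from the swapchain's create info
// and binding them to the MEMTRACKER_SWAP_CHAIN_IMAGE_KEY sentinel instead of a real VkDeviceMemory.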
12151VKAPI_ATTR VkResult VKAPI_CALL
12152GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
12153    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
12154    VkResult result = dev_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
12155
12156    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
12157        // This should never happen and is checked by param checker.
12158        if (!pCount)
12159            return result;
12160        std::lock_guard<std::mutex> lock(global_lock);
12161        const size_t count = *pCount;
12162        auto swapchain_node = getSwapchainNode(dev_data, swapchain);
12163        if (swapchain_node && !swapchain_node->images.empty()) {
12164            // TODO : Not sure I like the memcmp here, but it works
12165            const bool mismatch = (swapchain_node->images.size() != count ||
12166                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
12167            if (mismatch) {
12168                // TODO: Verify against Valid Usage section of extension
12169                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12170                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
12171                        "vkGetSwapchainInfoKHR(0x%" PRIx64
12172                        ", VK_SWAP_CHAIN_INFO_TYPE_PERSISTENT_IMAGES_KHR) returned mismatching data",
12173                        (uint64_t)(swapchain));
12174            }
12175        }
12176        for (uint32_t i = 0; swapchain_node && i < *pCount; ++i) {  // tolerate unknown swapchain handles
12177            IMAGE_LAYOUT_NODE image_layout_node;
12178            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
12179            image_layout_node.format = swapchain_node->createInfo.imageFormat;
12180            // Add imageMap entries for each swapchain image
12181            VkImageCreateInfo image_ci = {};
12182            image_ci.mipLevels = 1;
12183            image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
12184            image_ci.usage = swapchain_node->createInfo.imageUsage;
12185            image_ci.format = swapchain_node->createInfo.imageFormat;
12186            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
12187            image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
12188            image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
12189            image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
12190            dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
12191            auto &image_state = dev_data->imageMap[pSwapchainImages[i]];
12192            image_state->valid = false;
12193            image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
12194            swapchain_node->images.push_back(pSwapchainImages[i]);
12195            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
12196            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
12197            dev_data->imageLayoutMap[subpair] = image_layout_node;
12198            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
12199        }
12200    }
12201    return result;
12202}
12203
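// vkQueuePresentKHR checks: each wait semaphore must be signalable, each presented image must have
// been acquired and be in VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, and (outside Android) the queue family
// must have established surface support via vkGetPhysicalDeviceSurfaceSupportKHR. After a present
// that reached the ICD, wait semaphores are unsignaled and the images are marked released to the WSI.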
12204VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
12205    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
12206    bool skip_call = false;
12207
12208    std::lock_guard<std::mutex> lock(global_lock);
12209    auto queue_state = getQueueState(dev_data, queue);
12210
12211    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
12212        auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
12213        if (pSemaphore && !pSemaphore->signaled) {
12214            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
12215                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
12216                                 "DS", "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
12217                                 reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
12218        }
12219    }
12220
12221    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
12222        auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
12223        if (swapchain_data) {
12224            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
12225                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12226                                     reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE,
12227                                     "DS", "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
12228                                     pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
12229            }
12230            else {
12231                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
12232                auto image_state = getImageState(dev_data, image);
12233                skip_call |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");
12234
12235                if (!image_state->acquired) {
12236                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12237                                         reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED,
12238                                         "DS", "vkQueuePresentKHR: Swapchain image index %u has not been acquired.",
12239                                         pPresentInfo->pImageIndices[i]);
12240                }
12241
12242                vector<VkImageLayout> layouts;
12243                if (FindLayouts(dev_data, image, layouts)) {
12244                    for (auto layout : layouts) {
12245                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
12246                            skip_call |=
12247                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
12248                                        reinterpret_cast<uint64_t &>(queue), __LINE__, VALIDATION_ERROR_01964, "DS",
12249                                        "Images passed to present must be in layout "
12250                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but is in %s. %s",
12251                                        string_VkImageLayout(layout), validation_error_map[VALIDATION_ERROR_01964]);
12252                        }
12253                    }
12254                }
12255            }
12256
12257            // All physical devices and queue families are required to be able
12258            // to present to any native window on Android; require the
12259            // application to have established support on any other platform.
12260            if (!dev_data->instance_data->androidSurfaceExtensionEnabled) {
12261                auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
12262                auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});
12263
12264                if (support_it == surface_state->gpu_queue_support.end()) {
12265                    skip_call |=
12266                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12267                                reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__,
12268                                DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE, "DS", "vkQueuePresentKHR: Presenting image without calling "
12269                                                                             "vkGetPhysicalDeviceSurfaceSupportKHR");
12270                } else if (!support_it->second) {
12271                    skip_call |=
12272                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12273                                reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, VALIDATION_ERROR_01961,
12274                                "DS", "vkQueuePresentKHR: Presenting image on queue that cannot "
12275                                      "present to this surface. %s",
12276                                validation_error_map[VALIDATION_ERROR_01961]);
12277                }
12278            }
12279        }
12280    }
12281
12282    if (skip_call) {
12283        return VK_ERROR_VALIDATION_FAILED_EXT;
12284    }
12285
12286    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
12287
12288    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
12289        // Semaphore waits occur before error generation, if the call reached
12290        // the ICD. (Confirm?)
12291        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
12292            auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
12293            if (pSemaphore) {
12294                pSemaphore->signaler.first = VK_NULL_HANDLE;
12295                pSemaphore->signaled = false;
12296            }
12297        }
12298
12299        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
12300            // Note: this is imperfect, in that we can get confused about what
12301            // did or didn't succeed-- but if the app does that, it's confused
12302            // itself just as much.
12303            auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
12304
12305            if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR)
12306                continue; // this present didn't actually happen.
12307
12308            // Mark the image as having been released to the WSI
12309            auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
12310            auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
12311            auto image_state = getImageState(dev_data, image);
12312            image_state->acquired = false;
12313        }
12314
12315        // Note: even though presentation is directed to a queue, there is no
12316        // direct ordering between QP and subsequent work, so QP (and its
12317        // semaphore waits) /never/ participate in any completion proof.
12318    }
12319
12320    return result;
12321}
12322
12323static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
12324                                                     const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
12325                                                     std::vector<SURFACE_STATE *> &surface_state,
12326                                                     std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
12327    if (pCreateInfos) {
12328        std::lock_guard<std::mutex> lock(global_lock);
12329        for (uint32_t i = 0; i < swapchainCount; i++) {
12330            surface_state.push_back(getSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
12331            old_swapchain_state.push_back(getSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
12332            std::stringstream func_name;
12333            func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]";
12334            if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i], old_swapchain_state[i])) {
12335                return true;
12336            }
12337        }
12338    }
12339    return false;
12340}
12341
12342static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
12343                                                    const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
12344                                                    std::vector<SURFACE_STATE *> &surface_state,
12345                                                    std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
12346    if (VK_SUCCESS == result) {
12347        for (uint32_t i = 0; i < swapchainCount; i++) {
12348            auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
12349            surface_state[i]->swapchain = swapchain_state.get();
12350            dev_data->device_extensions.swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
12351        }
12352    } else {
12353        for (uint32_t i = 0; i < swapchainCount; i++) {
12354            surface_state[i]->swapchain = nullptr;
12355        }
12356    }
12357    // Spec requires that even if CreateSharedSwapchainKHR fails, oldSwapchain behaves as replaced.
12358    for (uint32_t i = 0; i < swapchainCount; i++) {
12359        if (old_swapchain_state[i]) {
12360            old_swapchain_state[i]->replaced = true;
12361        }
12362        surface_state[i]->old_swapchain = old_swapchain_state[i];
12363    }
12364    return;
12365}
12366
12367VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
12368                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
12369                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
12370    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
12371    std::vector<SURFACE_STATE *> surface_state;
12372    std::vector<SWAPCHAIN_NODE *> old_swapchain_state;
12373
12374    if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
12375                                                 old_swapchain_state)) {
12376        return VK_ERROR_VALIDATION_FAILED_EXT;
12377    }
12378
12379    VkResult result =
12380        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
12381
12382    PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
12383                                            old_swapchain_state);
12384
12385    return result;
12386}
12387
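// vkAcquireNextImageKHR: the app must provide at least one of semaphore/fence (otherwise completion
// is unobservable), the semaphore must not already be signaled, the swapchain must not have been
// replaced, and the app may hold at most (imageCount - minImageCount) acquired images at once. A
// successful acquire counts as a signal operation on the semaphore/fence, but with no signaling
// queue, so neither can participate in a completion proof. Typical per-frame usage, for illustration:
//
//     uint32_t image_index;
//     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, acquire_semaphore, VK_NULL_HANDLE, &image_index);
//     // ...submit work waiting on acquire_semaphore, then present image_index...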
12388VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
12389                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
12390    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
12391    bool skip_call = false;
12392
12393    std::unique_lock<std::mutex> lock(global_lock);
12394
12395    if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
12396        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
12397                             reinterpret_cast<uint64_t &>(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
12398                             "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
12399                             "to determine the completion of this operation.");
12400    }
12401
12402    auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
12403    if (pSemaphore && pSemaphore->signaled) {
12404        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
12405                             reinterpret_cast<const uint64_t &>(semaphore), __LINE__, VALIDATION_ERROR_01952, "DS",
12406                             "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state. %s",
12407                             validation_error_map[VALIDATION_ERROR_01952]);
12408    }
12409
12410    auto pFence = getFenceNode(dev_data, fence);
12411    if (pFence) {
12412        skip_call |= ValidateFenceForSubmit(dev_data, pFence);
12413    }
12414
12415    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
12416
12417    if (swapchain_data && swapchain_data->replaced) {
12418        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12419                             reinterpret_cast<uint64_t &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_REPLACED, "DS",
12420                             "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still "
12421                             "present any images it has acquired, but cannot acquire any more.");
12422    }
12423
12424    auto physical_device_state = getPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
12425    if (swapchain_data && physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
12426        uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
12427                                                 [=](VkImage image) { return getImageState(dev_data, image)->acquired; });
12428        if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
12429            skip_call |=
12430                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12431                        reinterpret_cast<uint64_t const &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES, "DS",
12432                        "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
12433                        acquired_images);
12434        }
12435    }
12436
12437    if (swapchain_data && swapchain_data->images.size() == 0) {
12438        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12439                             reinterpret_cast<uint64_t const &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGES_NOT_FOUND, "DS",
12440                             "vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
12441                             "vkGetSwapchainImagesKHR after swapchain creation.");
12442    }
12443
12444    lock.unlock();
12445
12446    if (skip_call)
12447        return VK_ERROR_VALIDATION_FAILED_EXT;
12448
12449    VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
12450
12451    lock.lock();
12452    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
12453        if (pFence) {
12454            pFence->state = FENCE_INFLIGHT;
12455            pFence->signaler.first = VK_NULL_HANDLE;   // ANI isn't on a queue, so this can't participate in a completion proof.
12456        }
12457
12458        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
12459        if (pSemaphore) {
12460            pSemaphore->signaled = true;
12461            pSemaphore->signaler.first = VK_NULL_HANDLE;
12462        }
12463
12464        // Mark the image as acquired.
12465        auto image_state = (swapchain_data && *pImageIndex < swapchain_data->images.size())
12466                               ? getImageState(dev_data, swapchain_data->images[*pImageIndex]) : nullptr;
12467        if (image_state) image_state->acquired = true;
12468    }
12469    lock.unlock();
12470
12471    return result;
12472}
12473
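// Enumeration follows the standard Vulkan two-call idiom, tracked here as a small state machine
// (UNCALLED -> QUERY_COUNT -> QUERY_DETAILS). The expected app-side pattern, for illustration:
//
//     uint32_t count = 0;
//     vkEnumeratePhysicalDevices(instance, &count, NULL);            // QUERY_COUNT
//     std::vector<VkPhysicalDevice> gpus(count);
//     vkEnumeratePhysicalDevices(instance, &count, gpus.data());     // QUERY_DETAILS
//
// Calling with a non-NULL array before querying the count draws a DEVLIMITS_MISSING_QUERY_COUNT warning.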
12474VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
12475                                                        VkPhysicalDevice *pPhysicalDevices) {
12476    bool skip_call = false;
12477    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
12478    assert(instance_data);
12479
12480    // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
12481    if (NULL == pPhysicalDevices) {
12482        instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
12483    } else {
12484        if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
12485            // Flag warning here. You can call this without having queried the count, but it may not be
12486            // robust on platforms with multiple physical devices.
12487            skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12488                                 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
12489                                 "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
12490                                 "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
12491        } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
12492        else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
12493            // Having actual count match count from app is not a requirement, so this can be a warning
12494            skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12495                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
12496                                 "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
12497                                 "supported by this instance is %u.",
12498                                 *pPhysicalDeviceCount, instance_data->physical_devices_count);
12499        }
12500        instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
12501    }
12502    if (skip_call) {
12503        return VK_ERROR_VALIDATION_FAILED_EXT;
12504    }
12505    VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
12506    if (NULL == pPhysicalDevices) {
12507        instance_data->physical_devices_count = *pPhysicalDeviceCount;
12508    } else if (result == VK_SUCCESS) { // Save physical devices
12509        for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
12510            auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
12511            phys_device_state.phys_device = pPhysicalDevices[i];
12512            // Init actual features for each physical device
12513            instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
12514        }
12515    }
12516    return result;
12517}
12518
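// Same two-call idiom as vkEnumeratePhysicalDevices above, tracked per physical device; the saved
// VkQueueFamilyProperties feed later checks such as queue-family surface support at present time.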
12519VKAPI_ATTR void VKAPI_CALL
12520GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
12521    VkQueueFamilyProperties *pQueueFamilyProperties) {
12522    bool skip_call = false;
12523    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12524    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
12525    if (physical_device_state) {
12526        if (!pQueueFamilyProperties) {
12527            physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
12528        }
12529        else {
12530            // Verify that, for each physical device, this function is called first with a NULL pQueueFamilyProperties
12531            // pointer to get the count
12532            if (UNCALLED == physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
12533                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12534                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
12535                    "Call sequence has vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL "
12536                    "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ "
12537                    "NULL pQueueFamilyProperties to query pCount.");
12538            }
12539            // Then verify that the pCount passed in on this second call matches what was returned on the first
12540            if (physical_device_state->queueFamilyPropertiesCount != *pCount) {
12542                // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
12543                // provide as warning
12544                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12545                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
12546                    "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count "
12547                    "supported by this physicalDevice is %u.",
12548                    *pCount, physical_device_state->queueFamilyPropertiesCount);
12549            }
12550            physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
12551        }
12552        if (skip_call) {
12553            return;
12554        }
12555        instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, pQueueFamilyProperties);
12556        if (!pQueueFamilyProperties) {
12557            physical_device_state->queueFamilyPropertiesCount = *pCount;
12558        }
12559        else { // Save queue family properties
12560            if (physical_device_state->queue_family_properties.size() < *pCount)
12561                physical_device_state->queue_family_properties.resize(*pCount);
12562            for (uint32_t i = 0; i < *pCount; i++) {
12563                physical_device_state->queue_family_properties[i] = pQueueFamilyProperties[i];
12564            }
12565        }
12566    }
12567    else {
12568        log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
12569                __LINE__, VALIDATION_ERROR_00028, "DL",
12570                "Invalid physicalDevice (0x%p) passed into vkGetPhysicalDeviceQueueFamilyProperties(). %s", physicalDevice,
12571                validation_error_map[VALIDATION_ERROR_00028]);
12572    }
12573}
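// As with vkEnumeratePhysicalDevices, the warnings above assume the two-call idiom; a minimal
// sketch (names illustrative):
//
//     uint32_t n = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &n, NULL);
//     std::vector<VkQueueFamilyProperties> props(n);
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &n, props.data());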
12574
12575template<typename TCreateInfo, typename FPtr>
12576static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo,
12577                              VkAllocationCallbacks const *pAllocator, VkSurfaceKHR *pSurface,
12578                              FPtr fptr)
12579{
12580    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
12581
12582    // Call down the call chain:
12583    VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
12584
12585    if (result == VK_SUCCESS) {
12586        std::unique_lock<std::mutex> lock(global_lock);
12587        instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
12588        lock.unlock();
12589    }
12590
12591    return result;
12592}
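// CreateSurface is instantiated by the per-platform entry points below: fptr is a
// pointer-to-member of VkLayerInstanceDispatchTable (e.g. &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR),
// so every wrapper shares the same down-chain call and SURFACE_STATE bookkeeping.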
12593
12594VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
12595    bool skip_call = false;
12596    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
12597    std::unique_lock<std::mutex> lock(global_lock);
12598    auto surface_state = getSurfaceState(instance_data, surface);
12599
12600    if (surface_state) {
12601        // TODO: track swapchains created from this surface.
12602        instance_data->surface_map.erase(surface);
12603    }
12604    lock.unlock();
12605
12606    if (!skip_call) {
12607        // Call down the call chain:
12608        instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
12609    }
12610}
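// Note that no check in DestroySurfaceKHR currently sets skip_call; the flag is presumably kept
// so that future checks (e.g. for the swapchain-tracking TODO above) can veto the down-chain call.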
12611
12612VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
12613                                                            const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
12614    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
12615}
12616
12617#ifdef VK_USE_PLATFORM_ANDROID_KHR
12618VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
12619                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
12620    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
12621}
12622#endif // VK_USE_PLATFORM_ANDROID_KHR
12623
12624#ifdef VK_USE_PLATFORM_MIR_KHR
12625VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
12626                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
12627    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
12628}
12629#endif // VK_USE_PLATFORM_MIR_KHR
12630
12631#ifdef VK_USE_PLATFORM_WAYLAND_KHR
12632VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
12633                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
12634    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
12635}
12636#endif // VK_USE_PLATFORM_WAYLAND_KHR
12637
12638#ifdef VK_USE_PLATFORM_WIN32_KHR
12639VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
12640                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
12641    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
12642}
12643#endif // VK_USE_PLATFORM_WIN32_KHR
12644
12645#ifdef VK_USE_PLATFORM_XCB_KHR
12646VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
12647                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
12648    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
12649}
12650#endif // VK_USE_PLATFORM_XCB_KHR
12651
12652#ifdef VK_USE_PLATFORM_XLIB_KHR
12653VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
12654                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
12655    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
12656}
12657#endif // VK_USE_PLATFORM_XLIB_KHR
12658
12659
12660VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
12661                                                                       VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
12662    auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12663
12664    std::unique_lock<std::mutex> lock(global_lock);
12665    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
12666    lock.unlock();
12667
12668    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface,
12669                                                                                        pSurfaceCapabilities);
12670
12671    if (result == VK_SUCCESS) {
12672        physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
12673        physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
12674    }
12675
12676    return result;
12677}
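// The capabilities cached above (image counts, extents, usage flags, etc.) are retained so that
// later checks, such as those at swapchain creation, can consult them without re-querying.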
12678
12679
12680VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
12681                                                                  VkSurfaceKHR surface, VkBool32 *pSupported) {
12682    auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12683    std::unique_lock<std::mutex> lock(global_lock);
12684    auto surface_state = getSurfaceState(instance_data, surface);
12685    lock.unlock();
12686
12687    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface,
12688                                                                                   pSupported);
12689
12690    if (result == VK_SUCCESS) {
12691        surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported != 0);
12692    }
12693
12694    return result;
12695}
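// gpu_queue_support is keyed by the (VkPhysicalDevice, queueFamilyIndex) pair, so later checks can
// ask whether a given queue family was ever reported as able to present to this surface.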
12696
12697
12698VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
12699                                                                       uint32_t *pPresentModeCount,
12700                                                                       VkPresentModeKHR *pPresentModes) {
12701    bool skip_call = false;
12702    auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12703    std::unique_lock<std::mutex> lock(global_lock);
12704    // TODO: this isn't quite right. available modes may differ by surface AND physical device.
12705    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
12706    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
12707
12708    if (pPresentModes) {
12709        // Compare the preliminary value of *pPresentModeCount with the value this time:
12710        auto prev_mode_count = (uint32_t) physical_device_state->present_modes.size();
12711        switch (call_state) {
12712        case UNCALLED:
12713            skip_call |= log_msg(
12714                instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12715                reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
12716                "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModes; but no prior positive "
12717                "value has been seen for pPresentModeCount.");
12718            break;
12719        default:
12720            // In QUERY_COUNT or QUERY_DETAILS state a count has already been recorded; verify this call's count matches
12721            if (*pPresentModeCount != prev_mode_count) {
12722                skip_call |= log_msg(
12723                        instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12724                        reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
12725                        "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that differs from the value "
12726                        "(%u) that was returned when pPresentModes was NULL.",
12727                        *pPresentModeCount, prev_mode_count);
12728            }
12729            break;
12730        }
12731    }
12732    lock.unlock();
12733
12734    if (skip_call)
12735        return VK_ERROR_VALIDATION_FAILED_EXT;
12736
12737    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount, pPresentModes);
12738
12739    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
12740
12741        lock.lock();
12742
12743        if (*pPresentModeCount) {
12744            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
12745            if (*pPresentModeCount > physical_device_state->present_modes.size())
12746                physical_device_state->present_modes.resize(*pPresentModeCount);
12747        }
12748        if (pPresentModes) {
12749            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
12750            for (uint32_t i = 0; i < *pPresentModeCount; i++) {
12751                physical_device_state->present_modes[i] = pPresentModes[i];
12752            }
12753        }
12754    }
12755
12756    return result;
12757}
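// A minimal sketch of the usage pattern the tracking above expects (names illustrative):
//
//     uint32_t n = 0;
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &n, NULL);
//     std::vector<VkPresentModeKHR> modes(n);
//     VkResult r = vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &n, modes.data());
//
// r may be VK_INCOMPLETE if n was smaller than the current mode count, which is why the state
// update above accepts VK_INCOMPLETE as well as VK_SUCCESS.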
12758
12759
12760VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
12761                                                                  uint32_t *pSurfaceFormatCount,
12762                                                                  VkSurfaceFormatKHR *pSurfaceFormats) {
12763    bool skip_call = false;
12764    auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12765    std::unique_lock<std::mutex> lock(global_lock);
12766    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
12767    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
12768
12769    if (pSurfaceFormats) {
12770        auto prev_format_count = (uint32_t) physical_device_state->surface_formats.size();
12771
12772        switch (call_state) {
12773        case UNCALLED:
12774            // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means that the application didn't
12775            // previously call this function with a NULL value of pSurfaceFormats:
12776            skip_call |= log_msg(
12777                instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12778                reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
12779                "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats; but no prior positive "
12780                "value has been seen for pSurfaceFormatCount.");
12781            break;
12782        default:
12783            if (prev_format_count != *pSurfaceFormatCount) {
12784                skip_call |= log_msg(
12785                        instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12786                        reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
12787                        "vkGetPhysicalDeviceSurfaceFormatsKHR() called with *pSurfaceFormatCount (%u) that differs from "
12788                        "the value (%u) that was returned when pSurfaceFormats was NULL.",
12789                        *pSurfaceFormatCount, prev_format_count);
12790            }
12791            break;
12792        }
12793    }
12794    lock.unlock();
12795
12796    if (skip_call)
12797        return VK_ERROR_VALIDATION_FAILED_EXT;
12798
12799    // Call down the call chain:
12800    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
12801                                                                                   pSurfaceFormats);
12802
12803    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
12804
12805        lock.lock();
12806
12807        if (*pSurfaceFormatCount) {
12808            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
12809            if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
12810                physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
12811        }
12812        if (pSurfaceFormats) {
12813            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
12814            for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
12815                physical_device_state->surface_formats[i] = pSurfaceFormats[i];
12816            }
12817        }
12818    }
12819    return result;
12820}
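// The surface-format query mirrors the present-mode query above: the count is cached on the
// NULL-pointer call, and the details are cached (with VK_INCOMPLETE tolerated) on the second call.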
12821
12822
12823VKAPI_ATTR VkResult VKAPI_CALL
12824CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
12825                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
12826    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
12827    VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
12828    if (VK_SUCCESS == res) {
12829        std::lock_guard<std::mutex> lock(global_lock);
12830        res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
12831    }
12832    return res;
12833}
12834
12835VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
12836                                                         VkDebugReportCallbackEXT msgCallback,
12837                                                         const VkAllocationCallbacks *pAllocator) {
12838    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
12839    instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
12840    std::lock_guard<std::mutex> lock(global_lock);
12841    layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
12842}
12843
12844VKAPI_ATTR void VKAPI_CALL
12845DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
12846                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
12847    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
12848    instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
12849}
12850
12851VKAPI_ATTR VkResult VKAPI_CALL
12852EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
12853    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
12854}
12855
12856VKAPI_ATTR VkResult VKAPI_CALL
12857EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
12858    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
12859}
12860
12861VKAPI_ATTR VkResult VKAPI_CALL
12862EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
12863    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
12864        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
12865
12866    return VK_ERROR_LAYER_NOT_PRESENT;
12867}
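// Returning VK_ERROR_LAYER_NOT_PRESENT when pLayerName does not name this layer is the
// conventional layer response; the loader is responsible for queries aimed at other layers or ICDs.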
12868
12869VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
12870                                                                  const char *pLayerName, uint32_t *pCount,
12871                                                                  VkExtensionProperties *pProperties) {
12872    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
12873        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
12874
12875    assert(physicalDevice);
12876
12877    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12878    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
12879}
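// When pLayerName names this layer, the query is answered directly (this layer exposes no device
// extensions of its own); otherwise it is passed down the chain with a NULL pLayerName.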
12880
12881static PFN_vkVoidFunction
12882intercept_core_instance_command(const char *name);
12883
12884static PFN_vkVoidFunction
12885intercept_core_device_command(const char *name);
12886
12887static PFN_vkVoidFunction
12888intercept_khr_swapchain_command(const char *name, VkDevice dev);
12889
12890static PFN_vkVoidFunction
12891intercept_khr_surface_command(const char *name, VkInstance instance);
12892
12893VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
12894    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
12895    if (proc)
12896        return proc;
12897
12898    assert(dev);
12899
12900    proc = intercept_khr_swapchain_command(funcName, dev);
12901    if (proc)
12902        return proc;
12903
12904    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
12905
12906    auto &table = dev_data->dispatch_table;
12907    if (!table.GetDeviceProcAddr)
12908        return nullptr;
12909    return table.GetDeviceProcAddr(dev, funcName);
12910}
12911
12912VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
12913    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
12914    if (!proc)
12915        proc = intercept_core_device_command(funcName);
12916    if (!proc)
12917        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
12918    if (!proc)
12919        proc = intercept_khr_surface_command(funcName, instance);
12920    if (proc)
12921        return proc;
12922
12923    assert(instance);
12924
12925    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
12926    proc = debug_report_get_instance_proc_addr(instance_data->report_data, funcName);
12927    if (proc)
12928        return proc;
12929
12930    auto &table = instance_data->dispatch_table;
12931    if (!table.GetInstanceProcAddr)
12932        return nullptr;
12933    return table.GetInstanceProcAddr(instance, funcName);
12934}
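// Resolution order above: core instance commands, core device commands, WSI swapchain commands,
// WSI surface commands, then the debug-report entry points, and finally whatever the next layer or
// ICD returns through the down-chain GetInstanceProcAddr.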
12935
12936static PFN_vkVoidFunction
12937intercept_core_instance_command(const char *name) {
12938    static const struct {
12939        const char *name;
12940        PFN_vkVoidFunction proc;
12941    } core_instance_commands[] = {
12942        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
12943        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
12944        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
12945        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
12946        { "vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices) },
12947        { "vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties) },
12948        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
12949        { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
12950        { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
12951        { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
12952        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
12953    };
12954
12955    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
12956        if (!strcmp(core_instance_commands[i].name, name))
12957            return core_instance_commands[i].proc;
12958    }
12959
12960    return nullptr;
12961}
12962
12963static PFN_vkVoidFunction
12964intercept_core_device_command(const char *name) {
12965    static const struct {
12966        const char *name;
12967        PFN_vkVoidFunction proc;
12968    } core_device_commands[] = {
12969        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
12970        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
12971        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
12972        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
12973        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
12974        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
12975        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
12976        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
12977        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
12978        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
12979        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
12980        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
12981        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
12982        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
12983        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
12984        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
12985        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
12986        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
12987        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
12988        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
12989        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
12990        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
12991        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
12992        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
12993        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
12994        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
12995        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
12996        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
12997        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
12998        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
12999        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
13000        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
13001        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
13002        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
13003        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
13004        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
13005        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
13006        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
13007        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
13008        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
13009        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
13010        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
13011        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
13012        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
13013        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
13014        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
13015        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
13016        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
13017        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
13018        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
13019        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
13020        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
13021        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
13022        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
13023        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
13024        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
13025        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
13026        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
13027        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
13028        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
13029        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
13030        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
13031        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
13032        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
13033        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
13034        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
13035        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
13036        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
13037        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
13038        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
13039        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
13040        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
13041        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
13042        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
13043        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
13044        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
13045        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
13046        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
13047        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
13048        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
13049        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
13050        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
13051        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
13052        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
13053        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
13054        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
13055        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
13056        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
13057        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
13058        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
13059        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
13060        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
13061        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
13062        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
13063        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
13064        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
13065        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
13066        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
13067        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
13068        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
13069        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
13070        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
13071        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
13072        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
13073        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
13074        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
13075        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
13076        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
13077        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
13078        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
13079        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
13080        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
13081        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
13082        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
13083        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
13084        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
13085    };
13086
13087    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
13088        if (!strcmp(core_device_commands[i].name, name))
13089            return core_device_commands[i].proc;
13090    }
13091
13092    return nullptr;
13093}
13094
13095static PFN_vkVoidFunction
13096intercept_khr_swapchain_command(const char *name, VkDevice dev) {
13097    static const struct {
13098        const char *name;
13099        PFN_vkVoidFunction proc;
13100    } khr_swapchain_commands[] = {
13101        { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
13102        { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
13103        { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
13104        { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
13105        { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
13106    };
13107    layer_data *dev_data = nullptr;
13108
13109    if (dev) {
13110        dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
13111        if (!dev_data->device_extensions.wsi_enabled)
13112            return nullptr;
13113    }
13114
13115    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
13116        if (!strcmp(khr_swapchain_commands[i].name, name))
13117            return khr_swapchain_commands[i].proc;
13118    }
13119
13120    if (dev_data) {
13121        if (!dev_data->device_extensions.wsi_display_swapchain_enabled)
13122            return nullptr;
13123    }
13124
13125    if (!strcmp("vkCreateSharedSwapchainsKHR", name))
13126        return reinterpret_cast<PFN_vkVoidFunction>(CreateSharedSwapchainsKHR);
13127
13128    return nullptr;
13129}
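// When dev is VK_NULL_HANDLE (i.e. resolution via vkGetInstanceProcAddr), the wsi_enabled checks
// above are skipped, since there is not yet any device state to consult.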
13130
13131static PFN_vkVoidFunction
13132intercept_khr_surface_command(const char *name, VkInstance instance) {
13133    static const struct {
13134        const char *name;
13135        PFN_vkVoidFunction proc;
13136        bool instance_layer_data::*enable;
13137    } khr_surface_commands[] = {
13138#ifdef VK_USE_PLATFORM_ANDROID_KHR
13139        {"vkCreateAndroidSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateAndroidSurfaceKHR),
13140            &instance_layer_data::androidSurfaceExtensionEnabled},
13141#endif // VK_USE_PLATFORM_ANDROID_KHR
13142#ifdef VK_USE_PLATFORM_MIR_KHR
13143        {"vkCreateMirSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateMirSurfaceKHR),
13144            &instance_layer_data::mirSurfaceExtensionEnabled},
13145#endif // VK_USE_PLATFORM_MIR_KHR
13146#ifdef VK_USE_PLATFORM_WAYLAND_KHR
13147        {"vkCreateWaylandSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWaylandSurfaceKHR),
13148            &instance_layer_data::waylandSurfaceExtensionEnabled},
13149#endif // VK_USE_PLATFORM_WAYLAND_KHR
13150#ifdef VK_USE_PLATFORM_WIN32_KHR
13151        {"vkCreateWin32SurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWin32SurfaceKHR),
13152            &instance_layer_data::win32SurfaceExtensionEnabled},
13153#endif // VK_USE_PLATFORM_WIN32_KHR
13154#ifdef VK_USE_PLATFORM_XCB_KHR
13155        {"vkCreateXcbSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXcbSurfaceKHR),
13156            &instance_layer_data::xcbSurfaceExtensionEnabled},
13157#endif // VK_USE_PLATFORM_XCB_KHR
13158#ifdef VK_USE_PLATFORM_XLIB_KHR
13159        {"vkCreateXlibSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXlibSurfaceKHR),
13160            &instance_layer_data::xlibSurfaceExtensionEnabled},
13161#endif // VK_USE_PLATFORM_XLIB_KHR
13162        { "vkCreateDisplayPlaneSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateDisplayPlaneSurfaceKHR),
13163            &instance_layer_data::displayExtensionEnabled},
13164        {"vkDestroySurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySurfaceKHR),
13165            &instance_layer_data::surfaceExtensionEnabled},
13166        {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceCapabilitiesKHR),
13167            &instance_layer_data::surfaceExtensionEnabled},
13168        {"vkGetPhysicalDeviceSurfaceSupportKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceSupportKHR),
13169            &instance_layer_data::surfaceExtensionEnabled},
13170        {"vkGetPhysicalDeviceSurfacePresentModesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfacePresentModesKHR),
13171            &instance_layer_data::surfaceExtensionEnabled},
13172        {"vkGetPhysicalDeviceSurfaceFormatsKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceFormatsKHR),
13173            &instance_layer_data::surfaceExtensionEnabled},
13174    };
13175
13176    instance_layer_data *instance_data = nullptr;
13177    if (instance) {
13178        instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
13179    }
13180
13181    for (size_t i = 0; i < ARRAY_SIZE(khr_surface_commands); i++) {
13182        if (!strcmp(khr_surface_commands[i].name, name)) {
13183            if (instance_data && !(instance_data->*(khr_surface_commands[i].enable)))
13184                return nullptr;
13185            return khr_surface_commands[i].proc;
13186        }
13187    }
13188
13189    return nullptr;
13190}
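// Each surface command is gated by a bool pointer-to-data-member of instance_layer_data, so a
// command is intercepted only if the application enabled the corresponding surface extension.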
13191
13192} // namespace core_validation
13193
13194// vk_layer_logging.h expects these to be defined
13195
13196VKAPI_ATTR VkResult VKAPI_CALL
13197vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
13198                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
13199    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
13200}
13201
13202VKAPI_ATTR void VKAPI_CALL
13203vkDestroyDebugReportCallbackEXT(VkInstance instance,
13204                                VkDebugReportCallbackEXT msgCallback,
13205                                const VkAllocationCallbacks *pAllocator) {
13206    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
13207}
13208
13209VKAPI_ATTR void VKAPI_CALL
13210vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
13211                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
13212    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
13213}
13214
13215// loader-layer interface v0, just wrappers since this library contains only a single layer
13216
13217VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
13218vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
13219    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
13220}
13221
13222VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
13223vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
13224    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
13225}
13226
13227VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
13228vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
13229    // the layer command handles VK_NULL_HANDLE just fine internally; the loader should only call this with VK_NULL_HANDLE (hence the assert)
13230    assert(physicalDevice == VK_NULL_HANDLE);
13231    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
13232}
13233
13234VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
13235                                                                                    const char *pLayerName, uint32_t *pCount,
13236                                                                                    VkExtensionProperties *pProperties) {
13237    // the layer command handles VK_NULL_HANDLE just fine internally; the loader should only call this with VK_NULL_HANDLE (hence the assert)
13238    assert(physicalDevice == VK_NULL_HANDLE);
13239    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
13240}
13241
13242VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
13243    return core_validation::GetDeviceProcAddr(dev, funcName);
13244}
13245
13246VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
13247    return core_validation::GetInstanceProcAddr(instance, funcName);
13248}
13249