core_validation.cpp revision 410598cceec2fd4fd53b470dd77c9ea6a001c181
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
//#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...) \
    { \
        printf(__VA_ARGS__); \
        printf("\n"); \
    }
#endif

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

using namespace std;

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods. A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);

// A special value of (0xFFFFFFFF, 0xFFFFFFFF) indicates that the surface size will be determined
// by the extent of a swapchain targeting the surface.
static const uint32_t kSurfaceSizeFromSwapchain = 0xFFFFFFFFu;

struct devExts {
    bool wsi_enabled;
    bool wsi_display_swapchain_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
    uint32_t physical_devices_count = 0;
    CHECK_DISABLED disabled = {};

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;

    bool surfaceExtensionEnabled = false;
    bool displayExtensionEnabled = false;
    bool androidSurfaceExtensionEnabled = false;
    bool mirSurfaceExtensionEnabled = false;
    bool waylandSurfaceExtensionEnabled = false;
    bool win32SurfaceExtensionEnabled = false;
    bool xcbSurfaceExtensionEnabled = false;
    bool xlibSurfaceExtensionEnabled = false;
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;

    devExts device_extensions = {};
    unordered_set<VkQueue> queues; // All queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_STATE> queueMap;
    unordered_map<VkEvent, EVENT_STATE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_STATE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;

    VkDevice device = VK_NULL_HANDLE;
    VkPhysicalDevice physical_device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr; // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;
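// Illustrative sketch (not part of the original file): layer entry points recover their
// per-device / per-instance state from the two maps above by keying on the dispatchable
// handle, roughly along these lines ('device' and 'instance' are hypothetical parameters;
// get_dispatch_key/get_my_data_ptr come from the layer utility headers included above):
//
//     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
//     instance_layer_data *inst_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);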
static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
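// Illustrative sketch (not part of the original file): spirv_inst_iter and shader_module
// are meant to be used together, e.g. scanning a module's instruction stream with a
// range-based for ('module' is a hypothetical shader_module built from a create info):
//
//     for (auto insn : module) {                     // begin()/end() skip the 5-word SPIR-V header
//         if (insn.opcode() == spv::OpEntryPoint) {  // opcode lives in the low 16 bits of the first word
//             uint32_t execution_model = insn.word(1);
//             (void)execution_model;                 // e.g. spv::ExecutionModelVertex
//         }
//     }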
// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *getImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_STATE *getSamplerState(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
IMAGE_STATE *getImageState(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer state ptr for specified buffer or else NULL
BUFFER_STATE *getBufferState(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view state ptr for specified bufferView or else NULL
BUFFER_VIEW_STATE *getBufferViewState(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}
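// Illustrative sketch (not part of the original file): the get*State/get*Node helpers in
// this file are plain map lookups that return nullptr for unknown handles, so callers
// take global_lock and null-check before dereferencing ('dev_data' and 'image' are
// hypothetical locals of an enclosing entry point):
//
//     std::unique_lock<std::mutex> lock(global_lock);
//     IMAGE_STATE *image_state = getImageState(dev_data, image);
//     if (image_state) {
//         // Safe to inspect image_state->createInfo, image_state->binding, etc.
//     }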
EVENT_STATE *getEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *getQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_STATE *getQueueState(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *getPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

SURFACE_STATE *getSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    auto it = instance_data->surface_map.find(surface);
    if (it == instance_data->surface_map.end()) {
        return nullptr;
    }
    return &it->second;
}

// Return ptr to memory binding for given handle of specified type
static BINDABLE *GetObjectMemBinding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return getImageState(my_data, VkImage(handle));
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return getBufferState(my_data, VkBuffer(handle));
    default:
        break;
    }
    return nullptr;
}
// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
// Verify that (actual & desired) flags != 0 or,
// if strict is true, verify that (actual & desired) flags == desired
// In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict, uint64_t obj_handle,
                                 VkDebugReportObjectTypeEXT obj_type, int32_t const msgCode, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skip_call = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        if (msgCode == -1) {
            // TODO: Fix callers with msgCode == -1 to use correct validation checks.
            skip_call =
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                        MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                        " used by %s. In this case, %s should have %s set during creation.",
                        ty_str, obj_handle, func_name, ty_str, usage_str);
        } else {
            const char *valid_usage = (msgCode == -1) ? "" : validation_error_map[msgCode];
            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__, msgCode, "MEM",
                                "Invalid usage flag for %s 0x%" PRIxLEAST64
                                " used by %s. In this case, %s should have %s set during creation. %s",
                                ty_str, obj_handle, func_name, ty_str, usage_str, valid_usage);
        }
    }
    return skip_call;
}

// Helper function to validate usage flags for images
// For given image_state send actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
static bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_STATE const *image_state, VkFlags desired, VkBool32 strict,
                                    int32_t const msgCode, char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, image_state->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                msgCode, "image", func_name, usage_string);
}
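// Illustrative sketch (not part of the original file): a typical caller checks that an
// object was created with the usage bits an API call needs, e.g. requiring TRANSFER_SRC
// on the source image of a copy ('skip_call', 'dev_data' and 'image_state' are
// hypothetical locals; a msgCode of -1 selects the generic MEMTRACK_INVALID_USAGE_FLAG path):
//
//     skip_call |= ValidateImageUsageFlags(dev_data, image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
//                                          -1, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");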
// Helper function to validate usage flags for buffers
// For given buffer_state send actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
static bool ValidateBufferUsageFlags(layer_data *dev_data, BUFFER_STATE const *buffer_state, VkFlags desired, VkBool32 strict,
                                     int32_t const msgCode, char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, buffer_state->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(buffer_state->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                msgCode, "buffer", func_name, usage_string);
}

// Return ptr to info in map container containing mem, or NULL if not found
// Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// Helper function to print lowercase string of object type
// TODO: Unify string helper functions, this should really come out of a string helper if not there already
static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
        return "image view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
        return "buffer view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
        return "descriptor set";
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
        return "framebuffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
        return "event";
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
        return "query pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
        return "descriptor pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
        return "command pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
        return "pipeline";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
        return "sampler";
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
        return "renderpass";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
        return "device memory";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
        return "semaphore";
    default:
        return "unknown";
    }
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
                                  VkDebugReportObjectTypeEXT type, const char *functionName) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(mem), object_type_to_string(type), bound_object_handle);
        }
    }
    return false;
}
// For given image_state
//  If mem is special swapchain key, then verify that image_state valid member is true
//  Else verify that the image's bound memory range is valid
static bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_state->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(image_state->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image),
                                     VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, functionName);
    }
    return false;
}
// For given buffer_state, verify that the range it's bound to is valid
static bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer),
                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, functionName);
}
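// Illustrative sketch (not part of the original file): the Validate*MemoryIsValid helpers
// above pair with the Set*MemoryValid helpers below. A command that reads an image warns
// if its backing memory was never written, while a command that writes it marks the range
// valid ('skip_call', 'dev_data' and 'image_state' are hypothetical locals of a vkCmd* handler):
//
//     skip_call |= ValidateImageMemoryIsValid(dev_data, image_state, "vkCmdCopyImage()"); // read side
//     SetImageMemoryValid(dev_data, image_state, true);                                   // write side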
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_state to valid param value
//  Else set the image's bound memory range to valid param value
static void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_state->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
static void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
    SetMemoryValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer), valid);
}
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skip_call = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, cb);
            pMemInfo->cb_bindings.insert(cb_node);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (cb_node) {
                cb_node->memObjs.insert(mem);
            }
        }
    }
    return skip_call;
}

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    sampler_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(sampler_state->sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        for (auto mem_binding : image_state->GetBoundMemory()) {
            DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem_binding);
            if (pMemInfo) {
                pMemInfo->cb_bindings.insert(cb_node);
                // Now update CBInfo's Mem reference list
                cb_node->memObjs.insert(mem_binding);
            }
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
        image_state->cb_bindings.insert(cb_node);
    }
}
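// Illustrative sketch (not part of the original file): a vkCmd* handler that consumes an
// image records the dependency in both directions, so that destroying either object can
// find and invalidate the other ('commandBuffer' and 'image' are hypothetical parameters):
//
//     GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
//     IMAGE_STATE *image_state = getImageState(dev_data, image);
//     if (cb_node && image_state) {
//         AddCommandBufferBindingImage(dev_data, cb_node, image_state);
//     }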
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT});
    auto image_state = getImageState(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_state) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
    // First update CB binding in MemObj mini CB list
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem_binding);
        if (pMemInfo) {
            pMemInfo->cb_bindings.insert(cb_node);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(mem_binding);
        }
    }
    // Now update cb binding for buffer
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buffer_state->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
    buffer_state->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT});
    auto buffer_state = getBufferState(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
    }
}

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (cb_node) {
        if (cb_node->memObjs.size() > 0) {
            for (auto mem : cb_node->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->cb_bindings.erase(cb_node);
                }
            }
            cb_node->memObjs.clear();
        }
        cb_node->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// Clear a single object binding from given memory object, or report error if binding is missing
static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory mem) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
    if (mem_info) {
        mem_info->obj_bindings.erase({handle, type});
    }
    return false;
}

// ClearMemoryObjectBindings clears the binding of objects to memory
// For the given object it pulls the memory bindings and makes sure that the bindings
// no longer refer to the object being cleared. This occurs when objects are destroyed.
static bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    bool skip = false;
    BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
    if (mem_binding) {
        if (!mem_binding->sparse) {
            skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
        } else { // Sparse, clear all bindings
            for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
                skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
            }
        }
    }
    return skip;
}

// For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound. Memory should be bound by calling "
                         "vkBind%sMemory().",
                         api_name, type_name, handle, type_name);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound and previously bound memory was freed. "
                         "Memory must not be freed prior to this operation.",
                         api_name, type_name, handle);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem,
                                          reinterpret_cast<const uint64_t &>(image_state->image), api_name, "Image");
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem,
                                          reinterpret_cast<const uint64_t &>(buffer_state->buffer), api_name, "Buffer");
    }
    return result;
}

// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object
// For NULL mem case, output warning
// Make sure given object is in global object map
// IF a previous binding existed, output validation error
// Otherwise, add reference from objectInfo to memoryInfo
// Add reference off of objInfo
static bool SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
                          const char *apiName) {
    bool skip_call = false;
    // It's an error to bind an object to NULL memory
    if (mem == VK_NULL_HANDLE) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                            "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        // TODO : Add check here to make sure object isn't sparse
        //  VALIDATION_ERROR_00792 for buffers
        //  VALIDATION_ERROR_00804 for images
        assert(!mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = getMemObjInfo(dev_data, mem_binding->binding.mem);
            if (prev_binding) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which has already been bound to mem object 0x%" PRIxLEAST64,
                            apiName, reinterpret_cast<uint64_t &>(mem), handle, reinterpret_cast<uint64_t &>(prev_binding->mem));
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
") to object (0x%" PRIxLEAST64 7943b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch ") which was previous bound to memory that has since been freed. Memory bindings are immutable in " 7953b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch "Vulkan so this attempt to bind to new memory is not allowed.", 7963b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch apiName, reinterpret_cast<uint64_t &>(mem), handle); 7973b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } else { 7983b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch mem_info->obj_bindings.insert({handle, type}); 7993b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch // For image objects, make sure default memory state is correctly set 8003b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch // TODO : What's the best/correct way to handle this? 8013b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) { 8023b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch auto const image_state = getImageState(dev_data, VkImage(handle)); 8033b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (image_state) { 8043b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch VkImageCreateInfo ici = image_state->createInfo; 805f91f0611dbaf29ca0f1d4aecb357ce243a19d2faBen Murdoch if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) { 806f91f0611dbaf29ca0f1d4aecb357ce243a19d2faBen Murdoch // TODO:: More memory state transition stuff. 807f91f0611dbaf29ca0f1d4aecb357ce243a19d2faBen Murdoch } 808f91f0611dbaf29ca0f1d4aecb357ce243a19d2faBen Murdoch } 8093b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 8103b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch mem_binding->binding.mem = mem; 8113b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 8123b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 8133b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 8143b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return skip_call; 8153b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch} 8163b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 8173b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch// For NULL mem case, clear any previous binding Else... 
// Make sure given object is in its object map
// IF a previous binding existed, update binding
// Add reference from objectInfo to memoryInfo
// Add reference off of object's binding info
// Return VK_TRUE if addition is successful, VK_FALSE otherwise
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VkDebugReportObjectTypeEXT type,
                                const char *apiName) {
    bool skip_call = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        assert(mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, binding.mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            mem_binding->sparse_bindings.insert(binding);
        }
    }
    return skip_call;
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
"CMD_FILLBUFFER"; 9003b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_CLEARCOLORIMAGE: 9013b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_CLEARCOLORIMAGE"; 9023b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_CLEARATTACHMENTS: 9033b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_CLEARCOLORATTACHMENT"; 9043b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_CLEARDEPTHSTENCILIMAGE: 9053b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_CLEARDEPTHSTENCILIMAGE"; 9063b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_RESOLVEIMAGE: 9073b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_RESOLVEIMAGE"; 9083b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_SETEVENT: 9093b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_SETEVENT"; 9103b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_RESETEVENT: 9113b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_RESETEVENT"; 9123b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_WAITEVENTS: 9133b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_WAITEVENTS"; 9143b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_PIPELINEBARRIER: 9153b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_PIPELINEBARRIER"; 9163b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_BEGINQUERY: 9173b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_BEGINQUERY"; 9183b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_ENDQUERY: 9193b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_ENDQUERY"; 9203b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_RESETQUERYPOOL: 9213b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_RESETQUERYPOOL"; 9223b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_COPYQUERYPOOLRESULTS: 9233b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_COPYQUERYPOOLRESULTS"; 9243b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_WRITETIMESTAMP: 9253b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_WRITETIMESTAMP"; 9263b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_INITATOMICCOUNTERS: 9273b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_INITATOMICCOUNTERS"; 9283b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_LOADATOMICCOUNTERS: 9293b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_LOADATOMICCOUNTERS"; 9303b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_SAVEATOMICCOUNTERS: 9313b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_SAVEATOMICCOUNTERS"; 9323b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_BEGINRENDERPASS: 9333b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_BEGINRENDERPASS"; 9343b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case CMD_ENDRENDERPASS: 9353b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "CMD_ENDRENDERPASS"; 9363b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch default: 9373b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return "UNKNOWN"; 9383b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 9393b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch} 9403b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 9413b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch// SPIRV utility functions 9423b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdochstatic void build_def_index(shader_module *module) { 9433b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch for (auto insn : 
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;
        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}

// Return the OpEntryPoint instruction whose name and execution model match the requested entrypoint
// name and shader stage, or src->end() if no such entrypoint exists.
static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

// Return a human-readable name for a SPIR-V storage class.
static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
           considering here, OR -- specialize on the fly now.
        */
        return 1;
    }

    return value.word(3);
}

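// Recursively append a human-readable description of the type with the given id to ss,
// e.g. "ptr to uniform struct of (vec4 of float32)".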
static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}
"oddtype"; 11213b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch break; 11223b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 11233b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch} 11243b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 11253b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 11263b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdochstatic std::string describe_type(shader_module const *src, unsigned type) { 11273b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch std::ostringstream ss; 11283b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch describe_type_inner(ss, src, type); 11293b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return ss.str(); 11303b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch} 11313b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 11323b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 11333b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdochstatic bool is_narrow_numeric_type(spirv_inst_iter type) 11343b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch{ 11353b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat) 11363b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return false; 11373b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return type.word(2) < 64; 11383b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch} 11393b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 11403b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 11413b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdochstatic bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) { 11423b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch /* walk two type trees together, and complain about differences */ 11433b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch auto a_insn = a->get_def(a_type); 11443b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch auto b_insn = b->get_def(b_type); 11453b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch assert(a_insn != a->end()); 11463b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch assert(b_insn != b->end()); 11473b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 11483b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) { 11493b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed); 11503b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 11513b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 11523b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) { 11533b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */ 11543b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed); 11553b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 11563b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 11573b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) { 11583b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false); 11593b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 11603b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 11613b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (a_insn.opcode() != 
    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        /* if we haven't resolved array-of-verts by here, we're not going to. */
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        /* match on element type, count. */
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        /* match on element type, count. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}
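
// Return the value stored for id in map, or def when id has no entry.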
static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    case spv::OpTypeVector: {
        auto scalar_type = src->get_def(insn.word(2));
        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
            scalar_type.word(2) : 32;

        /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
         * types require two. */
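        /* e.g. a 4-component vector of 64-bit floats needs (64 * 4 + 127) / 128 = 2 locations,
         * while a vec4 of 32-bit floats needs (32 * 4 + 127) / 128 = 1. */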
        return (bit_width * insn.word(3) + 127) / 128;
    }
    default:
        /* everything else is just 1. */
        return 1;

        /* TODO: extend to handle 64bit scalar types, whose vectors may need
         * multiple locations. */
    }
}

static unsigned get_locations_consumed_by_format(VkFormat format) {
    switch (format) {
    case VK_FORMAT_R64G64B64A64_SFLOAT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_R64G64B64_SFLOAT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64_UINT:
        return 2;
    default:
        return 1;
    }
}

typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    bool is_relaxed_precision;
    /* TODO: collect the name, too? Isn't required to be present. */
};

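// Per-stage interface attributes: whether a stage's input and output interface variables carry an
// extra outer per-vertex array dimension (as in the tessellation and geometry stages).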

typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    bool is_relaxed_precision;
    /* TODO: collect the name, too? Isn't required to be present. */
};

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
    bool arrayed_output;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false, false},
    {"tessellation control shader", true, true},
    {"tessellation evaluation shader", true, false},
    {"geometry shader", true, false},
    {"fragment shader", false, false},
};

static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
    while (true) {

        if (def.opcode() == spv::OpTypePointer) {
            def = src->get_def(def.word(3));
        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
            def = src->get_def(def.word(2));
            is_array_of_verts = false;
        } else if (def.opcode() == spv::OpTypeStruct) {
            return def;
        } else {
            return src->end();
        }
    }
}

static void collect_interface_block_members(shader_module const *src,
                                            std::map<location_t, interface_var> *out,
                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
                                            uint32_t id, uint32_t type_id, bool is_patch) {
    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
        /* this isn't an interface block. */
        return;
    }

    std::unordered_map<unsigned, unsigned> member_components;
    std::unordered_map<unsigned, unsigned> member_relaxed_precision;

    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);

            if (insn.word(3) == spv::DecorationComponent) {
                unsigned component = insn.word(4);
                member_components[member_index] = component;
            }

            if (insn.word(3) == spv::DecorationRelaxedPrecision) {
                member_relaxed_precision[member_index] = 1;
            }
        }
    }

    /* Second pass -- produce the output, from Location decorations */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);
            unsigned member_type_id = type.word(2 + member_index);

            if (insn.word(3) == spv::DecorationLocation) {
                unsigned location = insn.word(4);
                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
                auto component_it = member_components.find(member_index);
                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
                bool is_relaxed_precision = member_relaxed_precision.find(member_index) != member_relaxed_precision.end();

                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v = {};
                    v.id = id;
                    /* TODO: member index in interface_var too? */
                    v.type_id = member_type_id;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_block_member = true;
                    v.is_relaxed_precision = is_relaxed_precision;
                    (*out)[std::make_pair(location + offset, component)] = v;
                }
            }
        }
    }
}
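
/* Illustrative example (an assumption about the front end, not part of the original code): for a
 * block whose members carry their own Location decorations, such as
 *
 *     layout(location = 4) out MyBlock { vec4 a; float b; } blk;   // hypothetical GLSL
 *
 * compilers that decorate the members (Location 4 for `a`, Location 5 for `b`) cause the walk
 * above to add two entries to *out, keyed (4, 0) and (5, 0), each with is_block_member = true and
 * component 0 since no Component decoration is present.
 */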

static std::map<location_t, interface_var> collect_interface_by_location(
        shader_module const *src, spirv_inst_iter entrypoint,
        spv::StorageClass sinterface, bool is_array_of_verts) {

    std::unordered_map<unsigned, unsigned> var_locations;
    std::unordered_map<unsigned, unsigned> var_builtins;
    std::unordered_map<unsigned, unsigned> var_components;
    std::unordered_map<unsigned, unsigned> blocks;
    std::unordered_map<unsigned, unsigned> var_patch;
    std::unordered_map<unsigned, unsigned> var_relaxed_precision;

    for (auto insn : *src) {

        /* We consider two interface models: SSO rendezvous-by-location, and
         * builtins. Complain about anything that fits neither model.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationLocation) {
                var_locations[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBuiltIn) {
                var_builtins[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationComponent) {
                var_components[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBlock) {
                blocks[insn.word(1)] = 1;
            }

            if (insn.word(2) == spv::DecorationPatch) {
                var_patch[insn.word(1)] = 1;
            }

            if (insn.word(2) == spv::DecorationRelaxedPrecision) {
                var_relaxed_precision[insn.word(1)] = 1;
            }
        }
    }

    /* TODO: handle grouped decorations */
    /* TODO: handle index=1 dual source outputs from FS -- two vars will
     * have the same location, and we DON'T want to clobber. */

    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
       terminator, to fill out the rest of the word - so we only need to look at the last byte in
       the word to determine which word contains the terminator.
     */
    uint32_t word = 3;
    while (entrypoint.word(word) & 0xff000000u) {
        ++word;
    }
    ++word;

    std::map<location_t, interface_var> out;

    for (; word < entrypoint.len(); word++) {
        auto insn = src->get_def(entrypoint.word(word));
        assert(insn != src->end());
        assert(insn.opcode() == spv::OpVariable);

        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
            unsigned id = insn.word(2);
            unsigned type = insn.word(1);

            int location = value_or_default(var_locations, id, -1);
            int builtin = value_or_default(var_builtins, id, -1);
            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
            bool is_patch = var_patch.find(id) != var_patch.end();
            bool is_relaxed_precision = var_relaxed_precision.find(id) != var_relaxed_precision.end();

            /* All variables and interface block members in the Input or Output storage classes
             * must be decorated with either a builtin or an explicit location.
             *
             * TODO: integrate the interface block support here. For now, don't complain --
             * a valid SPIRV module will only hit this path for the interface block case, as the
             * individual members of the type are decorated, rather than variable declarations.
             */

            if (location != -1) {
                /* A user-defined interface variable, with a location. Where a variable
                 * occupied multiple locations, emit one result for each.
                 */
                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v = {};
                    v.id = id;
                    v.type_id = type;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_relaxed_precision = is_relaxed_precision;
                    out[std::make_pair(location + offset, component)] = v;
                }
            } else if (builtin == -1) {
                /* An interface block instance */
                collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch);
            }
        }
    }

    return out;
}
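
/* Usage sketch (illustrative only; `vs_entrypoint` and `fs_entrypoint` are hypothetical names for
 * OpEntryPoint iterators): the validate_* routines below drive this collector with the storage
 * class they care about, e.g.
 *
 *     auto vs_inputs  = collect_interface_by_location(vs, vs_entrypoint, spv::StorageClassInput, false);
 *     auto fs_outputs = collect_interface_by_location(fs, fs_entrypoint, spv::StorageClassOutput, false);
 *
 * The result is a std::map keyed by (location, component), so iterating it visits the interface in
 * increasing location order -- which is what the pairwise walks below rely on.
 */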

static std::vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
        debug_report_data *report_data, shader_module const *src,
        std::unordered_set<uint32_t> const &accessible_ids) {

    std::vector<std::pair<uint32_t, interface_var>> out;

    for (auto insn : *src) {
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
                auto attachment_index = insn.word(3);
                auto id = insn.word(1);

                if (accessible_ids.count(id)) {
                    auto def = src->get_def(id);
                    assert(def != src->end());

                    if (def.opcode() == spv::OpVariable && def.word(3) == spv::StorageClassUniformConstant) {
                        auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
                        for (unsigned int offset = 0; offset < num_locations; offset++) {
                            interface_var v = {};
                            v.id = id;
                            v.type_id = def.word(1);
                            v.offset = offset;
                            out.emplace_back(attachment_index + offset, v);
                        }
                    }
                }
            }
        }
    }

    return out;
}
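
/* Illustrative example (an assumption about the GLSL front end, not original code): a fragment
 * shader input attachment declared as
 *
 *     layout(input_attachment_index = 1, set = 0, binding = 2) uniform subpassInput inp;
 *
 * becomes a UniformConstant OpVariable decorated InputAttachmentIndex=1. If that id is reachable
 * from the entrypoint, the loop above emits a single (1, interface_var) pair for it, which the
 * render-pass checks elsewhere in this layer can compare against the subpass description.
 */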

static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
        debug_report_data *report_data, shader_module const *src,
        std::unordered_set<uint32_t> const &accessible_ids) {

    std::unordered_map<unsigned, unsigned> var_sets;
    std::unordered_map<unsigned, unsigned> var_bindings;

    for (auto insn : *src) {
        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
         * DecorationDescriptorSet and DecorationBinding.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationDescriptorSet) {
                var_sets[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBinding) {
                var_bindings[insn.word(1)] = insn.word(3);
            }
        }
    }

    std::vector<std::pair<descriptor_slot_t, interface_var>> out;

    for (auto id : accessible_ids) {
        auto insn = src->get_def(id);
        assert(insn != src->end());

        if (insn.opcode() == spv::OpVariable &&
            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
            unsigned set = value_or_default(var_sets, insn.word(2), 0);
            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);

            interface_var v = {};
            v.id = insn.word(2);
            v.type_id = insn.word(1);
            out.emplace_back(std::make_pair(set, binding), v);
        }
    }

    return out;
}
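
/* Illustrative example (hypothetical declaration, not original code): a uniform buffer declared as
 *
 *     layout(set = 1, binding = 3) uniform UBO { mat4 mvp; } ubo;
 *
 * is an OpVariable in StorageClassUniform decorated DescriptorSet=1 and Binding=3, so the loop
 * above reports it under descriptor_slot_t(1, 3). Missing decorations fall back to (0, 0) via
 * value_or_default.
 */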

static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
                                              shader_stage_attributes const *consumer_stage) {
    bool pass = true;

    auto outputs = collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
    auto inputs = collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);

    auto a_it = outputs.begin();
    auto b_it = inputs.begin();

    /* maps sorted by key (location); walk them together to find mismatches */
    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) {
        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;

        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
                        a_first.second, consumer_stage->name)) {
                pass = false;
            }
            a_it++;
        } else if (a_at_end || a_first > b_first) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
                        producer_stage->name)) {
                pass = false;
            }
            b_it++;
        } else {
            // subtleties of arrayed interfaces:
            // - if is_patch, then the member is not arrayed, even though the interface may be.
            // - if is_block_member, then the extra array level of an arrayed interface is not
            //   expressed in the member type -- it's expressed in the block type.
            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
                             true)) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
                            a_first.first, a_first.second,
                            describe_type(producer, a_it->second.type_id).c_str(),
                            describe_type(consumer, b_it->second.type_id).c_str())) {
                    pass = false;
                }
            }
            if (a_it->second.is_patch != b_it->second.is_patch) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
                            "per-%s in %s stage", a_first.first, a_first.second,
                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
"patch" : "vertex", consumer_stage->name)) { 16263b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch pass = false; 16273b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 16283b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 16293b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (a_it->second.is_relaxed_precision != b_it->second.is_relaxed_precision) { 16303b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0, 16313b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", 16323b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch "Decoration mismatch on location %u.%u: %s and %s stages differ in precision", 16333b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch a_first.first, a_first.second, 16343b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch producer_stage->name, 16353b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch consumer_stage->name)) { 16363b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch pass = false; 16373b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 16383b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 16393b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch a_it++; 16403b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch b_it++; 16413b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 16423b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 16433b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 16443b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return pass; 16453b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch} 16463b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 16473b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdochenum FORMAT_TYPE { 16483b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch FORMAT_TYPE_UNDEFINED, 16493b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */ 16503b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch FORMAT_TYPE_SINT, 16513b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch FORMAT_TYPE_UINT, 16523b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch}; 16533b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 16543b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdochstatic unsigned get_format_type(VkFormat fmt) { 16553b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch switch (fmt) { 16563b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case VK_FORMAT_UNDEFINED: 16573b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return FORMAT_TYPE_UNDEFINED; 16583b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case VK_FORMAT_R8_SINT: 16593b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case VK_FORMAT_R8G8_SINT: 16603b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case VK_FORMAT_R8G8B8_SINT: 16613b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case VK_FORMAT_R8G8B8A8_SINT: 16623b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case VK_FORMAT_R16_SINT: 16633b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case VK_FORMAT_R16G16_SINT: 16643b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case VK_FORMAT_R16G16B16_SINT: 16653b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case VK_FORMAT_R16G16B16A16_SINT: 16663b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case VK_FORMAT_R32_SINT: 16673b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case VK_FORMAT_R32G32_SINT: 16683b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case VK_FORMAT_R32G32B32_SINT: 

enum FORMAT_TYPE {
    FORMAT_TYPE_UNDEFINED,
    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
    FORMAT_TYPE_SINT,
    FORMAT_TYPE_UINT,
};

static unsigned get_format_type(VkFormat fmt) {
    switch (fmt) {
    case VK_FORMAT_UNDEFINED:
        return FORMAT_TYPE_UNDEFINED;
    case VK_FORMAT_R8_SINT:
    case VK_FORMAT_R8G8_SINT:
    case VK_FORMAT_R8G8B8_SINT:
    case VK_FORMAT_R8G8B8A8_SINT:
    case VK_FORMAT_R16_SINT:
    case VK_FORMAT_R16G16_SINT:
    case VK_FORMAT_R16G16B16_SINT:
    case VK_FORMAT_R16G16B16A16_SINT:
    case VK_FORMAT_R32_SINT:
    case VK_FORMAT_R32G32_SINT:
    case VK_FORMAT_R32G32B32_SINT:
    case VK_FORMAT_R32G32B32A32_SINT:
    case VK_FORMAT_R64_SINT:
    case VK_FORMAT_R64G64_SINT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_B8G8R8_SINT:
    case VK_FORMAT_B8G8R8A8_SINT:
    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
        return FORMAT_TYPE_SINT;
    case VK_FORMAT_R8_UINT:
    case VK_FORMAT_R8G8_UINT:
    case VK_FORMAT_R8G8B8_UINT:
    case VK_FORMAT_R8G8B8A8_UINT:
    case VK_FORMAT_R16_UINT:
    case VK_FORMAT_R16G16_UINT:
    case VK_FORMAT_R16G16B16_UINT:
    case VK_FORMAT_R16G16B16A16_UINT:
    case VK_FORMAT_R32_UINT:
    case VK_FORMAT_R32G32_UINT:
    case VK_FORMAT_R32G32B32_UINT:
    case VK_FORMAT_R32G32B32A32_UINT:
    case VK_FORMAT_R64_UINT:
    case VK_FORMAT_R64G64_UINT:
    case VK_FORMAT_R64G64B64_UINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_B8G8R8_UINT:
    case VK_FORMAT_B8G8R8A8_UINT:
    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
        return FORMAT_TYPE_UINT;
    default:
        return FORMAT_TYPE_FLOAT;
    }
}

/* characterizes a SPIR-V type appearing in an interface to a FF stage,
 * for comparison to a VkFormat's characterization above.
 */
static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeInt:
        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
    case spv::OpTypeFloat:
        return FORMAT_TYPE_FLOAT;
    case spv::OpTypeVector:
        return get_fundamental_type(src, insn.word(2));
    case spv::OpTypeMatrix:
        return get_fundamental_type(src, insn.word(2));
    case spv::OpTypeArray:
        return get_fundamental_type(src, insn.word(2));
    case spv::OpTypePointer:
        return get_fundamental_type(src, insn.word(3));
    case spv::OpTypeImage:
        return get_fundamental_type(src, insn.word(2));

    default:
        return FORMAT_TYPE_UNDEFINED;
    }
}

/* Map a single stage flag bit to a zero-based index: VK_SHADER_STAGE_VERTEX_BIT -> 0, etc. */
static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
    uint32_t bit_pos = u_ffs(stage);
    return bit_pos - 1;
}
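
/* Illustrative pairing (not original code; `type_id_of_vec4` / `type_id_of_ivec4` are hypothetical
 * SPIR-V result ids): the two classifiers above are meant to agree for compatible pipeline state, e.g.
 *
 *     get_format_type(VK_FORMAT_R8G8B8A8_UNORM)      == FORMAT_TYPE_FLOAT   // default case
 *     get_fundamental_type(src, type_id_of_vec4)     == FORMAT_TYPE_FLOAT
 *     get_format_type(VK_FORMAT_R32G32B32A32_SINT)   == FORMAT_TYPE_SINT
 *     get_fundamental_type(src, type_id_of_ivec4)    == FORMAT_TYPE_SINT
 *
 * The vertex-input and fragment-output checks below flag a mismatch whenever the two
 * classifications disagree.
 */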

static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
     * each binding should be specified only once.
     */
    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
    bool pass = true;

    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
        auto desc = &vi->pVertexBindingDescriptions[i];
        auto &binding = bindings[desc->binding];
        if (binding) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
                pass = false;
            }
        } else {
            binding = desc;
        }
    }

    return pass;
}
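
/* Illustrative trigger (hypothetical values, not original code): two binding descriptions that
 * both use binding 0, e.g.
 *
 *     { 0 (binding), 16 (stride), VK_VERTEX_INPUT_RATE_VERTEX   },
 *     { 0 (binding), 32 (stride), VK_VERTEX_INPUT_RATE_INSTANCE },
 *
 * cause the second occurrence to be reported as SHADER_CHECKER_INCONSISTENT_VI above -- the check
 * keys purely on the binding number, regardless of whether the stride or input rate differ.
 */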

static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
                                          shader_module const *vs, spirv_inst_iter entrypoint) {
    bool pass = true;

    auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);

    /* Build index by location */
    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
    if (vi) {
        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
            for (auto j = 0u; j < num_locations; j++) {
                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
            }
        }
    }

    auto it_a = attribs.begin();
    auto it_b = inputs.begin();
    bool used = false;

    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
        auto a_first = a_at_end ? 0 : it_a->first;
        auto b_first = b_at_end ? 0 : it_b->first.first;
        if (!a_at_end && (b_at_end || a_first < b_first)) {
            if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                 __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                                 "Vertex attribute at location %d not consumed by vertex shader", a_first)) {
                pass = false;
            }
            used = false;
            it_a++;
        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Vertex shader consumes input at location %d but not provided",
                        b_first)) {
                pass = false;
            }
            it_b++;
        } else {
            unsigned attrib_type = get_format_type(it_a->second->format);
            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);

            /* type checking */
            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attribute type of `%s` at location %d does not match vertex shader input type of `%s`",
                            string_VkFormat(it_a->second->format), a_first,
                            describe_type(vs, it_b->second.type_id).c_str())) {
                    pass = false;
                }
            }

            /* OK! */
            used = true;
            it_b++;
        }
    }

    return pass;
}
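
/* Worked example (illustrative, hypothetical values): a vertex input state declaring an attribute
 * at location 0 with format VK_FORMAT_R32G32B32A32_SFLOAT against a vertex shader declaring
 * `layout(location = 0) in uvec4 idx;` lands in the final `else` branch above with
 * attrib_type == FORMAT_TYPE_FLOAT and input_type == FORMAT_TYPE_UINT, so
 * SHADER_CHECKER_INTERFACE_TYPE_MISMATCH is reported. An attribute with no matching shader input
 * only rates a performance warning, while a shader input with no attribute backing it is an error
 * (SHADER_CHECKER_INPUT_NOT_PRODUCED).
 */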

static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
                                                    spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci,
                                                    uint32_t subpass_index) {
    std::map<uint32_t, VkFormat> color_attachments;
    auto subpass = rpci->pSubpasses[subpass_index];
    for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
        uint32_t attachment = subpass.pColorAttachments[i].attachment;
        if (attachment == VK_ATTACHMENT_UNUSED)
            continue;
        if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
            color_attachments[i] = rpci->pAttachments[attachment].format;
        }
    }

    bool pass = true;

    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */

    auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);

    auto it_a = outputs.begin();
    auto it_b = color_attachments.begin();

    /* Walk attachment list and outputs together */

    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();

        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "fragment shader writes to output location %d with no matching attachment", it_a->first.first)) {
                pass = false;
            }
            it_a++;
        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by fragment shader",
                        it_b->first)) {
                pass = false;
            }
            it_b++;
        } else {
            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
            unsigned att_type = get_format_type(it_b->second);

            /* type checking */
            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attachment %d of type `%s` does not match fragment shader output type of `%s`", it_b->first,
                            string_VkFormat(it_b->second),
                            describe_type(fs, it_a->second.type_id).c_str())) {
                    pass = false;
                }
            }

            /* OK! */
            it_a++;
            it_b++;
        }
    }

    return pass;
}
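
/* Worked example (illustrative, hypothetical formats): for a subpass with two color attachments of
 * VK_FORMAT_R8G8B8A8_UNORM and VK_FORMAT_R16G16B16A16_SFLOAT, a fragment shader that only declares
 * `layout(location = 0) out vec4 c0;` triggers "Attachment 1 not written by fragment shader" above.
 * Writing an extra output location with no matching attachment only warns, and writing, say, an
 * ivec4 to the UNORM attachment is flagged as a type mismatch since FORMAT_TYPE_SINT differs from
 * FORMAT_TYPE_FLOAT.
 */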
        switch (insn.opcode()) {
        case spv::OpFunction:
            /* scan whole body of the function, enlisting anything interesting */
            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
                switch (insn.opcode()) {
                case spv::OpLoad:
                case spv::OpAtomicLoad:
                case spv::OpAtomicExchange:
                case spv::OpAtomicCompareExchange:
                case spv::OpAtomicCompareExchangeWeak:
                case spv::OpAtomicIIncrement:
                case spv::OpAtomicIDecrement:
                case spv::OpAtomicIAdd:
                case spv::OpAtomicISub:
                case spv::OpAtomicSMin:
                case spv::OpAtomicUMin:
                case spv::OpAtomicSMax:
                case spv::OpAtomicUMax:
                case spv::OpAtomicAnd:
                case spv::OpAtomicOr:
                case spv::OpAtomicXor:
                    worklist.insert(insn.word(3)); /* ptr */
                    break;
                case spv::OpStore:
                case spv::OpAtomicStore:
                    worklist.insert(insn.word(1)); /* ptr */
                    break;
                case spv::OpAccessChain:
                case spv::OpInBoundsAccessChain:
                    worklist.insert(insn.word(3)); /* base ptr */
                    break;
                case spv::OpSampledImage:
                case spv::OpImageSampleImplicitLod:
                case spv::OpImageSampleExplicitLod:
                case spv::OpImageSampleDrefImplicitLod:
                case spv::OpImageSampleDrefExplicitLod:
                case spv::OpImageSampleProjImplicitLod:
                case spv::OpImageSampleProjExplicitLod:
                case spv::OpImageSampleProjDrefImplicitLod:
                case spv::OpImageSampleProjDrefExplicitLod:
                case spv::OpImageFetch:
                case spv::OpImageGather:
                case spv::OpImageDrefGather:
                case spv::OpImageRead:
                case spv::OpImage:
                case spv::OpImageQueryFormat:
                case spv::OpImageQueryOrder:
                case spv::OpImageQuerySizeLod:
                case spv::OpImageQuerySize:
                case spv::OpImageQueryLod:
                case spv::OpImageQueryLevels:
                case spv::OpImageQuerySamples:
                case spv::OpImageSparseSampleImplicitLod:
                case spv::OpImageSparseSampleExplicitLod:
                case spv::OpImageSparseSampleDrefImplicitLod:
                case spv::OpImageSparseSampleDrefExplicitLod:
                case spv::OpImageSparseSampleProjImplicitLod:
                case spv::OpImageSparseSampleProjExplicitLod:
                case spv::OpImageSparseSampleProjDrefImplicitLod:
                case spv::OpImageSparseSampleProjDrefExplicitLod:
                case spv::OpImageSparseFetch:
                case spv::OpImageSparseGather:
                case spv::OpImageSparseDrefGather:
                case spv::OpImageTexelPointer:
                    worklist.insert(insn.word(3)); /* image or sampled image */
                    break;
                case spv::OpImageWrite:
                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
                    break;
                case spv::OpFunctionCall:
                    for (uint32_t i = 3; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); /* fn itself, and all args */
                    }
                    break;

                case spv::OpExtInst:
                    for (uint32_t i = 5; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); /* operands to ext inst */
                    }
                    break;
                }
            }
            break;
        }
    }

    return ids;
}

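/* For the push constant block type used by a shader, check each member's Offset decoration against the
 * pipeline layout's push constant ranges: some range must cover the member and be visible to this stage.
 * (Member size is approximated as 4 bytes here -- see the TODO below.) */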
static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
                                                          std::vector<VkPushConstantRange> const *push_constant_ranges,
                                                          shader_module const *src, spirv_inst_iter type,
                                                          VkShaderStageFlagBits stage) {
    bool pass = true;

    /* strip off ptrs etc */
    type = get_struct_type(src, type, false);
    assert(type != src->end());

    /* validate directly off the offsets. this isn't quite correct for arrays
     * and matrices, but is a good first step. TODO: arrays, matrices, weird
     * sizes */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {

            if (insn.word(3) == spv::DecorationOffset) {
                unsigned offset = insn.word(4);
                auto size = 4; /* bytes; TODO: calculate this based on the type */

                bool found_range = false;
                for (auto const &range : *push_constant_ranges) {
                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
                        found_range = true;

                        if ((range.stageFlags & stage) == 0) {
                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                                        "Push constant range covering variable starting at "
                                        "offset %u not accessible from stage %s",
                                        offset, string_VkShaderStageFlagBits(stage))) {
                                pass = false;
                            }
                        }

                        break;
                    }
                }

                if (!found_range) {
                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
                                "Push constant range covering variable starting at "
                                "offset %u not declared in layout",
                                offset)) {
                        pass = false;
                    }
                }
            }
        }
    }

    return pass;
}

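/* Walk every id reachable from the entrypoint; each push constant variable (OpVariable with
 * StorageClassPushConstant) has its block type validated against the supplied push constant ranges. */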
static bool validate_push_constant_usage(debug_report_data *report_data,
                                         std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
    bool pass = true;

    for (auto id : accessible_ids) {
        auto def_insn = src->get_def(id);
        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
            pass &= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
                                                                  src->get_def(def_insn.word(1)), stage);
        }
    }

    return pass;
}

// For given pipelineLayout verify that the set_layout_node at slot.first
// has the requested binding at slot.second and return ptr to that binding
static VkDescriptorSetLayoutBinding const *get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout,
                                                                  descriptor_slot_t slot) {

    if (!pipelineLayout)
        return nullptr;

    if (slot.first >= pipelineLayout->set_layouts.size())
        return nullptr;

    return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
}

// Block of code at start here for managing/tracking Pipeline state that this layer cares about

static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};

// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
//  Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
//  to that same cmd buffer by separate thread are not changing state from underneath us
// Track the last cmd buffer touched by this thread

static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
        if (pCB->drawCount[i])
            return true;
    }
    return false;
}

// Check object status for selected flag state
static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
    if (!(pNode->status & status_mask)) {
        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
                       "command buffer object 0x%p: %s", pNode->commandBuffer, fail_msg);
    }
    return false;
}
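
// The lookup helpers below translate Vulkan handles into the layer's tracking structures; each returns
// nullptr (rather than asserting) when the handle is unknown to this layer.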

// Retrieve pipeline node ptr for given pipeline object
static PIPELINE_STATE *getPipelineState(layer_data const *my_data, VkPipeline pipeline) {
    auto it = my_data->pipelineMap.find(pipeline);
    if (it == my_data->pipelineMap.end()) {
        return nullptr;
    }
    return it->second;
}

static RENDER_PASS_STATE *getRenderPassState(layer_data const *my_data, VkRenderPass renderpass) {
    auto it = my_data->renderPassMap.find(renderpass);
    if (it == my_data->renderPassMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

static FRAMEBUFFER_STATE *getFramebufferState(const layer_data *my_data, VkFramebuffer framebuffer) {
    auto it = my_data->frameBufferMap.find(framebuffer);
    if (it == my_data->frameBufferMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
    if (it == my_data->descriptorSetLayoutMap.end()) {
        return nullptr;
    }
    return it->second;
}

static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
    if (it == my_data->pipelineLayoutMap.end()) {
        return nullptr;
    }
    return &it->second;
}

// Return true if for a given PSO, the given state enum is dynamic, else return false
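// For example, isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR) tells a caller that scissor state comes from
// vkCmdSetScissor rather than from the pipeline itself; the enum value here is only an illustration.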
static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
                return true;
        }
    }
    return false;
}

// Validate state stored as flags at time of draw call
static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexedDraw) {
    bool result = false;
    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
    }
    if (pPipe->graphicsPipelineCI.pRasterizationState &&
        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
    }
    if (pPipe->blendConstantsEnabled) {
        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
    }
    if (indexedDraw) {
        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
    }
    return result;
}

// Verify attachment reference compatibility according to spec
// If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & other array must match this
// If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
// to make sure that format and samples counts match.
// If not, they are not compatible.
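// Example: with primaryCount == 2, secondaryCount == 3 and index == 2, the primary reference is treated as
// VK_ATTACHMENT_UNUSED, so the pair is compatible only if pSecondary[2].attachment is also VK_ATTACHMENT_UNUSED.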
static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
                                             const VkAttachmentDescription *pSecondaryAttachments) {
    // Check potential NULL cases first to avoid nullptr issues later
    if (pPrimary == nullptr) {
        if (pSecondary == nullptr) {
            return true;
        }
        return false;
    } else if (pSecondary == nullptr) {
        return false;
    }
    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
            return true;
    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
            return true;
    } else { // Format and sample count must match
        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return true;
        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return false;
        }
        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
             pSecondaryAttachments[pSecondary[index].attachment].format) &&
            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
             pSecondaryAttachments[pSecondary[index].attachment].samples))
            return true;
    }
    // Format and sample counts didn't match
    return false;
}
// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
// For given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPassCreateInfo *primaryRPCI,
                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
        stringstream errorStr;
        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
        errorMsg = errorStr.str();
        return false;
    }
    uint32_t spIndex = 0;
    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         primaryColorCount, primaryRPCI->pAttachments,
                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }

        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, primaryRPCI->pAttachments,
                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, secondaryRPCI->pAttachments)) {
            stringstream errorStr;
            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
            errorMsg = errorStr.str();
            return false;
        }

        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
        for (uint32_t i = 0; i < inputMax; ++i) {
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
    }
    return true;
}

// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
                                            string &errorMsg) {
    auto num_sets = pipeline_layout->set_layouts.size();
    if (layoutIndex >= num_sets) {
        stringstream errorStr;
        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
                 << layoutIndex;
        errorMsg = errorStr.str();
        return false;
    }
    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
    return pSet->IsCompatible(layout_node, &errorMsg);
}

// Validate that data for each specialization entry is fully contained within the buffer.
static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
    bool pass = true;

    VkSpecializationInfo const *spec = info->pSpecializationInfo;

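    /* Each map entry must lie entirely within the supplied data buffer: offset + size is checked
     * against dataSize, and any entry that spills past the end fails validation. */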
    if (spec) {
        for (auto i = 0u; i < spec->mapEntryCount; i++) {
            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
                            "Specialization entry %u (for constant id %u) references memory outside provided "
                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
                            " bytes provided)",
                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {

                    pass = false;
                }
            }
        }
    }

    return pass;
}

static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
    auto type = module->get_def(type_id);

    descriptor_count = 1;

    /* Strip off any array or ptrs. Where we remove array levels, adjust the
     * descriptor count for each dimension. */
    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
        if (type.opcode() == spv::OpTypeArray) {
            descriptor_count *= get_constant_value(module, type.word(3));
            type = module->get_def(type.word(2));
        }
        else {
            type = module->get_def(type.word(3));
        }
    }

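    /* `type` now refers to the underlying resource type; dispatch on its opcode to decide which
     * VkDescriptorType values may legally back it. */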
    switch (type.opcode()) {
    case spv::OpTypeStruct: {
        for (auto insn : *module) {
            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
                if (insn.word(2) == spv::DecorationBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
                } else if (insn.word(2) == spv::DecorationBufferBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
                }
            }
        }

        /* Invalid */
        return false;
    }

    case spv::OpTypeSampler:
        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
               descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;

    case spv::OpTypeSampledImage:
        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
            /* Slight relaxation for some GLSL historical madness: samplerBuffer
             * doesn't really have a sampler, and a texel buffer descriptor
             * doesn't really provide one. Allow this slight mismatch.
             */
            auto image_type = module->get_def(type.word(2));
            auto dim = image_type.word(3);
            auto sampled = image_type.word(7);
            return dim == spv::DimBuffer && sampled == 1;
        }
        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;

    case spv::OpTypeImage: {
        /* Many descriptor types backing image types -- depends on dimension
         * and whether the image will be used with a sampler. SPIRV for
         * Vulkan requires that sampled be 1 or 2 -- leaving the decision to
         * runtime is unacceptable.
         */
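        /* For OpTypeImage, word(3) is the Dim operand and word(7) is Sampled (1 = used with a
         * sampler, 2 = storage), following the SPIR-V instruction layout. */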
        auto dim = type.word(3);
        auto sampled = type.word(7);

        if (dim == spv::DimSubpassData) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
        } else if (dim == spv::DimBuffer) {
            if (sampled == 1) {
                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
            } else {
                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
            }
        } else if (sampled == 1) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
                   descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        } else {
            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
        }
    }

    /* We shouldn't really see any other junk types -- but if we do, they're
     * a mismatch.
     */
    default:
        return false; /* Mismatch */
    }
}

static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
    if (!feature) {
        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
                    "Shader requires VkPhysicalDeviceFeatures::%s but is not "
                    "enabled on the device",
                    feature_name)) {
            return false;
        }
    }

    return true;
}

static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
    bool pass = true;

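    /* Scan every OpCapability declaration in the module: capabilities beyond the unconditional
     * Vulkan 1.0 set must be backed by the matching VkPhysicalDeviceFeatures member that the
     * application enabled at device creation. */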
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpCapability) {
            switch (insn.word(1)) {
            case spv::CapabilityMatrix:
            case spv::CapabilityShader:
            case spv::CapabilityInputAttachment:
            case spv::CapabilitySampled1D:
            case spv::CapabilityImage1D:
            case spv::CapabilitySampledBuffer:
            case spv::CapabilityImageBuffer:
            case spv::CapabilityImageQuery:
            case spv::CapabilityDerivativeControl:
                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
                break;

            case spv::CapabilityGeometry:
                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
                break;

            case spv::CapabilityTessellation:
                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
                break;

            case spv::CapabilityFloat64:
                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
                break;

            case spv::CapabilityInt64:
                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
                break;

            case spv::CapabilityTessellationPointSize:
            case spv::CapabilityGeometryPointSize:
                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
                                        "shaderTessellationAndGeometryPointSize");
                break;

            case spv::CapabilityImageGatherExtended:
                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
                break;

            case spv::CapabilityStorageImageMultisample:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityUniformBufferArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
                                        "shaderUniformBufferArrayDynamicIndexing");
                break;

            case spv::CapabilitySampledImageArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
                                        "shaderSampledImageArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageBufferArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
                                        "shaderStorageBufferArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageImageArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
                                        "shaderStorageImageArrayDynamicIndexing");
                break;

            case spv::CapabilityClipDistance:
                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
                break;

            case spv::CapabilityCullDistance:
                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
                break;

            case spv::CapabilityImageCubeArray:
                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilitySampleRateShading:
                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilitySparseResidency:
                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
                break;

            case spv::CapabilityMinLod:
                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
                break;

            case spv::CapabilitySampledCubeArray:
2560f91f0611dbaf29ca0f1d4aecb357ce243a19d2faBen Murdoch pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray"); 25613b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch break; 2562f91f0611dbaf29ca0f1d4aecb357ce243a19d2faBen Murdoch 2563f91f0611dbaf29ca0f1d4aecb357ce243a19d2faBen Murdoch case spv::CapabilityImageMSArray: 25643b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample"); 25653b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch break; 25663b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 25673b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case spv::CapabilityStorageImageExtendedFormats: 25683b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats, 25693b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch "shaderStorageImageExtendedFormats"); 25703b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch break; 25713b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 25723b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case spv::CapabilityInterpolationFunction: 25733b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading"); 25743b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch break; 25753b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 25763b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case spv::CapabilityStorageImageReadWithoutFormat: 25773b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat, 25783b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch "shaderStorageImageReadWithoutFormat"); 25793b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch break; 25803b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 25813b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case spv::CapabilityStorageImageWriteWithoutFormat: 25823b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat, 25833b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch "shaderStorageImageWriteWithoutFormat"); 25843b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch break; 25853b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 25863b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch case spv::CapabilityMultiViewport: 25873b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport"); 25883b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch break; 25893b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 25903b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch default: 25913b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 25923b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC", 25933b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch "Shader declares capability %u, not supported in Vulkan.", 25943b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch insn.word(1))) 25953b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch pass = false; 25963b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch break; 25973b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 25983b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 
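// Collapse a descriptor's SPIR-V type into the DESCRIPTOR_REQ_* bits that the image view bound to it
// must satisfy at draw time, unwrapping arrays, pointers and sampled-image wrappers until an
// OpTypeImage (or a non-image type, which yields no requirements) is reached. For example, a
// non-multisampled arrayed 2D image yields DESCRIPTOR_REQ_SINGLE_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY.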
static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
    auto type = module->get_def(type_id);

    while (true) {
        switch (type.opcode()) {
        case spv::OpTypeArray:
        case spv::OpTypeSampledImage:
            type = module->get_def(type.word(2));
            break;
        case spv::OpTypePointer:
            type = module->get_def(type.word(3));
            break;
        case spv::OpTypeImage: {
            auto dim = type.word(3);
            auto arrayed = type.word(5);
            auto msaa = type.word(6);

            switch (dim) {
            case spv::Dim1D:
                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
            case spv::Dim2D:
                return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
                       (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
            case spv::Dim3D:
                return DESCRIPTOR_REQ_VIEW_TYPE_3D;
            case spv::DimCube:
                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
            case spv::DimSubpassData:
                return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
            default: // buffer, etc.
                return 0;
            }
        }
        default:
            return 0;
        }
    }
}

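// Validate a single shader stage against its SPIR-V module, the enabled device features, and the
// pipeline layout. As a side effect, the requirements of every descriptor reachable from the stage's
// entrypoint are recorded per (set, binding) into pipeline->active_slots for later draw-time checks.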
static bool
validate_pipeline_shader_stage(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *pStage,
                               PIPELINE_STATE *pipeline, shader_module **out_module, spirv_inst_iter *out_entrypoint,
                               VkPhysicalDeviceFeatures const *enabledFeatures,
                               std::unordered_map<VkShaderModule, std::unique_ptr<shader_module>> const &shaderModuleMap) {
    bool pass = true;
    auto module_it = shaderModuleMap.find(pStage->module);
    auto module = *out_module = module_it->second.get();

    /* find the entrypoint */
    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
    if (entrypoint == module->end()) {
        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
                    "No entrypoint found named `%s` for stage %s", pStage->pName,
                    string_VkShaderStageFlagBits(pStage->stage))) {
            return false; // no point continuing beyond here, any analysis is just going to be garbage.
        }
    }

    /* validate shader capabilities against enabled device features */
    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);

    /* mark accessible ids */
    auto accessible_ids = mark_accessible_ids(module, entrypoint);

    /* validate descriptor set layout against what the entrypoint actually uses */
    auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);

    auto pipelineLayout = pipeline->pipeline_layout;

    pass &= validate_specialization_offsets(report_data, pStage);
    pass &= validate_push_constant_usage(report_data, &pipelineLayout.push_constant_ranges, module, accessible_ids, pStage->stage);

    /* validate descriptor use */
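    // Each use is keyed by (set, binding). The slot must exist in the pipeline layout, be visible to
    // this shader stage, match the declared descriptor type, and provide at least as many descriptors
    // as the shader consumes.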
    for (auto use : descriptor_uses) {
        // While validating shaders capture which slots are used by the pipeline
        auto & reqs = pipeline->active_slots[use.first.first][use.first.second];
        reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));

        /* verify given pipelineLayout has requested setLayout with requested binding */
        const auto &binding = get_descriptor_binding(&pipelineLayout, use.first);
        unsigned required_descriptor_count;

        if (!binding) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
                pass = false;
            }
        } else if (~binding->stageFlags & pStage->stage) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                        "Shader uses descriptor slot %u.%u (used "
                        "as type `%s`) but descriptor not "
                        "accessible from stage %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkShaderStageFlagBits(pStage->stage))) {
                pass = false;
            }
        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
                                          /*out*/ required_descriptor_count)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
                        "%u.%u (used as type `%s`) but "
                        "descriptor of type %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkDescriptorType(binding->descriptorType))) {
                pass = false;
            }
        } else if (binding->descriptorCount < required_descriptor_count) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
                        required_descriptor_count, use.first.first, use.first.second,
                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
                pass = false;
            }
        }
    }

    /* validate use of input attachments against subpass structure */
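    // Input attachment uses are keyed by the shader's input attachment index decoration; each index must
    // map to an attachment provided by the current subpass, and the attachment format's fundamental type
    // (float/int/uint) must match the type declared in the shader.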
    if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
        auto input_attachment_uses = collect_interface_by_input_attachment_index(report_data, module, accessible_ids);

        auto rpci = pipeline->render_pass_ci.ptr();
        auto subpass = pipeline->graphicsPipelineCI.subpass;

        for (auto use : input_attachment_uses) {
            auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
            auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount) ?
                    input_attachments[use.first].attachment : VK_ATTACHMENT_UNUSED;

            if (index == VK_ATTACHMENT_UNUSED) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                            SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
                            "Shader consumes input attachment index %d but not provided in subpass",
                            use.first)) {
                    pass = false;
                }
            }
            else if (get_format_type(rpci->pAttachments[index].format) !=
                     get_fundamental_type(module, use.second.type_id)) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                            SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
                            "Subpass input attachment %u format of %s does not match type used in shader `%s`",
                            use.first, string_VkFormat(rpci->pAttachments[index].format),
                            describe_type(module, use.second.type_id).c_str())) {
                    pass = false;
                }
            }
        }
    }

    return pass;
}


// Validate the shaders used by the given pipeline and store the active_slots
// that are actually used by the pipeline into pPipeline->active_slots
static bool
validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
                                           VkPhysicalDeviceFeatures const *enabledFeatures,
                                           std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);

    shader_module *shaders[5];
    memset(shaders, 0, sizeof(shaders));
    spirv_inst_iter entrypoints[5];
    memset(entrypoints, 0, sizeof(entrypoints));
    VkPipelineVertexInputStateCreateInfo const *vi = 0;
    bool pass = true;

    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
        auto pStage = &pCreateInfo->pStages[i];
        auto stage_id = get_shader_stage_id(pStage->stage);
        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
                                               &shaders[stage_id], &entrypoints[stage_id],
                                               enabledFeatures, shaderModuleMap);
    }

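    // Cross-stage interface checks below walk the present stages in pipeline order
    // (vertex -> tessellation -> geometry -> fragment), matching each producer's outputs against the
    // inputs of the next stage that is actually present.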
    // if the shader stages are no good individually, cross-stage validation is pointless.
    if (!pass)
        return false;

    vi = pCreateInfo->pVertexInputState;

    if (vi) {
        pass &= validate_vi_consistency(report_data, vi);
    }

    if (shaders[vertex_stage]) {
        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
    }

    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);

    while (!shaders[producer] && producer != fragment_stage) {
        producer++;
        consumer++;
    }

    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
        assert(shaders[producer]);
        if (shaders[consumer]) {
            pass &= validate_interface_between_stages(report_data,
                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);

            producer = consumer;
        }
    }

    if (shaders[fragment_stage]) {
        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
                                                        pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass);
    }

    return pass;
}

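// A compute pipeline has exactly one shader stage, so its validation reduces to the per-stage checks
// performed by validate_pipeline_shader_stage().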
static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
                                      VkPhysicalDeviceFeatures const *enabledFeatures,
                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
    auto pCreateInfo = pPipeline->computePipelineCI.ptr();

    shader_module *module;
    spirv_inst_iter entrypoint;

    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
}
// Return Set node ptr for specified set or else NULL
cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
    auto set_it = my_data->setMap.find(set);
    if (set_it == my_data->setMap.end()) {
        return NULL;
    }
    return set_it->second;
}
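// Note: each activeSetBindingsPairs entry below is a tuple of (descriptor set, map of binding ->
// draw-time requirements, dynamic offsets bound for that set); the vector is assembled in
// ValidateAndUpdateDrawState() further down.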
// For the given command buffer, verify and update the state for activeSetBindingsPairs
// This includes:
// 1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
//    To be valid, the dynamic offset combined with the offset and range from its
//    descriptor update must not overflow the size of its buffer being updated
// 2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
// 3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
static bool ValidateDrawtimeDescriptorState(
    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
    const vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
        &activeSetBindingsPairs,
    const char *function) {
    bool result = false;
    for (auto set_bindings_pair : activeSetBindingsPairs) {
        cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
        std::string err_str;
        if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair),
                                         &err_str)) {
            // Report error here
            auto set = set_node->GetSet();
            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                              reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                              "Descriptor set 0x%" PRIxLEAST64 " encountered the following validation error at %s() time: %s",
                              reinterpret_cast<const uint64_t &>(set), function, err_str.c_str());
        }
    }
    return result;
}
// Add any updated buffers and images to the cmd buffer's respective update[Buffers|Images] set
static void UpdateDrawtimeDescriptorState(
    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
    const vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
        &activeSetBindingsPairs) {
    for (auto set_bindings_pair : activeSetBindingsPairs) {
        cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
        set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages);
    }
}

// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}

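// Write the indices of the set bits in 'bits' to the stream as a comma-separated list
// (used below to report which dynamic viewports/scissors were never set).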
static void list_bits(std::ostream& s, uint32_t bits) {
    for (int i = 0; i < 32 && bits; i++) {
        if (bits & (1 << i)) {
            s << i;
            bits &= ~(1 << i);
            if (bits) {
                s << ",";
            }
        }
    }
}

// Validate draw-time state related to the PSO
static bool ValidatePipelineDrawtimeState(layer_data const *my_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
                                          PIPELINE_STATE const *pPipeline) {
    bool skip_call = false;

    // Verify vertex binding
    if (pPipeline->vertexBindingDescriptions.size() > 0) {
        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
                skip_call |= log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                    "The Pipeline State Object (0x%" PRIxLEAST64 ") expects that this Command Buffer's vertex binding Index %u "
                    "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
                    "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
                    (uint64_t)state.pipeline_state->pipeline, vertex_binding, i, vertex_binding);
            }
        }
    } else {
        if (!pCB->currentDrawData.buffers.empty()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                                 "Vertex buffers are bound to command buffer (0x%p"
                                 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
                                 pCB->commandBuffer, (uint64_t)state.pipeline_state->pipeline);
        }
    }
    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
    // Skip check if rasterization is disabled or there is no viewport.
    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
        pPipeline->graphicsPipelineCI.pViewportState) {
        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);

        if (dynViewport) {
            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
            if (missingViewportMask) {
                std::stringstream ss;
                ss << "Dynamic viewport(s) ";
                list_bits(ss, missingViewportMask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                     "%s", ss.str().c_str());
            }
        }

        if (dynScissor) {
            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
            if (missingScissorMask) {
                std::stringstream ss;
                ss << "Dynamic scissor(s) ";
                list_bits(ss, missingScissorMask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                     "%s", ss.str().c_str());
            }
        }
    }

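    // The checks above treat pCB->viewportMask / pCB->scissorMask as bitmasks in which bit i records
    // that viewport/scissor index i has been set on this command buffer.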
    // Verify that any MSAA request in PSO matches sample# in bound FB
    // Skip the check if rasterization is disabled.
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
        if (pCB->activeRenderPass) {
            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
            uint32_t i;

            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Render pass subpass %u mismatch with blending state defined and blend state attachment "
                            "count %u while subpass color attachment count %u in Pipeline (0x%" PRIxLEAST64 ")! These "
                            "must be the same at draw-time.",
                            pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
            }

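            // Accumulate the sample-count bits of every color and depth/stencil attachment the subpass
            // uses; a result of zero means the subpass has no attachments, in which case the
            // sample-count comparison below is skipped.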
            unsigned subpass_num_samples = 0;

            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
                auto attachment = subpass_desc->pColorAttachments[i].attachment;
                if (attachment != VK_ATTACHMENT_UNUSED)
                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_desc->pDepthStencilAttachment &&
                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                            "Num samples mismatch! At draw-time in Pipeline (0x%" PRIxLEAST64
                            ") with %u samples while current RenderPass (0x%" PRIxLEAST64 ") w/ %u samples!",
                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
                            reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
            }
        } else {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                                 "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
        }
    }
    // Verify that PSO creation renderPass is compatible with active renderPass
    if (pCB->activeRenderPass) {
        std::string err_string;
        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
            !verify_renderpass_compatibility(my_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
                                             err_string)) {
            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline "
                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass),
                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
        }

        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
                        pCB->activeSubpass);
        }
    }
    // TODO : Add more checks here

    return skip_call;
}

// Validate overall state at the time of a draw call
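// 'bindPoint' selects the graphics or compute bind state to check and 'function' is the API entry
// point name echoed into any error messages.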
static bool ValidateAndUpdateDrawState(layer_data *my_data, GLOBAL_CB_NODE *cb_node, const bool indexedDraw,
                                       const VkPipelineBindPoint bindPoint, const char *function) {
    bool result = false;
    auto const &state = cb_node->lastBound[bindPoint];
    PIPELINE_STATE *pPipe = state.pipeline_state;
    if (nullptr == pPipe) {
        result |= log_msg(
            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
            DRAWSTATE_INVALID_PIPELINE, "DS",
            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
        // Early return as any further checks below will be busted w/o a pipeline
        if (result)
            return true;
    }
    // First check flag states
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
        result = validate_draw_state_flags(my_data, cb_node, pPipe, indexedDraw);

    // Now complete other state checks
    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
        string errorString;
        auto pipeline_layout = pPipe->pipeline_layout;

        // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
        vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
            activeSetBindingsPairs;
        for (auto & setBindingPair : pPipe->active_slots) {
            uint32_t setIndex = setBindingPair.first;
            // If valid set is not bound throw an error
            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
                                  setIndex);
            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
                                                        errorString)) {
                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
                result |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                            "VkDescriptorSet (0x%" PRIxLEAST64
                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
                            reinterpret_cast<uint64_t &>(setHandle), setIndex, reinterpret_cast<uint64_t &>(pipeline_layout.layout),
                            errorString.c_str());
            } else { // Valid set is bound and layout compatible, validate that it's updated
                // Pull the set node
                cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex];
                // Gather active bindings
                std::unordered_set<uint32_t> bindings;
                for (auto binding : setBindingPair.second) {
                    bindings.insert(binding.first);
                }
                // Bind this set and its active descriptor resources to the command buffer
                pSet->BindCommandBuffer(cb_node, bindings);
                // Save vector of all active sets to verify dynamicOffsets below
                activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second, &state.dynamicOffsets[setIndex]));
                // Make sure set has been updated if it has no immutable samplers
                // If it has immutable samplers, we'll flag error later as needed depending on binding
                if (!pSet->IsUpdated()) {
                    for (auto binding : bindings) {
                        if (!pSet->GetImmutableSamplerPtrFromBinding(binding)) {
                            result |= log_msg(
                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                "Descriptor Set 0x%" PRIxLEAST64 " bound but was never updated. It is now being used to draw so "
                                "this will result in undefined behavior.",
                                (uint64_t)pSet->GetSet());
                        }
                    }
                }
            }
        }
        // For given active slots, verify any dynamic descriptors and record updated images & buffers
        result |= ValidateDrawtimeDescriptorState(my_data, cb_node, activeSetBindingsPairs, function);
        UpdateDrawtimeDescriptorState(my_data, cb_node, activeSetBindingsPairs);
    }

    // Check general pipeline state that needs to be validated at drawtime
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
        result |= ValidatePipelineDrawtimeState(my_data, state, cb_node, pPipe);

    return result;
}

// Validate HW line width capabilities prior to setting requested line width.
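// A lineWidth other than 1.0 requires the wideLines device feature; when the feature is available the
// requested width must also fall within VkPhysicalDeviceLimits::lineWidthRange.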
// Validate HW line width capabilities prior to setting requested line width.
static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
    bool skip_call = false;

    // First check to see if the physical device supports wide lines.
    if ((VK_FALSE == my_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
                             dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
                                            "not supported/enabled so lineWidth must be 1.0f!",
                             lineWidth);
    } else {
        // Otherwise, make sure the width falls in the valid range.
        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
                                                          "to between [%f, %f]!",
                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
        }
    }

    return skip_call;
}

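// Illustrative sketch (not part of the original source): verifyLineWidth() is written to be shared by
// any path that programs a line width, whether it is baked into a pipeline or recorded as dynamic state.
// A hypothetical dynamic-state intercept could reuse it roughly like this, where `commandBuffer`,
// `requestedWidth`, and `some_draw_state_error` are assumed placeholder names:
//
//     skip_call |= verifyLineWidth(dev_data, some_draw_state_error,
//                                  reinterpret_cast<uint64_t &>(commandBuffer), requestedWidth);
//
// Passing the target as a raw uint64_t handle is what lets the same helper report against either a
// pipeline or a command buffer; only the DRAW_STATE_ERROR code changes per call site.
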
// Verify that create state for a pipeline is valid
static bool verifyPipelineCreateState(layer_data *my_data, std::vector<PIPELINE_STATE *> pPipelines, int pipelineIndex) {
    bool skip_call = false;

    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];

    // If create derivative bit is set, check that we've specified a base
    // pipeline correctly, and that the base pipeline was created to allow
    // derivatives.
    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
        PIPELINE_STATE *pBasePipeline = nullptr;
        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
            } else {
                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
            }
        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
            pBasePipeline = getPipelineState(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
        }

        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
        }
    }

    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
        if (!my_data->enabled_features.independentBlend) {
            if (pPipeline->attachments.size() > 1) {
                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
                    // only attachment state, so memcmp is best suited for the comparison
                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
                               sizeof(pAttachments[0]))) {
                        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                             __LINE__, DRAWSTATE_INDEPENDENT_BLEND, "DS",
                                             "Invalid Pipeline CreateInfo: If independent blend feature not "
                                             "enabled, all elements of pAttachments must be identical");
                        break;
                    }
                }
            }
        }
        if (!my_data->enabled_features.logicOp &&
            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
        }
    }

    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
    // produces nonsense errors that confuse users. Other layers should already
    // emit errors for renderpass being invalid.
    auto renderPass = getRenderPassState(my_data, pPipeline->graphicsPipelineCI.renderPass);
    if (renderPass && pPipeline->graphicsPipelineCI.subpass >= renderPass->createInfo.subpassCount) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
                                                                            "is out of range for this renderpass (0..%u)",
                             pPipeline->graphicsPipelineCI.subpass, renderPass->createInfo.subpassCount - 1);
    }

    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->enabled_features,
                                                    my_data->shaderModuleMap)) {
        skip_call = true;
    }
    // Each shader's stage must be unique
    if (pPipeline->duplicate_shaders) {
        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
            if (pPipeline->duplicate_shaders & stage) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                     "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
                                     string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
            }
        }
    }
    // VS is required
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
        skip_call |=
            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vertex Shader required");
    }
    // Either both or neither TC/TE shaders should be defined
    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
    }
    // Compute shaders should be specified independent of Gfx shaders
    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
        (pPipeline->active_shaders &
         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                             "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
    }
    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                            "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
                                                                            "topology for tessellation pipelines");
    }
    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                       "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                       "topology is only valid for tessellation pipelines");
        }
        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                 "Invalid Pipeline CreateInfo State: "
                                 "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                 "topology used. pTessellationState must not be NULL in this case.");
        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                                "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                                "topology used with patchControlPoints value %u."
                                                                                " patchControlPoints should be >0 and <=32.",
                                 pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
        }
    }
    // If a rasterization state is provided, make sure that the line width conforms to the HW.
    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
                                         reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
                                         pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
        }
    }

    // If rasterization is not disabled and subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a
    // valid structure
    if (pPipeline->graphicsPipelineCI.pRasterizationState &&
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        auto subpass_desc = renderPass ? &renderPass->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;
        if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                     "Invalid Pipeline CreateInfo State: "
                                     "pDepthStencilState is NULL when rasterization is enabled and subpass uses a "
                                     "depth/stencil attachment");
            }
        }
    }
    return skip_call;
}

// Free the Pipeline nodes
static void deletePipelines(layer_data *my_data) {
    if (my_data->pipelineMap.size() <= 0)
        return;
    for (auto &pipe_map_pair : my_data->pipelineMap) {
        delete pipe_map_pair.second;
    }
    my_data->pipelineMap.clear();
}

// Block of code at start here specifically for managing/tracking DSs

// Return Pool node ptr for specified pool or else NULL
DESCRIPTOR_POOL_STATE *getDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
    auto pool_it = dev_data->descriptorPoolMap.find(pool);
    if (pool_it == dev_data->descriptorPoolMap.end()) {
        return NULL;
    }
    return pool_it->second;
}

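// Illustrative sketch (not part of the original source): an unknown VkDescriptorPool handle yields
// NULL, so callers are expected to null-check the result before dereferencing. `pool` is an assumed
// caller-side variable:
//
//     DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
//     if (pool_state) {
//         // Safe to inspect pool_state->sets, pool_state->availableSets, etc.
//     }
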
// Return false if update struct is of valid type, otherwise flag error and return code from callback
static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        return false;
    default:
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
}

// Set count for given update struct in the last parameter
static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        // TODO : Need to understand this case better and make sure code is correct
        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
    default:
        return 0;
    }
}

// For given layout and update, return the first overall index of the layout that is updated
static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    return binding_start_index + arrayIndex;
}
// For given layout and update, return the last overall index of the layout that is updated
static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
    return binding_start_index + arrayIndex + count - 1;
}
// Verify that the descriptor type in the update struct matches what's expected by the layout
static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
    // First get actual type of update
    bool skip_call = false;
    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
        break;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        /* no need to validate */
        return false;
        break;
    default:
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                             "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                             string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
    if (!skip_call) {
        if (layout_type != actualType) {
            skip_call |= log_msg(
                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
        }
    }
    return skip_call;
}
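
// Worked example (illustrative only): for a binding whose descriptors start at overall layout index
// binding_start_index = 4, an update beginning at arrayIndex = 2 with descriptorCount = 3 covers
// overall indices getUpdateStartIndex(...) = 4 + 2 = 6 through getUpdateEndIndex(...) = 4 + 2 + 3 - 1 = 8.
// validateUpdateConsistency() is then invoked with that [startIndex, endIndex] range plus the layout's
// descriptor type to flag type mismatches between the update struct and the overlapping binding.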
//TODO: Consolidate functions
bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node,
                const VkImageAspectFlags aspectMask) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
    if (imgsubIt == pCB->imageLayoutMap.end()) {
        return false;
    }
    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t &>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t &>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout),
                string_VkImageLayout(imgsubIt->second.layout));
    }
    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t &>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64
                " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
                reinterpret_cast<uint64_t &>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout),
                string_VkImageLayout(imgsubIt->second.initialLayout));
    }
    node = imgsubIt->second;
    return true;
}

bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout,
                const VkImageAspectFlags aspectMask) {
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
    if (imgsubIt == my_data->imageLayoutMap.end()) {
        return false;
    }
    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t &>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t &>(imgpair.image), oldAspectMask, string_VkImageLayout(layout),
                string_VkImageLayout(imgsubIt->second.layout));
    }
    layout = imgsubIt->second.layout;
    return true;
}

// find layout(s) on the cmd buf level
bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    ImageSubresourcePair imgpair = {image, true, range};
    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {image, false, VkImageSubresource()};
        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
        if (imgsubIt == pCB->imageLayoutMap.end())
            return false;
        node = imgsubIt->second;
    }
    return true;
}

// find layout(s) on the global level
bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {imgpair.image, false, VkImageSubresource()};
        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
        if (imgsubIt == my_data->imageLayoutMap.end())
            return false;
        layout = imgsubIt->second.layout;
    }
    return true;
}

bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    return FindLayout(my_data, imgpair, layout);
}

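// Illustrative sketch (not part of the original source): a typical caller of the global-level query
// builds a VkImageSubresource, lets the wrappers above probe each aspect, and falls back to a default
// when nothing has been recorded for the image yet. `image` and `subres` are assumed caller-side names:
//
//     VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED;
//     if (!FindLayout(dev_data, image, subres, layout)) {
//         // No per-subresource or whole-image entry recorded; treating the image as UNDEFINED here
//         // is the caller's choice, not something the helper mandates.
//     }
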
bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
    auto sub_data = my_data->imageSubresourceMap.find(image);
    if (sub_data == my_data->imageSubresourceMap.end())
        return false;
    auto image_state = getImageState(my_data, image);
    if (!image_state)
        return false;
    bool ignoreGlobal = false;
    // TODO: Make this robust for >1 aspect mask. Now it will just say ignore
    // potential errors in this case.
    if (sub_data->second.size() >= (image_state->createInfo.arrayLayers * image_state->createInfo.mipLevels + 1)) {
        ignoreGlobal = true;
    }
    for (auto imgsubpair : sub_data->second) {
        if (ignoreGlobal && !imgsubpair.hasSubresource)
            continue;
        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
        if (img_data != my_data->imageLayoutMap.end()) {
            layouts.push_back(img_data->second.layout);
        }
    }
    return true;
}

// Set the layout on the global level
void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    VkImage &image = imgpair.image;
    // TODO (mlentine): Maybe set format if new? Not used atm.
    my_data->imageLayoutMap[imgpair].layout = layout;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
    if (subresource == my_data->imageSubresourceMap[image].end()) {
        my_data->imageSubresourceMap[image].push_back(imgpair);
    }
}

// Set the layout on the cmdbuf level
void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    pCB->imageLayoutMap[imgpair] = node;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource =
        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
    }
}

void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    // TODO (mlentine): Maybe make vector a set?
    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
        pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageLayoutMap[imgpair].layout = layout;
    } else {
        // TODO (mlentine): Could be expensive and might need to be removed.
        assert(imgpair.hasSubresource);
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
            node.initialLayout = layout;
        }
        SetLayout(pCB, imgpair, {node.initialLayout, layout});
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
    if (imgpair.subresource.aspectMask & aspectMask) {
        imgpair.subresource.aspectMask = aspectMask;
        SetLayout(pObject, imgpair, layout);
    }
}

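// Note (illustrative, not part of the original source): the aspect-mask overload above acts as a
// filter, so calling it with an aspect the subresource pair does not carry is simply a no-op. The
// per-aspect fan-out below relies on that; for a purely color image only the COLOR_BIT call records
// anything. `color_imgpair` is an assumed ImageSubresourcePair whose aspectMask is
// VK_IMAGE_ASPECT_COLOR_BIT:
//
//     SetLayout(pCB, color_imgpair, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_ASPECT_DEPTH_BIT); // no-op
//     SetLayout(pCB, color_imgpair, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_ASPECT_COLOR_BIT); // records layout
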
template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
}

template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
    SetLayout(pObject, image, imgpair, layout);
}

void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
    auto view_state = getImageViewState(dev_data, imageView);
    assert(view_state);
    auto image = view_state->create_info.image;
    const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
    // TODO: Do not iterate over every possibility - consolidate where possible
    for (uint32_t j = 0; j < subRange.levelCount; j++) {
        uint32_t level = subRange.baseMipLevel + j;
        for (uint32_t k = 0; k < subRange.layerCount; k++) {
            uint32_t layer = subRange.baseArrayLayer + k;
            VkImageSubresource sub = {subRange.aspectMask, level, layer};
            // TODO: If ImageView was created with depth or stencil, transition both layouts as
            // the aspectMask is ignored and both are used. Verify that the extra implicit layout
            // is OK for descriptor set layout validation
            if (subRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (vk_format_is_depth_and_stencil(view_state->create_info.format)) {
                    sub.aspectMask |= (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
                }
            }
            SetLayout(pCB, image, sub, layout);
        }
    }
}

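// Illustrative sketch (not part of the original source): the image-view overload above is the natural
// entry point when an attachment layout is known only through its VkImageView, for example when a
// render pass instance begins. `cb_node` and `attachment_view` are assumed caller-side names:
//
//     SetLayout(dev_data, cb_node, attachment_view, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
//
// Every mip level and array layer covered by the view's subresource range is recorded individually.
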
// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
    if (dev_data->instance_data->disabled.idle_descriptor_set)
        return false;
    bool skip_call = false;
    auto set_node = dev_data->setMap.find(set);
    if (set_node == dev_data->setMap.end()) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
                             (uint64_t)(set));
    } else {
        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
        if (set_node->second->in_use.load()) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)(set), __LINE__, VALIDATION_ERROR_00919, "DS",
                        "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
                        func_str.c_str(), (uint64_t)(set), validation_error_map[VALIDATION_ERROR_00919]);
        }
    }
    return skip_call;
}

// Remove set from setMap and delete the set
static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
    dev_data->setMap.erase(descriptor_set->GetSet());
    delete descriptor_set;
}
// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
static void deletePools(layer_data *my_data) {
    if (my_data->descriptorPoolMap.size() <= 0)
        return;
    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
        // Remove this pools' sets from setMap and delete them
        for (auto ds : (*ii).second->sets) {
            freeDescriptorSet(my_data, ds);
        }
        (*ii).second->sets.clear();
    }
    my_data->descriptorPoolMap.clear();
}

static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
                                VkDescriptorPoolResetFlags flags) {
    DESCRIPTOR_POOL_STATE *pPool = getDescriptorPoolState(my_data, pool);
    // TODO: validate flags
    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
    for (auto ds : pPool->sets) {
        freeDescriptorSet(my_data, ds);
    }
    pPool->sets.clear();
    // Reset available count for each type and available sets for this pool
    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
    }
    pPool->availableSets = pPool->maxSets;
}

// For given CB object, fetch associated CB Node from map
static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
    auto it = my_data->commandBufferMap.find(cb);
    if (it == my_data->commandBufferMap.end()) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                "Attempt to use CommandBuffer 0x%p that doesn't exist!", cb);
        return NULL;
    }
    return it->second;
}
// Free all CB Nodes
// NOTE : Calls to this function should be wrapped in mutex
static void deleteCommandBuffers(layer_data *my_data) {
    if (my_data->commandBufferMap.empty()) {
        return;
    }
    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
        delete (*ii).second;
    }
    my_data->commandBufferMap.clear();
}

static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
}

// If a renderpass is active, verify that the given command type is appropriate for current subpass state
bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass)
        return false;
    bool skip_call = false;
    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip_call;
}

static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
    return false;
}

static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_COMPUTE_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
    return false;
}

static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
    return false;
}

// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not
// in the recording state or if there's an issue with the Cmd ordering
static bool ValidateCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
    bool skip_call = false;
    auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
    if (pPool) {
        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
        switch (cmd) {
        case CMD_BINDPIPELINE:
        case CMD_BINDPIPELINEDELTA:
        case CMD_BINDDESCRIPTORSETS:
        case CMD_FILLBUFFER:
        case CMD_CLEARCOLORIMAGE:
        case CMD_SETEVENT:
        case CMD_RESETEVENT:
        case CMD_WAITEVENTS:
        case CMD_BEGINQUERY:
        case CMD_ENDQUERY:
        case CMD_RESETQUERYPOOL:
        case CMD_COPYQUERYPOOLRESULTS:
        case CMD_WRITETIMESTAMP:
            skip_call |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_SETVIEWPORTSTATE:
        case CMD_SETSCISSORSTATE:
        case CMD_SETLINEWIDTHSTATE:
        case CMD_SETDEPTHBIASSTATE:
        case CMD_SETBLENDSTATE:
        case CMD_SETDEPTHBOUNDSSTATE:
        case CMD_SETSTENCILREADMASKSTATE:
        case CMD_SETSTENCILWRITEMASKSTATE:
        case CMD_SETSTENCILREFERENCESTATE:
        case CMD_BINDINDEXBUFFER:
        case CMD_BINDVERTEXBUFFER:
        case CMD_DRAW:
        case CMD_DRAWINDEXED:
        case CMD_DRAWINDIRECT:
        case CMD_DRAWINDEXEDINDIRECT:
        case CMD_BLITIMAGE:
        case CMD_CLEARATTACHMENTS:
        case CMD_CLEARDEPTHSTENCILIMAGE:
        case CMD_RESOLVEIMAGE:
        case CMD_BEGINRENDERPASS:
        case CMD_NEXTSUBPASS:
        case CMD_ENDRENDERPASS:
            skip_call |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_DISPATCH:
        case CMD_DISPATCHINDIRECT:
            skip_call |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_COPYBUFFER:
        case CMD_COPYIMAGE:
        case CMD_COPYBUFFERTOIMAGE:
        case CMD_COPYIMAGETOBUFFER:
        case CMD_CLONEIMAGEDATA:
        case CMD_UPDATEBUFFER:
        case CMD_PIPELINEBARRIER:
        case CMD_EXECUTECOMMANDS:
        case CMD_END:
            break;
        default:
            break;
        }
    }
    if (pCB->state != CB_RECORDING) {
        skip_call |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
    } else {
        skip_call |= ValidateCmdSubpassState(my_data, pCB, cmd);
    }
    return skip_call;
}
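
// Illustrative sketch (not part of the layer): a typical vkCmd* hook in this file combines the checks
// above with the last-command tracking below. The hook name MyCmdDraw is hypothetical and the real
// CmdDraw hook performs additional draw-state validation; this only shows the intended call pattern:
//
//     VKAPI_ATTR void VKAPI_CALL MyCmdDraw(VkCommandBuffer commandBuffer /* , ... */) {
//         layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
//         std::unique_lock<std::mutex> lock(global_lock);
//         GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
//         bool skip = false;
//         if (pCB) {
//             skip |= ValidateCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
//             UpdateCmdBufferLastCmd(dev_data, pCB, CMD_DRAW);
//         }
//         lock.unlock();
//         if (!skip) dev_data->dispatch_table.CmdDraw(commandBuffer /* , ... */);
//     }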

static void UpdateCmdBufferLastCmd(layer_data *my_data, GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd) {
    if (cb_state->state == CB_RECORDING) {
        cb_state->last_cmd = cmd;
    }
}
// For given object struct return a ptr of BASE_NODE type for its wrapping struct
BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
    BASE_NODE *base_ptr = nullptr;
    switch (object_struct.type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
        base_ptr = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
        base_ptr = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
        base_ptr = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
        base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        base_ptr = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
        base_ptr = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        base_ptr = getImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
        base_ptr = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
        base_ptr = getEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
        base_ptr = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
        base_ptr = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
        base_ptr = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
        base_ptr = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
        base_ptr = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
        break;
    }
    default:
        // TODO : Any other objects to be handled here?
        assert(0);
        break;
    }
    return base_ptr;
}

// Tie the VK_OBJECT to the cmd buffer which includes:
// Add object_binding to cmd buffer
// Add cb_binding to object
static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
    cb_bindings->insert(cb_node);
    cb_node->object_bindings.insert(obj);
}
// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
    if (base_obj)
        base_obj->cb_bindings.erase(cb_node);
}
// Reset the command buffer state
// Maintain the createInfo and set state to CB_NEW, but clear all other state
static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
    if (pCB) {
        pCB->in_use.store(0);
        pCB->last_cmd = CMD_NONE;
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->numCmds = 0;
        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->status = 0;
        pCB->viewportMask = 0;
        pCB->scissorMask = 0;

        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
            pCB->lastBound[i].reset();
        }

        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->broken_bindings.clear();
        pCB->waitedEvents.clear();
        pCB->events.clear();
        pCB->writeEventsBeforeWait.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageSubresourceMap.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->drawData.clear();
        pCB->currentDrawData.buffers.clear();
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        // Make sure any secondaryCommandBuffers are removed from globalInFlight
        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
        }
        pCB->secondaryCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        clear_cmd_buf_and_mem_references(dev_data, pCB);
        pCB->eventUpdates.clear();
        pCB->queryUpdates.clear();

        // Remove object bindings
        for (auto obj : pCB->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, pCB);
        }
        pCB->object_bindings.clear();
        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
        for (auto framebuffer : pCB->framebuffers) {
            auto fb_state = getFramebufferState(dev_data, framebuffer);
            if (fb_state)
                fb_state->cb_bindings.erase(pCB);
        }
        pCB->framebuffers.clear();
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}

// Set PSO-related status bits for CB, including dynamic state set via PSO
static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe) {
    // Account for any dynamic state not set via this PSO
    if (!pPipe->graphicsPipelineCI.pDynamicState ||
        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
        pCB->status |= CBSTATUS_ALL_STATE_SET;
    } else {
        // First consider all state on
        // Then unset any state that's noted as dynamic in PSO
        // Finally OR that into CB statemask
        CBStatusFlags psoDynStateMask = CBSTATUS_ALL_STATE_SET;
        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
            case VK_DYNAMIC_STATE_LINE_WIDTH:
                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BIAS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
                break;
            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
                break;
            default:
                // TODO : Flag error here
                break;
            }
        }
        pCB->status |= psoDynStateMask;
    }
}

// Print the last bound Gfx Pipeline
static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
    bool skip_call = false;
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB) {
        PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
        if (!pPipeTrav) {
            // nothing to print
        } else {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                 __LINE__, DRAWSTATE_NONE, "DS", "%s",
                                 vk_print_vkgraphicspipelinecreateinfo(
                                     reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
                                     .c_str());
        }
    }
    return skip_call;
}

static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
    bool skip_call = false;
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return skip_call;
    }
    skip_call |= printPipeline(my_data, cb);
    return skip_call;
}
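
// Note: printPipeline()/synchAndPrintDSConfig() only produce output when report_data->active_flags
// includes VK_DEBUG_REPORT_INFORMATION_BIT_EXT, which roughly reflects the flags of the debug report
// callbacks that have been registered. An illustrative, application-side sketch of enabling that bit
// (my_report_callback and instance are hypothetical names, not part of this layer):
//
//     VkDebugReportCallbackCreateInfoEXT info = {};
//     info.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
//     info.flags = VK_DEBUG_REPORT_INFORMATION_BIT_EXT | VK_DEBUG_REPORT_ERROR_BIT_EXT;
//     info.pfnCallback = my_report_callback;
//     auto create_cb = (PFN_vkCreateDebugReportCallbackEXT)vkGetInstanceProcAddr(instance, "vkCreateDebugReportCallbackEXT");
//     VkDebugReportCallbackEXT callback;
//     create_cb(instance, &info, nullptr, &callback);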

// Flags validation error if the associated call is made inside a render pass. The apiName
// routine should ONLY be called outside a render pass.
static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName,
                             UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool inside = false;
    if (pCB->activeRenderPass) {
        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                         (uint64_t)pCB->commandBuffer, __LINE__, msgCode, "DS",
                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 "). %s", apiName,
                         (uint64_t)pCB->activeRenderPass->renderPass, validation_error_map[msgCode]);
    }
    return inside;
}

// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName,
                              UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool outside = false;
    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                          (uint64_t)pCB->commandBuffer, __LINE__, msgCode, "DS",
                          "%s: This call must be issued inside an active render pass. "
%s", apiName, validation_error_map[msgCode]); 40873b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 40883b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return outside; 40893b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch} 40903b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 40913b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdochstatic void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) { 40923b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 40933b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation"); 40943b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 40953b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch} 40963b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 40973b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdochstatic void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, instance_layer_data *instance_data) { 40983b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { 40993b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME)) 41003b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch instance_data->surfaceExtensionEnabled = true; 41013b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_EXTENSION_NAME)) 41023b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch instance_data->displayExtensionEnabled = true; 41033b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch#ifdef VK_USE_PLATFORM_ANDROID_KHR 41043b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) 41053b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch instance_data->androidSurfaceExtensionEnabled = true; 41063b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch#endif 41073b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch#ifdef VK_USE_PLATFORM_MIR_KHR 41083b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME)) 4109f91f0611dbaf29ca0f1d4aecb357ce243a19d2faBen Murdoch instance_data->mirSurfaceExtensionEnabled = true; 41103b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch#endif 41113b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch#ifdef VK_USE_PLATFORM_WAYLAND_KHR 41123b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME)) 41133b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch instance_data->waylandSurfaceExtensionEnabled = true; 41143b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch#endif 41153b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch#ifdef VK_USE_PLATFORM_WIN32_KHR 4116f91f0611dbaf29ca0f1d4aecb357ce243a19d2faBen Murdoch if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME)) 41173b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch instance_data->win32SurfaceExtensionEnabled = true; 4118f91f0611dbaf29ca0f1d4aecb357ce243a19d2faBen Murdoch#endif 4119f91f0611dbaf29ca0f1d4aecb357ce243a19d2faBen Murdoch#ifdef VK_USE_PLATFORM_XCB_KHR 4120f91f0611dbaf29ca0f1d4aecb357ce243a19d2faBen Murdoch if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME)) 41213b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben 
            instance_data->xcbSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME))
            instance_data->xlibSurfaceExtensionEnabled = true;
#endif
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL)
        return VK_ERROR_INITIALIZATION_FAILED;

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS)
        return result;

    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), instance_layer_data_map);
    instance_data->instance = *pInstance;
    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);

    instance_data->report_data = debug_report_create_instance(
        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
    checkInstanceRegisterExtensions(pCreateInfo, instance_data);
    init_core_validation(instance_data, pAllocator);

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

/* hook DestroyInstance to remove tableInstanceMap entry */
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(instance);
    // TBD: Need any locking this early, in case this function is called at the
    // same time by more than one thread?
    instance_layer_data *instance_data = get_my_data_ptr(key, instance_layer_data_map);
    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);

    std::lock_guard<std::mutex> lock(global_lock);
    // Clean up logging callback, if any
    while (instance_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
        instance_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(instance_data->report_data);
    layer_data_map.erase(key);
}

static void checkDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    uint32_t i;
    // TBD: Need any locking, in case this function is called at the same time
    // by more than one thread?
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_extensions.wsi_enabled = false;
    dev_data->device_extensions.wsi_display_swapchain_enabled = false;

    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_enabled = true;
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_display_swapchain_enabled = true;
    }
}

// Verify that queue family has been properly requested
static bool ValidateRequestedQueueFamilyProperties(instance_layer_data *instance_data, VkPhysicalDevice gpu,
                                                   const VkDeviceCreateInfo *create_info) {
    bool skip_call = false;
    auto physical_device_state = getPhysicalDeviceState(instance_data, gpu);
    // First check if app has actually requested queueFamilyProperties
    if (!physical_device_state) {
        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                             "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
    } else if (QUERY_DETAILS != physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
        // TODO: This is not called out as an invalid use in the spec so make more informative recommendation.
        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
                             "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
    } else {
        // Check that the requested queue properties are valid
        for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
            uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
            if (requestedIndex >= physical_device_state->queue_family_properties.size()) {
                skip_call |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
                    "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
            } else if (create_info->pQueueCreateInfos[i].queueCount >
                       physical_device_state->queue_family_properties[requestedIndex].queueCount) {
                skip_call |=
                    log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
                            "DL", "Invalid queue create request in vkCreateDevice(). "
                                  "QueueFamilyIndex %u only has %u queues, but requested queueCount is %u.",
                            requestedIndex, physical_device_state->queue_family_properties[requestedIndex].queueCount,
                            create_info->pQueueCreateInfos[i].queueCount);
            }
        }
    }
    return skip_call;
}

// Verify that features have been queried and that they are available
static bool ValidateRequestedFeatures(instance_layer_data *dev_data, VkPhysicalDevice phys,
                                      const VkPhysicalDeviceFeatures *requested_features) {
    bool skip_call = false;

    auto phys_device_state = getPhysicalDeviceState(dev_data, phys);
    const VkBool32 *actual = reinterpret_cast<VkBool32 *>(&phys_device_state->features);
    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
    // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
    // Need to provide the struct member name with the issue. To do that seems like we'll
    // have to loop through each struct member which should be done w/ codegen to keep in synch.
    uint32_t errors = 0;
    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
    for (uint32_t i = 0; i < total_bools; i++) {
        if (requested[i] > actual[i]) {
            // TODO: Add index to struct member name helper to be able to include a feature name
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
                                 "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
                                       "which is not available on this device.",
                                 i);
            errors++;
        }
    }
    if (errors && (UNCALLED == phys_device_state->vkGetPhysicalDeviceFeaturesState)) {
        // If user didn't request features, notify them that they should
        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
                             "DL", "You requested features that are unavailable on this device. "
                                   "You should first query feature availability by calling vkGetPhysicalDeviceFeatures().");
    }
    return skip_call;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    instance_layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), instance_layer_data_map);
    bool skip_call = false;

    // Check that any requested features are available
    if (pCreateInfo->pEnabledFeatures) {
        skip_call |= ValidateRequestedFeatures(my_instance_data, gpu, pCreateInfo->pEnabledFeatures);
    }
    skip_call |= ValidateRequestedQueueFamilyProperties(my_instance_data, gpu, pCreateInfo);

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
43023b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 43033b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 43043b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch std::unique_lock<std::mutex> lock(global_lock); 43053b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map); 43063b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 43073b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch my_device_data->instance_data = my_instance_data; 43083b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch // Setup device dispatch table 43093b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch layer_init_device_dispatch_table(*pDevice, &my_device_data->dispatch_table, fpGetDeviceProcAddr); 43103b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch my_device_data->device = *pDevice; 43113b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch // Save PhysicalDevice handle 43123b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch my_device_data->physical_device = gpu; 43133b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 43143b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice); 43153b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch checkDeviceRegisterExtensions(pCreateInfo, *pDevice); 43163b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch // Get physical device limits for this device 43173b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch my_instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties)); 43183b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch uint32_t count; 43193b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr); 43203b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch my_device_data->phys_dev_properties.queue_family_properties.resize(count); 43213b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties( 43223b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]); 43233b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch // TODO: device limits should make sure these are compatible 43243b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (pCreateInfo->pEnabledFeatures) { 43253b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch my_device_data->enabled_features = *pCreateInfo->pEnabledFeatures; 43263b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } else { 43273b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch memset(&my_device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures)); 43283b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 43293b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch // Store physical device mem limits into device layer_data struct 43303b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch my_instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props); 43313b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch lock.unlock(); 43323b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 43333b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch ValidateLayerOrdering(*pCreateInfo); 43343b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 43353b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return result; 43363b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch} 
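
// Illustrative application-side pattern for the pEnabledFeatures validation in CreateDevice() above
// (the variable names here are hypothetical and not part of this layer): query support first and
// enable only features the device actually reports, e.g.
//
//     VkPhysicalDeviceFeatures supported = {};
//     vkGetPhysicalDeviceFeatures(gpu, &supported);
//     VkPhysicalDeviceFeatures enabled = {};
//     enabled.samplerAnisotropy = supported.samplerAnisotropy;  // copy only what is reported available
//     // ... then point VkDeviceCreateInfo::pEnabledFeatures at &enabled before calling vkCreateDevice()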

// prototype
VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    bool skip = false;
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
    // Free all the memory
    std::unique_lock<std::mutex> lock(global_lock);
    deletePipelines(dev_data);
    dev_data->renderPassMap.clear();
    deleteCommandBuffers(dev_data);
    // This will also delete all sets in the pool & remove them from setMap
    deletePools(dev_data);
    // All sets should be removed
    assert(dev_data->setMap.empty());
    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
        delete del_layout.second;
    }
    dev_data->descriptorSetLayoutMap.clear();
    dev_data->imageViewMap.clear();
    dev_data->imageMap.clear();
    dev_data->imageSubresourceMap.clear();
    dev_data->imageLayoutMap.clear();
    dev_data->bufferViewMap.clear();
    dev_data->bufferMap.clear();
    // Queues persist until device is destroyed
    dev_data->queueMap.clear();
    // Report any memory leaks
    DEVICE_MEM_INFO *pInfo = NULL;
    if (!dev_data->memObjMap.empty()) {
        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
            pInfo = (*ii).second.get();
            if (pInfo->alloc_info.allocationSize != 0) {
                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK, "MEM",
                                "Mem Object 0x%" PRIx64 " has not been freed. You should clean up this memory by calling "
                                "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().",
                                (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
            }
        }
    }
    layer_debug_report_destroy_device(device);
    lock.unlock();

#if DISPATCH_MAP_DEBUG
    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
#endif
    if (!skip) {
        dev_data->dispatch_table.DestroyDevice(device, pAllocator);
        layer_data_map.erase(key);
    }
}

static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};

// This validates that the initial layout specified in the command buffer for the IMAGE
// is the same as the global IMAGE layout
static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    for (auto cb_image_data : pCB->imageLayoutMap) {
        VkImageLayout imageLayout;
        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
        } else {
            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                // TODO: Set memory invalid which is in mem_tracker currently
            } else if (imageLayout != cb_image_data.second.initialLayout) {
                if (cb_image_data.first.hasSubresource) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
                        "with layout %s when first use is %s.",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
                        cb_image_data.first.subresource.arrayLayer, cb_image_data.first.subresource.mipLevel,
                        string_VkImageLayout(imageLayout), string_VkImageLayout(cb_image_data.second.initialLayout));
                } else {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
                        "first use is %s.",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
                        string_VkImageLayout(cb_image_data.second.initialLayout));
                }
            }
            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
        }
    }
    return skip_call;
}

// Loop through bound objects and increment their in_use counts
// For any unknown objects, flag an error
static bool ValidateAndIncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    bool skip = false;
    DRAW_STATE_ERROR error_code = DRAWSTATE_NONE;
    BASE_NODE *base_obj = nullptr;
    for (auto obj : cb_node->object_bindings) {
        switch (obj.type) {
        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
            base_obj = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DESCRIPTOR_SET;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
            base_obj = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(obj.handle));
            error_code = DRAWSTATE_INVALID_SAMPLER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
            base_obj = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_QUERY_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
            base_obj = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(obj.handle));
            error_code = DRAWSTATE_INVALID_PIPELINE;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
            base_obj = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
            error_code = DRAWSTATE_INVALID_BUFFER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
            base_obj = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(obj.handle));
            error_code = DRAWSTATE_INVALID_BUFFER_VIEW;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
            base_obj = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
            error_code = DRAWSTATE_INVALID_IMAGE;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
            base_obj = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(obj.handle));
            error_code = DRAWSTATE_INVALID_IMAGE_VIEW;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
            base_obj = getEventNode(dev_data, reinterpret_cast<VkEvent &>(obj.handle));
            error_code = DRAWSTATE_INVALID_EVENT;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
            base_obj = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DESCRIPTOR_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
            base_obj = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_COMMAND_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
            base_obj = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(obj.handle));
            error_code = DRAWSTATE_INVALID_FRAMEBUFFER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
            base_obj = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(obj.handle));
            error_code = DRAWSTATE_INVALID_RENDERPASS;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
            base_obj = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DEVICE_MEMORY;
            break;
        }
        default:
            // TODO : Merge handling of other objects types into this code
            break;
        }
        if (!base_obj) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj.type, obj.handle, __LINE__, error_code, "DS",
                        "Cannot submit cmd buffer using deleted %s 0x%" PRIx64 ".", object_type_to_string(obj.type), obj.handle);
        } else {
            base_obj->in_use.fetch_add(1);
        }
    }
    return skip;
}

// Track which resources are in-flight by atomically incrementing their "in_use" count
static bool validateAndIncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    bool skip_call = false;

    cb_node->in_use.fetch_add(1);
    dev_data->globalInFlightCmdBuffers.insert(cb_node->commandBuffer);

    // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
    skip_call |= ValidateAndIncrementBoundObjects(dev_data, cb_node);
    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
    // all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
    // should then be flagged prior to calling this function
    for (auto drawDataElement : cb_node->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_state = getBufferState(dev_data, buffer);
            if (!buffer_state) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
            } else {
                buffer_state->in_use.fetch_add(1);
            }
        }
    }
    for (auto event : cb_node->writeEventsBeforeWait) {
        auto event_state = getEventNode(dev_data, event);
        if (event_state)
            event_state->write_in_use++;
    }
    return skip_call;
}

// Note: This function assumes that the global lock is held by the calling thread.
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is that if there are events to be waited on prior to a QueryReset,
// all such events have been signalled.
static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *queue, uint64_t seq) {
    bool skip = false;
    auto queue_seq = queue->seq;
    std::unordered_map<VkQueue, uint64_t> other_queue_seqs;
    auto sub_it = queue->submissions.begin();
    while (queue_seq < seq) {
        for (auto &wait : sub_it->waitSemaphores) {
            auto &last_seq = other_queue_seqs[wait.queue];
            last_seq = std::max(last_seq, wait.seq);
        }
        for (auto cb : sub_it->cbs) {
            auto cb_node = getCBNode(dev_data, cb);
            if (cb_node) {
                for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
                    for (auto event : queryEventsPair.second) {
                        if (dev_data->eventMap[event].needsSignaled) {
                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
                                            "Cannot get query results on queryPool 0x%" PRIx64
                                            " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
                                            (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
                        }
                    }
                }
            }
        }
        sub_it++;
        queue_seq++;
    }
    for (auto qs : other_queue_seqs) {
        skip |= VerifyQueueStateToSeq(dev_data, getQueueState(dev_data, qs.first), qs.second);
    }
    return skip;
}

// When the given fence is retired, verify outstanding queue operations through the point of the fence
static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
    auto fence_state = getFenceNode(dev_data, fence);
    if (VK_NULL_HANDLE != fence_state->signaler.first) {
        return VerifyQueueStateToSeq(dev_data, getQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
    }
    return false;
}

// TODO: nuke this completely.
// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
    pCB->in_use.fetch_sub(1);
    if (!pCB->in_use.load()) {
        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    }
}

// Decrement in-use count for objects bound to command buffer
static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    BASE_NODE *base_obj = nullptr;
    for (auto obj : cb_node->object_bindings) {
        base_obj = GetStateStructPtrFromObject(dev_data, obj);
        if (base_obj) {
            base_obj->in_use.fetch_sub(1);
        }
    }
}

static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
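    // Roll this queue's bookkeeping forward until pQueue->seq reaches "seq": each retired submission
    // releases the in-use counts taken at submit time (semaphores, bound objects, draw buffers, events),
    // copies per-command-buffer query/event state back into the device-level maps, and marks its fence
    // (if any) FENCE_RETIRED. Semaphore waits on other queues are remembered in otherQueueSeqs so those
    // queues can be rolled forward afterwards as well.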
    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;

    // Roll this queue forward, one submission at a time.
    while (pQueue->seq < seq) {
        auto &submission = pQueue->submissions.front();

        for (auto &wait : submission.waitSemaphores) {
            auto pSemaphore = getSemaphoreNode(dev_data, wait.semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
            auto &lastSeq = otherQueueSeqs[wait.queue];
            lastSeq = std::max(lastSeq, wait.seq);
        }

        for (auto &semaphore : submission.signalSemaphores) {
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
        }

        for (auto cb : submission.cbs) {
            auto cb_node = getCBNode(dev_data, cb);
            if (!cb_node) {
                continue;
            }
            // First perform decrement on general case bound objects
            DecrementBoundResources(dev_data, cb_node);
            for (auto drawDataElement : cb_node->drawData) {
                for (auto buffer : drawDataElement.buffers) {
                    auto buffer_state = getBufferState(dev_data, buffer);
                    if (buffer_state) {
                        buffer_state->in_use.fetch_sub(1);
                    }
                }
            }
            for (auto event : cb_node->writeEventsBeforeWait) {
                auto eventNode = dev_data->eventMap.find(event);
                if (eventNode != dev_data->eventMap.end()) {
                    eventNode->second.write_in_use--;
                }
            }
            for (auto queryStatePair : cb_node->queryToStateMap) {
                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
            }
            for (auto eventStagePair : cb_node->eventToStageMap) {
                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
            }

            removeInFlightCmdBuffer(dev_data, cb);
        }

        auto pFence = getFenceNode(dev_data, submission.fence);
        if (pFence) {
            pFence->state = FENCE_RETIRED;
        }

        pQueue->submissions.pop_front();
        pQueue->seq++;
    }

    // Roll other queues forward to the highest seq we saw a wait for
    for (auto qs : otherQueueSeqs) {
        RetireWorkOnQueue(dev_data, getQueueState(dev_data, qs.first), qs.second);
    }
}


// Submit a fence to a queue, delimiting previous fences and previous untracked
// work by it.
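// The (queue, seq) pair recorded in pFence->signaler below is what VerifyQueueStateToFence() and the
// queue-retirement logic later use to decide how much previously queued work must have completed once
// this fence signals.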
static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
    pFence->state = FENCE_INFLIGHT;
    pFence->signaler.first = pQueue->queue;
    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
}

static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             0, __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                             "Command Buffer 0x%p is already in use and is not marked for simultaneous use.", pCB->commandBuffer);
    }
    return skip_call;
}

static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *call_source) {
    bool skip = false;
    if (dev_data->instance_data->disabled.command_buffer_state)
        return skip;
    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                        "Commandbuffer 0x%p was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
                        "set, but has been submitted 0x%" PRIxLEAST64 " times.",
                        pCB->commandBuffer, pCB->submitCount);
    }
    // Validate that cmd buffers have been updated
    if (CB_RECORDED != pCB->state) {
        if (CB_INVALID == pCB->state) {
            // Inform app of reason CB invalid
            for (auto obj : pCB->broken_bindings) {
                const char *type_str = object_type_to_string(obj.type);
                // Descriptor sets are a special case that can be either destroyed or updated to invalidate a CB
                const char *cause_str =
                    (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ? "destroyed or updated" : "destroyed";

                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer 0x%p that is invalid because bound %s 0x%" PRIxLEAST64 " was %s.",
                            pCB->commandBuffer, type_str, obj.handle, cause_str);
            }
        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
                            "You must call vkEndCommandBuffer() on command buffer 0x%p before this call to %s!", pCB->commandBuffer,
                            call_source);
        }
    }
    return skip;
}

// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
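// (The Vulkan spec's valid usage for vkQueueSubmit requires each submitted command buffer to have been
// allocated from a command pool created for the same queue family that the target queue belongs to,
// which is what the check below enforces.)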
static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
    bool skip_call = false;
    auto pPool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
    auto queue_state = getQueueState(dev_data, queue);

    if (pPool && queue_state && (pPool->queueFamilyIndex != queue_state->queueFamilyIndex)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
                             "vkQueueSubmit: Primary command buffer 0x%p"
                             " created in queue family %d is being submitted on queue 0x%p from queue family %d.",
                             pCB->commandBuffer, pPool->queueFamilyIndex, queue, queue_state->queueFamilyIndex);
    }

    return skip_call;
}

static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    // Track in-use for resources off of primary and any secondary CBs
    bool skip_call = false;

    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing on device
    skip_call |= validateCommandBufferSimultaneousUse(dev_data, pCB);

    skip_call |= validateAndIncrementResources(dev_data, pCB);

    if (!pCB->secondaryCommandBuffers.empty()) {
        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
            skip_call |= validateAndIncrementResources(dev_data, pSubCB);
            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                        "Commandbuffer 0x%p was submitted with secondary buffer 0x%p"
                        " but that buffer has subsequently been bound to "
                        "primary cmd buffer 0x%p"
                        " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                        pCB->commandBuffer, secondaryCmdBuffer, pSubCB->primaryCommandBuffer);
            }
        }
    }

    skip_call |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()");

    return skip_call;
}

static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
    bool skip_call = false;

    if (pFence) {
        if (pFence->state == FENCE_INFLIGHT) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                 "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
        } else if (pFence->state == FENCE_RETIRED) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                        "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted",
                        reinterpret_cast<uint64_t &>(pFence->fence));
        }
    }

    return skip_call;
}


VKAPI_ATTR VkResult VKAPI_CALL
QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);

    auto pQueue = getQueueState(dev_data, queue);
    auto pFence = getFenceNode(dev_data, fence);
    skip_call |= ValidateFenceForSubmit(dev_data, pFence);

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    // Mark the fence in-use.
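    // Even when submitCount is zero, the fence still delimits one (empty) submission recorded further
    // below, which is why the signaler sequence passed to SubmitFence() uses std::max(1u, submitCount).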
48493b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (pFence) { 48503b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch SubmitFence(pQueue, pFence, std::max(1u, submitCount)); 48513b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 48523b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 48533b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch // Now verify each individual submit 48543b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { 48553b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch const VkSubmitInfo *submit = &pSubmits[submit_idx]; 48563b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch vector<SEMAPHORE_WAIT> semaphore_waits; 48573b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch vector<VkSemaphore> semaphore_signals; 48583b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) { 48593b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch VkSemaphore semaphore = submit->pWaitSemaphores[i]; 48603b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch auto pSemaphore = getSemaphoreNode(dev_data, semaphore); 48613b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (pSemaphore) { 48623b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (pSemaphore->signaled) { 48633b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (pSemaphore->signaler.first != VK_NULL_HANDLE) { 48643b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second}); 48653b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch pSemaphore->in_use.fetch_add(1); 48663b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 48673b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch pSemaphore->signaler.first = VK_NULL_HANDLE; 48683b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch pSemaphore->signaled = false; 48693b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } else { 48703b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch skip_call |= 48713b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 48723b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS", 48733b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue, 48743b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch reinterpret_cast<const uint64_t &>(semaphore)); 48753b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 48763b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 48773b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 48783b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) { 48793b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch VkSemaphore semaphore = submit->pSignalSemaphores[i]; 48803b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch auto pSemaphore = getSemaphoreNode(dev_data, semaphore); 48813b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (pSemaphore) { 48823b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (pSemaphore->signaled) { 48833b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch skip_call |= 48843b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 
48853b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS", 48863b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch "Queue 0x%p is signaling semaphore 0x%" PRIx64 48873b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch " that has already been signaled but not waited on by queue 0x%" PRIx64 ".", 48883b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch queue, reinterpret_cast<const uint64_t &>(semaphore), 48893b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch reinterpret_cast<uint64_t &>(pSemaphore->signaler.first)); 48903b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } else { 48913b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch pSemaphore->signaler.first = queue; 48923b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1; 48933b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch pSemaphore->signaled = true; 48943b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch pSemaphore->in_use.fetch_add(1); 48953b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch semaphore_signals.push_back(semaphore); 48963b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 48973b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 48983b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 48993b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 49003b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch std::vector<VkCommandBuffer> cbs; 49013b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 49023b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch for (uint32_t i = 0; i < submit->commandBufferCount; i++) { 49033b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch auto cb_node = getCBNode(dev_data, submit->pCommandBuffers[i]); 49043b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch skip_call |= ValidateCmdBufImageLayouts(dev_data, cb_node); 49053b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (cb_node) { 49063b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch cbs.push_back(submit->pCommandBuffers[i]); 49073b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch for (auto secondaryCmdBuffer : cb_node->secondaryCommandBuffers) { 49083b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch cbs.push_back(secondaryCmdBuffer); 49093b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 49103b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 49113b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch cb_node->submitCount++; // increment submit count 49123b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch skip_call |= validatePrimaryCommandBufferState(dev_data, cb_node); 49133b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch skip_call |= validateQueueFamilyIndices(dev_data, cb_node, queue); 49143b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch // Potential early exit here as bad object state may crash in delayed function calls 49153b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch if (skip_call) 49163b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch return result; 49173b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch // Call submit-time functions to validate/update state 49183b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch for (auto &function : cb_node->validate_functions) { 49193b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch skip_call |= function(); 49203b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch } 49213b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch for (auto &function : cb_node->eventUpdates) { 49223b9bc31999c9787eb726ecdbfd5796bfdec32a18Ben Murdoch 
                    skip_call |= function(queue);
                }
                for (auto &function : cb_node->queryUpdates) {
                    skip_call |= function(queue);
                }
            }
        }

        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
    }

    if (pFence && !submitCount) {
        // If no submissions, but just dropping a fence on the end of the queue,
        // record an empty submission with just the fence, so we can determine
        // its completion.
        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
                                         std::vector<SEMAPHORE_WAIT>(),
                                         std::vector<VkSemaphore>(),
                                         fence);
    }

    lock.unlock();
    if (!skip_call)
        result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);

    return result;
}
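
// Check whether another memory allocation is permitted before dispatching down to the driver.
// The only state consulted is the count of live VkDeviceMemory objects in memObjMap versus the
// device's maxMemoryAllocationCount limit.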
static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
    bool skip = false;
    if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        reinterpret_cast<const uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_00611, "MEM",
                        "Number of currently valid memory objects is not less than the maximum allowed (%u). %s",
                        dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount,
                        validation_error_map[VALIDATION_ERROR_00611]);
    }
    return skip;
}

static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
    add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
    return;
}
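
// Note: the entry points below share a common pattern: take the global lock, run the
// PreCallValidate* check, drop the lock around the driver dispatch, then re-take it to run the
// PostCallRecord* state update only when the call was not skipped (and, where relevant, succeeded).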
VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateAllocateMemory(dev_data);
    if (!skip) {
        lock.unlock();
        result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
        lock.lock();
        if (VK_SUCCESS == result) {
            PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
        }
    }
    return result;
}

// For given obj node, if it is in use, flag a validation error and return callback result, else return false
bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
                            UNIQUE_VALIDATION_ERROR_CODE error_code) {
    if (dev_data->instance_data->disabled.object_in_use)
        return false;
    bool skip = false;
    if (obj_node->in_use.load()) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_struct.type, obj_struct.handle, __LINE__,
                        error_code, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer. %s",
                        object_type_to_string(obj_struct.type), obj_struct.handle, validation_error_map[error_code]);
    }
    return skip;
}

static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
    *mem_info = getMemObjInfo(dev_data, mem);
    *obj_struct = {reinterpret_cast<uint64_t &>(mem), VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT};
    if (dev_data->instance_data->disabled.free_memory)
        return false;
    bool skip = false;
    if (*mem_info) {
        skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, VALIDATION_ERROR_00620);
    }
    return skip;
}

static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
    // Clear mem binding for any bound objects
    for (auto obj : mem_info->obj_bindings) {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__, MEMTRACK_FREED_MEM_REF,
                "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64, obj.handle,
                (uint64_t)mem_info->mem);
        switch (obj.type) {
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
            auto image_state = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
            assert(image_state); // Any destroyed images should already be removed from bindings
            image_state->binding.mem = MEMORY_UNBOUND;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
            auto buffer_state = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
            assert(buffer_state); // Any destroyed buffers should already be removed from bindings
            buffer_state->binding.mem = MEMORY_UNBOUND;
            break;
        }
        default:
            // Should only have buffer or image objects bound to memory
            assert(0);
        }
    }
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
    dev_data->memObjMap.erase(mem);
}

VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    DEVICE_MEM_INFO *mem_info = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
        lock.lock();
        PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
    }
}

// Validate that given Map memory range is valid. This means that the memory should not already be mapped,
// and that the size of the map range should be:
// 1. Not zero
// 2. Within the size of the memory allocation
static bool ValidateMapMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    bool skip_call = false;

    if (size == 0) {
        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "VkMapMemory: Attempting to map memory range of size zero");
    }

    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        auto mem_info = mem_element->second.get();
        // It is an application error to call VkMapMemory on an object that is already mapped
        if (mem_info->mem_range.size != 0) {
            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
        }

        // Validate that offset + size is within object's allocationSize
        if (size == VK_WHOLE_SIZE) {
            if (offset >= mem_info->alloc_info.allocationSize) {
                skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                    "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
                                           " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
                                    offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
            }
        } else {
            if ((offset + size) > mem_info->alloc_info.allocationSize) {
                skip_call =
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64, offset,
                            size + offset, mem_info->alloc_info.allocationSize);
            }
        }
    }
    return skip_call;
}
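
// Illustrative example (not part of the layer): for an allocation with allocationSize = 256,
// mapping offset 192 with size 128 is flagged above because 192 + 128 = 320 exceeds the
// allocation, while mapping offset 192 with VK_WHOLE_SIZE covers only the remaining 64 bytes
// and passes the check.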

static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    auto mem_info = getMemObjInfo(my_data, mem);
    if (mem_info) {
        mem_info->mem_range.offset = offset;
        mem_info->mem_range.size = size;
    }
}

static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
    bool skip_call = false;
    auto mem_info = getMemObjInfo(my_data, mem);
    if (mem_info) {
        if (!mem_info->mem_range.size) {
            // Valid Usage: memory must currently be mapped
            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
        }
        mem_info->mem_range.size = 0;
        if (mem_info->shadow_copy) {
            free(mem_info->shadow_copy_base);
            mem_info->shadow_copy_base = 0;
            mem_info->shadow_copy = 0;
        }
    }
    return skip_call;
}

// Guard value for pad data
static char NoncoherentMemoryFillValue = 0xb;

static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
                                     void **ppData) {
    auto mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->p_driver_data = *ppData;
        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
            mem_info->shadow_copy = 0;
        } else {
            if (size == VK_WHOLE_SIZE) {
                size = mem_info->alloc_info.allocationSize - offset;
            }
            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
            assert(vk_safe_modulo(mem_info->shadow_pad_size,
                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
            // Ensure start of mapped region reflects hardware alignment constraints
            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;

            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
            uint64_t start_offset = offset % map_alignment;
            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
            mem_info->shadow_copy_base = malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));

            mem_info->shadow_copy =
                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
                                         ~(map_alignment - 1)) + start_offset;
            assert(vk_safe_modulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
                                  map_alignment) == 0);

            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
        }
    }
}
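
// Rough layout of the shadow allocation built above for non-coherent memory (illustrative):
//   [shadow_copy_base .. shadow_copy)          malloc/alignment slack
//   [shadow_copy .. shadow_copy + pad)         leading guardband, filled with NoncoherentMemoryFillValue
//   [*ppData .. *ppData + size)                region handed back to the application
//   [*ppData + size .. *ppData + size + pad)   trailing guardband
// where pad == shadow_pad_size, so over- or under-writes by the application can be detected
// later, as noted in the comment above.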

// Verify that state for fence being waited on is appropriate. That is,
// a fence being waited on should not already be signaled and
// it should have been submitted on a queue or during acquire next image
static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
    bool skip_call = false;

    auto pFence = getFenceNode(dev_data, fence);
    if (pFence) {
        if (pFence->state == FENCE_UNSIGNALED) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                 "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
                                 "acquire next image.",
                                 apiCall, reinterpret_cast<uint64_t &>(fence));
        }
    }
    return skip_call;
}

static void RetireFence(layer_data *dev_data, VkFence fence) {
    auto pFence = getFenceNode(dev_data, fence);
    if (pFence->signaler.first != VK_NULL_HANDLE) {
        /* Fence signaller is a queue -- use this as proof that prior operations
         * on that queue have completed.
         */
        RetireWorkOnQueue(dev_data, getQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
    }
    else {
        /* Fence signaller is the WSI. We're not tracking what the WSI op
         * actually /was/ in CV yet, but we need to mark the fence as retired.
         */
        pFence->state = FENCE_RETIRED;
    }
}

static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
    if (dev_data->instance_data->disabled.wait_for_fences)
        return false;
    bool skip = false;
    for (uint32_t i = 0; i < fence_count; i++) {
        skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
        skip |= VerifyQueueStateToFence(dev_data, fences[i]);
    }
    return skip;
}

static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
    // When we know that all fences are complete we can clean/remove their CBs
    if ((VK_TRUE == wait_all) || (1 == fence_count)) {
        for (uint32_t i = 0; i < fence_count; i++) {
            RetireFence(dev_data, fences[i]);
        }
    }
    // NOTE : Alternate case not handled here is when some fences have completed. In
    // this case for app to guarantee which fences completed it will have to call
    // vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
}

VKAPI_ATTR VkResult VKAPI_CALL
WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Verify fence status of submitted fences
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    if (result == VK_SUCCESS) {
        lock.lock();
        PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
        lock.unlock();
    }
    return result;
}

static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
    if (dev_data->instance_data->disabled.get_fence_state)
        return false;
    return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
}

static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }

VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
    if (result == VK_SUCCESS) {
        lock.lock();
        PostCallRecordGetFenceStatus(dev_data, fence);
        lock.unlock();
    }
    return result;
}

static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
    // Add queue to tracking set only if it is new
    auto result = dev_data->queues.emplace(queue);
    if (result.second == true) {
        QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
        queue_state->queue = queue;
        queue_state->queueFamilyIndex = q_family_index;
        queue_state->seq = 0;
    }
}

VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
                                          VkQueue *pQueue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
    std::lock_guard<std::mutex> lock(global_lock);

    PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
}

static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
    *queue_state = getQueueState(dev_data, queue);
    if (dev_data->instance_data->disabled.queue_wait_idle)
        return false;
    return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
}

static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
    RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
}

VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    QUEUE_STATE *queue_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordQueueWaitIdle(dev_data, queue_state);
        lock.unlock();
    }
    return result;
}

static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
    if (dev_data->instance_data->disabled.device_wait_idle)
        return false;
    bool skip = false;
    for (auto &queue : dev_data->queueMap) {
        skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
    }
    return skip;
}

static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
    for (auto &queue : dev_data->queueMap) {
        RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
    }
}

VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDeviceWaitIdle(dev_data);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordDeviceWaitIdle(dev_data);
        lock.unlock();
    }
    return result;
}
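
// A fence may only be destroyed once it is no longer in flight: if the fence node is still in
// the FENCE_INFLIGHT state, a DRAWSTATE_INVALID_FENCE error is reported below.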
static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
    *fence_node = getFenceNode(dev_data, fence);
    *obj_struct = {reinterpret_cast<uint64_t &>(fence), VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT};
    if (dev_data->instance_data->disabled.destroy_fence)
        return false;
    bool skip = false;
    if (*fence_node) {
        if ((*fence_node)->state == FENCE_INFLIGHT) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Fence 0x%" PRIx64 " is in use.",
                            (uint64_t)(fence));
        }
    }
    return skip;
}

static void PostCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }

VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    FENCE_NODE *fence_node = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);

    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
        lock.lock();
        PostCallRecordDestroyFence(dev_data, fence);
    }
}

static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
                                            VK_OBJECT *obj_struct) {
    *sema_node = getSemaphoreNode(dev_data, semaphore);
    *obj_struct = {reinterpret_cast<uint64_t &>(semaphore), VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT};
    if (dev_data->instance_data->disabled.destroy_semaphore)
        return false;
    bool skip = false;
    if (*sema_node) {
        skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, VALIDATION_ERROR_00199);
    }
    return skip;
}

static void PostCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }

VKAPI_ATTR void VKAPI_CALL
DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    SEMAPHORE_NODE *sema_node;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
        lock.lock();
        PostCallRecordDestroySemaphore(dev_data, semaphore);
    }
}
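
// Events follow the same destroy pattern as fences and semaphores; in addition, any command
// buffers recorded with the event are invalidated when it is destroyed (see
// PostCallRecordDestroyEvent below).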
static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
    *event_state = getEventNode(dev_data, event);
    *obj_struct = {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT};
    if (dev_data->instance_data->disabled.destroy_event)
        return false;
    bool skip = false;
    if (*event_state) {
        skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, VALIDATION_ERROR_00213);
    }
    return skip;
}

static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
    dev_data->eventMap.erase(event);
}

VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    EVENT_STATE *event_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
        lock.lock();
        PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
    }
}

static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
                                            VK_OBJECT *obj_struct) {
    *qp_state = getQueryPoolNode(dev_data, query_pool);
    *obj_struct = {reinterpret_cast<uint64_t &>(query_pool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT};
    if (dev_data->instance_data->disabled.destroy_query_pool)
        return false;
    bool skip = false;
    if (*qp_state) {
        skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, VALIDATION_ERROR_01012);
    }
    return skip;
}

static void PostCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
    dev_data->queryPoolMap.erase(query_pool);
}

VKAPI_ATTR void VKAPI_CALL
DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    QUERY_POOL_NODE *qp_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
        lock.lock();
        PostCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
    }
}

static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
                                               uint32_t query_count, VkQueryResultFlags flags,
                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
    for (auto cmd_buffer : dev_data->globalInFlightCmdBuffers) {
        auto cb = getCBNode(dev_data, cmd_buffer);
        for (auto query_state_pair : cb->queryToStateMap) {
            (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer);
        }
    }
    if (dev_data->instance_data->disabled.get_query_pool_results)
        return false;
    bool skip = false;
    for (uint32_t i = 0; i < query_count; ++i) {
        QueryObject query = {query_pool, first_query + i};
        auto qif_pair = queries_in_flight->find(query);
        auto query_state_pair = dev_data->queryToStateMap.find(query);
        if (query_state_pair != dev_data->queryToStateMap.end()) {
            // Available and in flight
            if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
                query_state_pair->second) {
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = getCBNode(dev_data, cmd_buffer);
                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
                    if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                        "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
                                        (uint64_t)(query_pool), first_query + i);
                    }
                }
                // Unavailable and in flight
            } else if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
                       !query_state_pair->second) {
                // TODO : Can there be the same query in use by multiple command buffers in flight?
                bool make_available = false;
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = getCBNode(dev_data, cmd_buffer);
                    make_available |= cb->queryToStateMap[query];
                }
                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                    "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                    (uint64_t)(query_pool), first_query + i);
                }
                // Unavailable
            } else if (query_state_pair != dev_data->queryToStateMap.end() && !query_state_pair->second) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                (uint64_t)(query_pool), first_query + i);
                // Uninitialized
            } else if (query_state_pair == dev_data->queryToStateMap.end()) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                "Cannot get query results on queryPool 0x%" PRIx64
                                " with index %d as data has not been collected for this index.",
                                (uint64_t)(query_pool), first_query + i);
            }
        }
    }
    return skip;
}
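
// For queries that are available but still attached to in-flight command buffers, mark every
// event that was waited on before the query was reset as needing to be signaled again, so the
// event state tracked by the layer stays consistent with what those command buffers expect.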
        QueryObject query = {query_pool, first_query + i};
        auto qif_pair = queries_in_flight->find(query);
        auto query_state_pair = dev_data->queryToStateMap.find(query);
        if (query_state_pair != dev_data->queryToStateMap.end()) {
            // Available and in flight
            if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
                query_state_pair->second) {
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = getCBNode(dev_data, cmd_buffer);
                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
                    if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
                        for (auto event : query_event_pair->second) {
                            dev_data->eventMap[event].needsSignaled = true;
                        }
                    }
                }
            }
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                                                   size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result =
        dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
    lock.lock();
    PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
    lock.unlock();
    return result;
}

static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
    bool skip_call = false;
    auto buffer_state = getBufferState(my_data, buffer);
    if (!buffer_state) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
    } else {
        if (buffer_state->in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
        }
    }
    return skip_call;
}

// Return true if given ranges intersect, else false
// Prereq : For both ranges, range->end - range->start > 0. A violation of that should already have
//  resulted in an error, so it is not re-checked here
// When one range is linear and the other is non-linear, the comparison is padded out to bufferImageGranularity
// In the case where padding is required, if an alias is encountered then a validation error is reported and skip_call
//  may be set by the callback function, so the caller should merge in the skip_call value if the padding case is possible.
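// Illustrative example (hypothetical bufferImageGranularity of 0x1000): a linear buffer range
//  [0x0000, 0x0FFF] and a non-linear image range [0x1000, 0x1FFF] do not intersect, because masking
//  both bounds with ~(0x1000 - 1) gives r1_end = 0x0000 < r2_start = 0x1000 and the first early
//  return below is taken. If the image range instead started at 0x0800, both masked values land in
//  the same 0x1000-aligned page, the early returns are skipped, and the MEMTRACK_INVALID_ALIASING
//  warning below is emitted before the function returns true.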
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip_call) {
    *skip_call = false;
    auto r1_start = range1->start;
    auto r1_end = range1->end;
    auto r2_start = range2->start;
    auto r2_end = range2->end;
    VkDeviceSize pad_align = 1;
    if (range1->linear != range2->linear) {
        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
    }
    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1)))
        return false;
    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1)))
        return false;

    if (range1->linear != range2->linear) {
        // In linear vs. non-linear case, warn of aliasing
        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
        const char *r1_type_str = range1->image ? "image" : "buffer";
        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
        const char *r2_type_str = range2->image ? "image" : "buffer";
        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
        *skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, 0,
                              MEMTRACK_INVALID_ALIASING, "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
                              " which may indicate a bug. For further info refer to the "
                              "Buffer-Image Granularity section of the Vulkan specification. "
                              "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/"
                              "xhtml/vkspec.html#resources-bufferimagegranularity)",
                              r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
    }
    // Ranges intersect
    return true;
}
// Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
    // Create a local MEMORY_RANGE struct to wrap offset/size
    MEMORY_RANGE range_wrap;
    // Synch linear with range1 to avoid padding and potential validation error case
    range_wrap.linear = range1->linear;
    range_wrap.start = offset;
    range_wrap.end = end;
    bool tmp_bool;
    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool);
}
// For given mem_info, set all ranges valid that intersect [offset-end] range
// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
    bool tmp_bool = false;
    MEMORY_RANGE map_range = {};
    map_range.linear = true;
    map_range.start = offset;
    map_range.end = end;
    for (auto &handle_range_pair : mem_info->bound_ranges) {
        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool)) {
            // TODO : WARN here if tmp_bool true?
            handle_range_pair.second.valid = true;
        }
    }
}
// Object with given handle is being bound to memory w/ given mem_info struct.
//  Track the newly bound memory range with given memoryOffset
//  Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
//  and non-linear range incorrectly overlap.
// Return true if an error is flagged and the user callback returns "true", otherwise false
// is_image indicates an image object, otherwise handle is for a buffer
// is_linear indicates a buffer or linear image
static bool InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
    bool skip_call = false;
    MEMORY_RANGE range;

    range.image = is_image;
    range.handle = handle;
    range.linear = is_linear;
    range.valid = mem_info->global_valid;
    range.memory = mem_info->mem;
    range.start = memoryOffset;
    range.size = memRequirements.size;
    range.end = memoryOffset + memRequirements.size - 1;
    range.aliases.clear();
    // Update Memory aliasing
    // Save aliased ranges so we can copy into the final map entry below. Can't do it in the loop because we don't yet have the
    //  final ptr. If we inserted into the map before the loop to get the final ptr, we might enter the loop when not needed and
    //  check the range against itself.
    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
    for (auto &obj_range_pair : mem_info->bound_ranges) {
        auto check_range = &obj_range_pair.second;
        bool intersection_error = false;
        if (rangesIntersect(dev_data, &range, check_range, &intersection_error)) {
            skip_call |= intersection_error;
            range.aliases.insert(check_range);
            tmp_alias_ranges.insert(check_range);
        }
    }
    mem_info->bound_ranges[handle] = std::move(range);
    for (auto tmp_range : tmp_alias_ranges) {
        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
    }
    if (is_image)
        mem_info->bound_images.insert(handle);
    else
        mem_info->bound_buffers.insert(handle);

    return skip_call;
}

static bool InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                   VkMemoryRequirements mem_reqs, bool is_linear) {
    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear);
}

static bool InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                    VkMemoryRequirements mem_reqs) {
    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true);
}

// Remove MEMORY_RANGE struct for given handle from bound_ranges of mem_info
// is_image indicates if handle is for image or buffer
// This function will also remove the handle-to-index mapping from the appropriate
//  map and clean up any aliases for the range being removed.
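// Illustrative sequence (hypothetical handles): if buffer B and non-linear image I were bound to
//  overlapping regions of the same VkDeviceMemory, InsertMemoryRange() above left
//  bound_ranges[B].aliases containing &bound_ranges[I] and vice versa. RemoveMemoryRange(B) must
//  therefore first erase &bound_ranges[B] from every alias partner (here I) before erasing B's own
//  entry, so that no surviving MEMORY_RANGE keeps a dangling pointer into freed map storage.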
static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
    auto erase_range = &mem_info->bound_ranges[handle];
    for (auto alias_range : erase_range->aliases) {
        alias_range->aliases.erase(erase_range);
    }
    erase_range->aliases.clear();
    mem_info->bound_ranges.erase(handle);
    if (is_image) {
        mem_info->bound_images.erase(handle);
    } else {
        mem_info->bound_buffers.erase(handle);
    }
}

static void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }

static void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }

static bool PreCallValidateDestroyBuffer(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE **buffer_state,
                                         VK_OBJECT *obj_struct) {
    *buffer_state = getBufferState(dev_data, buffer);
    *obj_struct = {reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT};
    if (dev_data->instance_data->disabled.destroy_buffer)
        return false;
    bool skip = false;
    if (*buffer_state) {
        skip |= validateIdleBuffer(dev_data, buffer);
    }
    return skip;
}

static void PostCallRecordDestroyBuffer(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, buffer_state->cb_bindings, obj_struct);
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        auto mem_info = getMemObjInfo(dev_data, mem_binding);
        if (mem_info) {
            RemoveBufferMemoryRange(reinterpret_cast<uint64_t &>(buffer), mem_info);
        }
    }
    ClearMemoryObjectBindings(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
    dev_data->bufferMap.erase(buffer_state->buffer);
}

VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
                                         const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    BUFFER_STATE *buffer_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
        lock.lock();
        PostCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
    }
}

static bool PreCallValidateDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE **buffer_view_state,
                                             VK_OBJECT *obj_struct) {
    *buffer_view_state = getBufferViewState(dev_data, buffer_view);
    *obj_struct = {reinterpret_cast<uint64_t &>(buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT};
    if (dev_data->instance_data->disabled.destroy_buffer_view)
        return false;
    bool skip = false;
    if (*buffer_view_state) {
        skip |= ValidateObjectNotInUse(dev_data, *buffer_view_state, *obj_struct, VALIDATION_ERROR_00701);
    }
    return skip;
}

static void PostCallRecordDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE *buffer_view_state,
                                            VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, buffer_view_state->cb_bindings, obj_struct);
    dev_data->bufferViewMap.erase(buffer_view);
}

VKAPI_ATTR void VKAPI_CALL
DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    // Validate state before calling down chain, update common data if we'll be calling down chain
    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
        lock.lock();
        PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
    }
}

static bool PreCallValidateDestroyImage(layer_data *dev_data, VkImage image, IMAGE_STATE **image_state, VK_OBJECT *obj_struct) {
    *image_state = getImageState(dev_data, image);
    *obj_struct = {reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT};
    if (dev_data->instance_data->disabled.destroy_image)
        return false;
    bool skip = false;
    if (*image_state) {
        skip |= ValidateObjectNotInUse(dev_data, *image_state, *obj_struct, VALIDATION_ERROR_00743);
    }
    return skip;
}

static void PostCallRecordDestroyImage(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, image_state->cb_bindings, obj_struct);
    // Clean up memory mapping, bindings and range references for image
    for (auto mem_binding : image_state->GetBoundMemory()) {
        auto mem_info = getMemObjInfo(dev_data, mem_binding);
        if (mem_info) {
            RemoveImageMemoryRange(obj_struct.handle, mem_info);
        }
    }
    ClearMemoryObjectBindings(dev_data, obj_struct.handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    // Remove image from imageMap
    dev_data->imageMap.erase(image);

    const auto &sub_entry = dev_data->imageSubresourceMap.find(image);
    if (sub_entry != dev_data->imageSubresourceMap.end()) {
        for (const auto &pair : sub_entry->second) {
            dev_data->imageLayoutMap.erase(pair);
        }
        dev_data->imageSubresourceMap.erase(sub_entry);
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    IMAGE_STATE *image_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
        lock.lock();
        PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
    }
}

static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
                                const char *funcName) {
    bool skip_call = false;
    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
        skip_call = log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
            reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, MEMTRACK_INVALID_MEM_TYPE, "MT",
            "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
            "type (0x%X) of this memory object 0x%" PRIx64 ".",
            funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, reinterpret_cast<const uint64_t &>(mem_info->mem));
    }
    return skip_call;
}

VKAPI_ATTR VkResult VKAPI_CALL
BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    // Track objects tied to memory
    uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
    bool skip_call = SetMemBinding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
    auto buffer_state = getBufferState(dev_data, buffer);
    if (buffer_state) {
        if (!buffer_state->memory_requirements_checked) {
            // There's no explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
            // BindBufferMemory, but it is implied in that the memory being bound must conform to the VkMemoryRequirements
            // returned by vkGetBufferMemoryRequirements()
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 buffer_handle, __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                 "vkBindBufferMemory(): Binding memory to buffer 0x%" PRIxLEAST64
                                 " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
                                 buffer_handle);
            // Make the call for them so we can verify the state
            lock.unlock();
            dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, &buffer_state->requirements);
            lock.lock();
        }
        buffer_state->binding.mem = mem;
        buffer_state->binding.offset = memoryOffset;
        buffer_state->binding.size = buffer_state->requirements.size;

        // Track and validate bound memory range information
        auto mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            skip_call |= InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
            skip_call |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, "BindBufferMemory");
        }

        // Validate memory requirements alignment
        if (vk_safe_modulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
                        "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
                        "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                        ", returned from a call to vkGetBufferMemoryRequirements with buffer",
                        memoryOffset, buffer_state->requirements.alignment);
        }

        // Validate device limits alignments
        static const VkBufferUsageFlagBits usage_list[3] = {
            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
            VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
        static const char *memory_type[3] = {"texel", "uniform", "storage"};
        static const char *offset_name[3] = {
            "minTexelBufferOffsetAlignment",
            "minUniformBufferOffsetAlignment",
            "minStorageBufferOffsetAlignment"
        };
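        // Illustrative example (hypothetical limit values): with a minUniformBufferOffsetAlignment of
        // 0x100, binding a buffer created with VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT at memoryOffset 0x180
        // makes vk_safe_modulo(0x180, 0x100) == 0x80, so the loop below reports the
        // "uniform"/"minUniformBufferOffsetAlignment" entry of these parallel arrays via
        // DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET.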
        const VkDeviceSize offset_requirement[3] = {
            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment
        };
        VkBufferUsageFlags usage = dev_data->bufferMap[buffer].get()->createInfo.usage;

        for (int i = 0; i < 3; i++) {
            if (usage & usage_list[i]) {
                if (vk_safe_modulo(memoryOffset, offset_requirement[i]) != 0) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                                0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
                                "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
                                "device limit %s 0x%" PRIxLEAST64,
                                memory_type[i], memoryOffset, offset_name[i], offset_requirement[i]);
                }
            }
        }
    }
    lock.unlock();
    if (!skip_call) {
        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL
GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
    auto buffer_state = getBufferState(dev_data, buffer);
    if (buffer_state) {
        buffer_state->requirements = *pMemoryRequirements;
        buffer_state->memory_requirements_checked = true;
    }
}

VKAPI_ATTR void VKAPI_CALL
GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
    auto image_state = getImageState(dev_data, image);
    if (image_state) {
        image_state->requirements = *pMemoryRequirements;
        image_state->memory_requirements_checked = true;
    }
}

static bool PreCallValidateDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE **image_view_state,
                                            VK_OBJECT *obj_struct) {
    *image_view_state = getImageViewState(dev_data, image_view);
    *obj_struct = {reinterpret_cast<uint64_t &>(image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT};
    if (dev_data->instance_data->disabled.destroy_image_view)
        return false;
    bool skip = false;
    if (*image_view_state) {
        skip |= ValidateObjectNotInUse(dev_data, *image_view_state, *obj_struct, VALIDATION_ERROR_00776);
    }
    return skip;
}

static void PostCallRecordDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE *image_view_state,
                                           VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, image_view_state->cb_bindings, obj_struct);
    dev_data->imageViewMap.erase(image_view);
}

VKAPI_ATTR void VKAPI_CALL
DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    IMAGE_VIEW_STATE *image_view_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
        lock.lock();
        PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    my_data->shaderModuleMap.erase(shaderModule);
    lock.unlock();

    my_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
}

static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
                                           VK_OBJECT *obj_struct) {
    *pipeline_state = getPipelineState(dev_data, pipeline);
    *obj_struct = {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT};
    if (dev_data->instance_data->disabled.destroy_pipeline)
        return false;
    bool skip = false;
    if (*pipeline_state) {
        skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, VALIDATION_ERROR_00555);
    }
    return skip;
}

static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
                                          VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
    dev_data->pipelineMap.erase(pipeline);
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    PIPELINE_STATE *pipeline_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
        lock.lock();
        PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    dev_data->pipelineLayoutMap.erase(pipelineLayout);
    lock.unlock();

    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
}

static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
                                          VK_OBJECT *obj_struct) {
    *sampler_state = getSamplerState(dev_data, sampler);
    *obj_struct = {reinterpret_cast<uint64_t &>(sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT};
    if (dev_data->instance_data->disabled.destroy_sampler)
        return false;
    bool skip = false;
    if (*sampler_state) {
        skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, VALIDATION_ERROR_00837);
    }
    return skip;
}

static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
                                         VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    if (sampler_state)
        invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
    dev_data->samplerMap.erase(sampler);
}

VKAPI_ATTR void VKAPI_CALL
DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    SAMPLER_STATE *sampler_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
        lock.lock();
        PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
    }
}

static void PostCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
    dev_data->descriptorSetLayoutMap.erase(ds_layout);
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
    std::unique_lock<std::mutex> lock(global_lock);
    PostCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
}

static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
                                                 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
    *desc_pool_state = getDescriptorPoolState(dev_data, pool);
    *obj_struct = {reinterpret_cast<uint64_t &>(pool), VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT};
    if (dev_data->instance_data->disabled.destroy_descriptor_pool)
        return false;
    bool skip = false;
    if (*desc_pool_state) {
        skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, VALIDATION_ERROR_00901);
    }
    return skip;
}

static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
                                                DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
    // Free sets that were in this pool
    for (auto ds : desc_pool_state->sets) {
        freeDescriptorSet(dev_data, ds);
    }
    dev_data->descriptorPoolMap.erase(descriptorPool);
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
        lock.lock();
        PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
    }
}
// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result
// If this is a secondary command buffer, then an error is flagged only if its primary is also in-flight
// If the primary is not in-flight, then remove the secondary from the global in-flight set
// This function is only valid at a point when cmdBuffer is being reset or freed
static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
                                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
        // Primary CB or secondary where primary is also in-flight is an error
        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, error_code, "DS",
                        "Attempt to %s command buffer (0x%p) which is in use. %s", action, cb_node->commandBuffer,
                        validation_error_map[error_code]);
        }
    }
    return skip_call;
}

// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;
    for (auto cmd_buffer : pPool->commandBuffers) {
        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
            skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action, error_code);
        }
    }
    return skip_call;
}

static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
    for (auto cmd_buffer : pPool->commandBuffers) {
        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    }
}

VKAPI_ATTR void VKAPI_CALL
FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);

    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        // Verify that the command buffer is not in flight before it is freed below
        if (cb_node) {
            skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_00096);
        }
    }

    if (skip_call)
        return;

    auto pPool = getCommandPoolNode(dev_data, commandPool);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        // Delete CB information structure, and remove from commandBufferMap
        if (cb_node) {
            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
            // reset prior to delete for data clean-up
            resetCB(dev_data, cb_node->commandBuffer);
            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
            delete cb_node;
        }

        // Remove commandBuffer reference from commandPoolMap
        pPool->commandBuffers.remove(pCommandBuffers[i]);
    }
    lock.unlock();

    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                  VkCommandPool *pCommandPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
        if (!dev_data->enabled_features.pipelineStatisticsQuery) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            __LINE__, VALIDATION_ERROR_01006, "DS",
                            "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device "
                            "with VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE. %s",
                            validation_error_map[VALIDATION_ERROR_01006]);
        }
    }

    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    if (!skip) {
        result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
    }
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
        qp_node->createInfo = *pCreateInfo;
    }
    return result;
}

static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE **cp_state) {
    *cp_state = getCommandPoolNode(dev_data, pool);
    if (dev_data->instance_data->disabled.destroy_command_pool)
        return false;
    bool skip = false;
    if (*cp_state) {
        // Verify that command buffers in pool are complete (not in-flight)
        skip |= checkCommandBuffersInFlight(dev_data, *cp_state, "destroy command pool with", VALIDATION_ERROR_00077);
    }
    return skip;
}

static void PostCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE *cp_state) {
    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
    clearCommandBuffersInFlight(dev_data, cp_state);
    for (auto cb : cp_state->commandBuffers) {
        clear_cmd_buf_and_mem_references(dev_data, cb);
        auto cb_node = getCBNode(dev_data, cb);
        // Remove references to this cb_node prior to delete
        // TODO : Need better solution here, resetCB?
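        // The loops below drop the references that other state objects hold to this command buffer
        // (object bindings and framebuffer cb_bindings) before the GLOBAL_CB_NODE itself is deleted
        // and erased from commandBufferMap.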
        for (auto obj : cb_node->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, cb_node);
        }
        for (auto framebuffer : cb_node->framebuffers) {
            auto fb_state = getFramebufferState(dev_data, framebuffer);
            if (fb_state)
                fb_state->cb_bindings.erase(cb_node);
        }
        dev_data->commandBufferMap.erase(cb); // Remove this command buffer
        delete cb_node;                       // delete CB info structure
    }
    dev_data->commandPoolMap.erase(pool);
}

// Destroy commandPool along with all of the commandBuffers allocated from that pool
VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    COMMAND_POOL_NODE *cp_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool, &cp_state);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
        lock.lock();
        PostCallRecordDestroyCommandPool(dev_data, commandPool, cp_state);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    std::unique_lock<std::mutex> lock(global_lock);
    auto pPool = getCommandPoolNode(dev_data, commandPool);
    skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_00072);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result) {
        lock.lock();
        clearCommandBuffersInFlight(dev_data, pPool);
        for (auto cmdBuffer : pPool->commandBuffers) {
            resetCB(dev_data, cmdBuffer);
        }
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto pFence = getFenceNode(dev_data, pFences[i]);
        if (pFence && pFence->state == FENCE_INFLIGHT) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                 "Fence 0x%" PRIx64 " is in use.", reinterpret_cast<const uint64_t &>(pFences[i]));
        }
    }
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);

    if (result == VK_SUCCESS) {
        lock.lock();
        for (uint32_t i = 0; i < fenceCount; ++i) {
            auto pFence = getFenceNode(dev_data, pFences[i]);
            if (pFence) {
                pFence->state = FENCE_UNSIGNALED;
            }
        }
        lock.unlock();
    }

    return result;
}

// For given cb_nodes, invalidate them and track object causing invalidation
void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
    for (auto cb_node : cb_nodes) {
        if (cb_node->state == CB_RECORDING) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "Invalidating a command buffer that's currently being recorded: 0x%p.", cb_node->commandBuffer);
        }
        cb_node->state = CB_INVALID;
        cb_node->broken_bindings.push_back(obj);
    }
}

static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
                                              FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
    *framebuffer_state = getFramebufferState(dev_data, framebuffer);
    *obj_struct = {reinterpret_cast<uint64_t &>(framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT};
    if (dev_data->instance_data->disabled.destroy_framebuffer)
        return false;
    bool skip = false;
    if (*framebuffer_state) {
        skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, VALIDATION_ERROR_00422);
    }
    return skip;
}

static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
                                             VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
    dev_data->frameBufferMap.erase(framebuffer);
}

VKAPI_ATTR void VKAPI_CALL
DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    FRAMEBUFFER_STATE *framebuffer_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
        lock.lock();
        PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
    }
}

static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
                                             VK_OBJECT *obj_struct) {
    *rp_state = getRenderPassState(dev_data, render_pass);
    *obj_struct = {reinterpret_cast<uint64_t &>(render_pass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT};
    if (dev_data->instance_data->disabled.destroy_renderpass)
        return false;
    bool skip = false;
    if (*rp_state) {
        skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, VALIDATION_ERROR_00393);
    }
    return skip;
}

static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
                                            VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
    dev_data->renderPassMap.erase(render_pass);
}

VKAPI_ATTR void VKAPI_CALL
DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    RENDER_PASS_STATE *rp_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
        lock.lock();
        PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO: Add check for VALIDATION_ERROR_00658
    // TODO: Add check for VALIDATION_ERROR_00666
    // TODO: Add check for VALIDATION_ERROR_00667
    // TODO: Add check for VALIDATION_ERROR_00668
    // TODO: Add check for VALIDATION_ERROR_00669
    VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
        dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_STATE>(new BUFFER_STATE(*pBuffer, pCreateInfo))));
    }
    return result;
}

static bool PreCallValidateCreateBufferView(layer_data *dev_data, const VkBufferViewCreateInfo *pCreateInfo) {
    bool skip_call = false;
    BUFFER_STATE *buffer_state = getBufferState(dev_data, pCreateInfo->buffer);
    // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
    if (buffer_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCreateBufferView()");
        // In order to create a valid buffer view, the buffer must have been created with at least one of the
        // following flags: UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
        skip_call |= ValidateBufferUsageFlags(
            dev_data, buffer_state, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
            VALIDATION_ERROR_00694, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
    }
    return skip_call;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        lock.lock();
        dev_data->bufferViewMap[*pView] = unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        IMAGE_LAYOUT_NODE image_state;
        image_state.layout = pCreateInfo->initialLayout;
        image_state.format = pCreateInfo->format;
        dev_data->imageMap.insert(std::make_pair(*pImage, unique_ptr<IMAGE_STATE>(new IMAGE_STATE(*pImage, pCreateInfo))));
        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
        dev_data->imageLayoutMap[subpair] = image_state;
    }
    return result;
}

static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
    /* expects global_lock to be held by caller */

    auto image_state = getImageState(dev_data, image);
    if (image_state) {
        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
         * the actual values.
         */
        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
            range->levelCount = image_state->createInfo.mipLevels - range->baseMipLevel;
        }

        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
            range->layerCount = image_state->createInfo.arrayLayers - range->baseArrayLayer;
        }
    }
}

// Return the correct layer/level counts if the caller used the special
// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
                                         VkImage image) {
    /* expects global_lock to be held by caller */

    *levels = range.levelCount;
    *layers = range.layerCount;
    auto image_state = getImageState(dev_data, image);
    if (image_state) {
        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
            *levels = image_state->createInfo.mipLevels - range.baseMipLevel;
        }
        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
            *layers = image_state->createInfo.arrayLayers - range.baseArrayLayer;
        }
    }
}

// For the given format verify that the aspect masks make sense
static bool ValidateImageAspectMask(layer_data *dev_data, VkImage image, VkFormat format, VkImageAspectFlags aspect_mask,
                                    const char *func_name) {
    bool skip = false;
    if (vk_format_is_color(format)) {
        if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set. %s", func_name,
                            validation_error_map[VALIDATION_ERROR_00741]);
        }
        if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != aspect_mask) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set. %s", func_name,
                            validation_error_map[VALIDATION_ERROR_00741]);
        }
    } else if (vk_format_is_depth_and_stencil(format)) {
        if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
                            "%s: Depth/stencil image formats must have at least one of VK_IMAGE_ASPECT_DEPTH_BIT "
                            "and VK_IMAGE_ASPECT_STENCIL_BIT set. %s",
%s", 6605 func_name, validation_error_map[VALIDATION_ERROR_00741]); 6606 } 6607 if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != aspect_mask) { 6608 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 6609 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE", 6610 "%s: Combination depth/stencil image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT and " 6611 "VK_IMAGE_ASPECT_STENCIL_BIT set. %s", 6612 func_name, validation_error_map[VALIDATION_ERROR_00741]); 6613 } 6614 } else if (vk_format_is_depth_only(format)) { 6615 if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) { 6616 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 6617 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE", 6618 "%s: Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set. %s", func_name, 6619 validation_error_map[VALIDATION_ERROR_00741]); 6620 } 6621 if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != aspect_mask) { 6622 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 6623 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE", 6624 "%s: Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set. %s", func_name, 6625 validation_error_map[VALIDATION_ERROR_00741]); 6626 } 6627 } else if (vk_format_is_stencil_only(format)) { 6628 if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT) { 6629 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 6630 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE", 6631 "%s: Stencil-only image formats must have the VK_IMAGE_ASPECT_STENCIL_BIT set. %s", func_name, 6632 validation_error_map[VALIDATION_ERROR_00741]); 6633 } 6634 if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != aspect_mask) { 6635 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 6636 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE", 6637 "%s: Stencil-only image formats can have only the VK_IMAGE_ASPECT_STENCIL_BIT set. 
%s", func_name, 6638 validation_error_map[VALIDATION_ERROR_00741]); 6639 } 6640 } 6641 return skip; 6642} 6643 6644static bool PreCallValidateCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *create_info) { 6645 bool skip = false; 6646 IMAGE_STATE *image_state = getImageState(dev_data, create_info->image); 6647 if (image_state) { 6648 skip |= ValidateImageUsageFlags( 6649 dev_data, image_state, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | 6650 VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, 6651 false, -1, "vkCreateImageView()", 6652 "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT]_BIT"); 6653 // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time 6654 skip |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCreateImageView()"); 6655 // Checks imported from image layer 6656 if (create_info->subresourceRange.baseMipLevel >= image_state->createInfo.mipLevels) { 6657 std::stringstream ss; 6658 ss << "vkCreateImageView called with baseMipLevel " << create_info->subresourceRange.baseMipLevel << " for image " 6659 << create_info->image << " that only has " << image_state->createInfo.mipLevels << " mip levels."; 6660 skip |= 6661 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 6662 VALIDATION_ERROR_00768, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00768]); 6663 } 6664 if (create_info->subresourceRange.baseArrayLayer >= image_state->createInfo.arrayLayers) { 6665 std::stringstream ss; 6666 ss << "vkCreateImageView called with baseArrayLayer " << create_info->subresourceRange.baseArrayLayer << " for image " 6667 << create_info->image << " that only has " << image_state->createInfo.arrayLayers << " array layers."; 6668 skip |= 6669 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 6670 VALIDATION_ERROR_00769, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00769]); 6671 } 6672 // TODO: Need new valid usage language for levelCount == 0 & layerCount == 0 6673 if (!create_info->subresourceRange.levelCount) { 6674 std::stringstream ss; 6675 ss << "vkCreateImageView called with 0 in pCreateInfo->subresourceRange.levelCount."; 6676 skip |= 6677 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 6678 VALIDATION_ERROR_00768, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00768]); 6679 } 6680 if (!create_info->subresourceRange.layerCount) { 6681 std::stringstream ss; 6682 ss << "vkCreateImageView called with 0 in pCreateInfo->subresourceRange.layerCount."; 6683 skip |= 6684 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 6685 VALIDATION_ERROR_00769, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00769]); 6686 } 6687 6688 VkImageCreateFlags image_flags = image_state->createInfo.flags; 6689 VkFormat image_format = image_state->createInfo.format; 6690 VkFormat view_format = create_info->format; 6691 VkImageAspectFlags aspect_mask = create_info->subresourceRange.aspectMask; 6692 6693 // Validate VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT state 6694 if (image_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) { 6695 // Format MUST be compatible (in the same format compatibility class) as the format the image 
was created with
            if (vk_format_get_compatibility_class(image_format) != vk_format_get_compatibility_class(view_format)) {
                std::stringstream ss;
                ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
                   << " is not in the same format compatibility class as image (" << (uint64_t)create_info->image << ") format "
                   << string_VkFormat(image_format) << ". Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT "
                   << "can support ImageViews with differing formats but they must be in the same compatibility class.";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                VALIDATION_ERROR_02171, "IMAGE", "%s %s", ss.str().c_str(),
                                validation_error_map[VALIDATION_ERROR_02171]);
            }
        } else {
            // Format MUST be IDENTICAL to the format the image was created with
            if (image_format != view_format) {
                std::stringstream ss;
                ss << "vkCreateImageView() format " << string_VkFormat(view_format) << " differs from image "
                   << (uint64_t)create_info->image << " format " << string_VkFormat(image_format)
                   << ". Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT was set on image creation.";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                VALIDATION_ERROR_02172, "IMAGE", "%s %s", ss.str().c_str(),
                                validation_error_map[VALIDATION_ERROR_02172]);
            }
        }

        // Validate correct image aspect bits for desired formats and format consistency
        skip |= ValidateImageAspectMask(dev_data, image_state->image, image_format, aspect_mask, "vkCreateImageView()");
        if (vk_format_is_color(image_format) && !vk_format_is_color(view_format)) {
            std::stringstream ss;
            ss << "vkCreateImageView: The image view's format can differ from the parent image's format, but both must be "
               << "color formats. ImageFormat is " << string_VkFormat(image_format) << " ImageViewFormat is "
               << string_VkFormat(view_format);
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            (uint64_t)create_info->image, __LINE__, VALIDATION_ERROR_02171, "IMAGE", "%s %s", ss.str().c_str(),
                            validation_error_map[VALIDATION_ERROR_02171]);
            // TODO: Uncompressed formats are compatible if they occupy the same number of bits per pixel.
            // Compressed formats are compatible if the only difference between them is the numerical type of
            // the uncompressed pixels (e.g. signed vs. unsigned, or sRGB vs. UNORM encoding).
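            // A minimal sketch (illustration only, not wired in here) of how the uncompressed half of the TODO
            // above might be handled, assuming format helpers along the lines of vk_format_is_compressed() and
            // vk_format_get_size() are available in the format utilities:
            //
            //     if (!vk_format_is_compressed(image_format) && !vk_format_is_compressed(view_format) &&
            //         (vk_format_get_size(image_format) != vk_format_get_size(view_format))) {
            //         // The two formats occupy a different number of bits per texel, so they are not compatible
            //         // even when VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT was used at image creation.
            //     }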
6732 } 6733 } 6734 return skip; 6735} 6736 6737static inline void PostCallRecordCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *create_info, VkImageView view) { 6738 dev_data->imageViewMap[view] = unique_ptr<IMAGE_VIEW_STATE>(new IMAGE_VIEW_STATE(view, create_info)); 6739 ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[view].get()->create_info.subresourceRange, create_info->image); 6740} 6741 6742VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo, 6743 const VkAllocationCallbacks *pAllocator, VkImageView *pView) { 6744 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6745 std::unique_lock<std::mutex> lock(global_lock); 6746 bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo); 6747 lock.unlock(); 6748 if (skip) 6749 return VK_ERROR_VALIDATION_FAILED_EXT; 6750 VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView); 6751 if (VK_SUCCESS == result) { 6752 lock.lock(); 6753 PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView); 6754 lock.unlock(); 6755 } 6756 6757 return result; 6758} 6759 6760VKAPI_ATTR VkResult VKAPI_CALL 6761CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) { 6762 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6763 VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence); 6764 if (VK_SUCCESS == result) { 6765 std::lock_guard<std::mutex> lock(global_lock); 6766 auto &fence_node = dev_data->fenceMap[*pFence]; 6767 fence_node.fence = *pFence; 6768 fence_node.createInfo = *pCreateInfo; 6769 fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? 
FENCE_RETIRED : FENCE_UNSIGNALED; 6770 } 6771 return result; 6772} 6773 6774// TODO handle pipeline caches 6775VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo, 6776 const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) { 6777 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6778 VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache); 6779 return result; 6780} 6781 6782VKAPI_ATTR void VKAPI_CALL 6783DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) { 6784 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6785 dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator); 6786} 6787 6788VKAPI_ATTR VkResult VKAPI_CALL 6789GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) { 6790 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6791 VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData); 6792 return result; 6793} 6794 6795VKAPI_ATTR VkResult VKAPI_CALL 6796MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) { 6797 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6798 VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches); 6799 return result; 6800} 6801 6802// utility function to set collective state for pipeline 6803void set_pipeline_state(PIPELINE_STATE *pPipe) { 6804 // If any attachment used by this pipeline has blendEnable, set top-level blendEnable 6805 if (pPipe->graphicsPipelineCI.pColorBlendState) { 6806 for (size_t i = 0; i < pPipe->attachments.size(); ++i) { 6807 if (VK_TRUE == pPipe->attachments[i].blendEnable) { 6808 if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && 6809 (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) || 6810 ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && 6811 (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) || 6812 ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && 6813 (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) || 6814 ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && 6815 (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) { 6816 pPipe->blendConstantsEnabled = true; 6817 } 6818 } 6819 } 6820 } 6821} 6822 6823static bool PreCallCreateGraphicsPipelines(layer_data *device_data, uint32_t count, 6824 const VkGraphicsPipelineCreateInfo *create_infos, vector<PIPELINE_STATE *> &pipe_state) { 6825 bool skip = false; 6826 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map); 6827 6828 for (uint32_t i = 0; i < count; i++) { 6829 skip |= verifyPipelineCreateState(device_data, pipe_state, i); 6830 if (create_infos[i].pVertexInputState != NULL) { 6831 for (uint32_t j = 0; j < create_infos[i].pVertexInputState->vertexAttributeDescriptionCount; j++) { 6832 VkFormat format = 
create_infos[i].pVertexInputState->pVertexAttributeDescriptions[j].format; 6833 // Internal call to get format info. Still goes through layers, could potentially go directly to ICD. 6834 VkFormatProperties properties; 6835 instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &properties); 6836 if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) { 6837 skip |= log_msg( 6838 device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, 6839 __LINE__, VALIDATION_ERROR_01413, "IMAGE", 6840 "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format " 6841 "(%s) is not a supported vertex buffer format. %s", 6842 i, j, string_VkFormat(format), validation_error_map[VALIDATION_ERROR_01413]); 6843 } 6844 } 6845 } 6846 } 6847 return skip; 6848} 6849 6850VKAPI_ATTR VkResult VKAPI_CALL 6851CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, 6852 const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, 6853 VkPipeline *pPipelines) { 6854 // TODO What to do with pipelineCache? 6855 // The order of operations here is a little convoluted but gets the job done 6856 // 1. Pipeline create state is first shadowed into PIPELINE_STATE struct 6857 // 2. Create state is then validated (which uses flags setup during shadowing) 6858 // 3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap 6859 bool skip = false; 6860 // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic 6861 vector<PIPELINE_STATE *> pipe_state(count); 6862 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6863 6864 uint32_t i = 0; 6865 std::unique_lock<std::mutex> lock(global_lock); 6866 6867 for (i = 0; i < count; i++) { 6868 pipe_state[i] = new PIPELINE_STATE; 6869 pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i]); 6870 pipe_state[i]->render_pass_ci.initialize(getRenderPassState(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr()); 6871 pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout); 6872 } 6873 skip |= PreCallCreateGraphicsPipelines(dev_data, count, pCreateInfos, pipe_state); 6874 6875 if (skip) { 6876 for (i = 0; i < count; i++) { 6877 delete pipe_state[i]; 6878 pPipelines[i] = VK_NULL_HANDLE; 6879 } 6880 return VK_ERROR_VALIDATION_FAILED_EXT; 6881 } 6882 6883 lock.unlock(); 6884 auto result = dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines); 6885 lock.lock(); 6886 for (i = 0; i < count; i++) { 6887 if (pPipelines[i] == VK_NULL_HANDLE) { 6888 delete pipe_state[i]; 6889 } 6890 else { 6891 pipe_state[i]->pipeline = pPipelines[i]; 6892 dev_data->pipelineMap[pipe_state[i]->pipeline] = pipe_state[i]; 6893 } 6894 } 6895 6896 return result; 6897} 6898 6899VKAPI_ATTR VkResult VKAPI_CALL 6900CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, 6901 const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, 6902 VkPipeline *pPipelines) { 6903 bool skip = false; 6904 6905 // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic 6906 vector<PIPELINE_STATE *> pPipeState(count); 6907 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6908 6909 uint32_t i = 0; 6910 std::unique_lock<std::mutex> lock(global_lock); 6911 
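    // As in the graphics path above, each create info is first shadowed into a PIPELINE_STATE object so that
    // validate_compute_pipeline() can check the compute shader stage against the enabled device features and
    // the tracked shader modules before the call is passed down the chain.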
for (i = 0; i < count; i++) { 6912 // TODO: Verify compute stage bits 6913 6914 // Create and initialize internal tracking data structure 6915 pPipeState[i] = new PIPELINE_STATE; 6916 pPipeState[i]->initComputePipeline(&pCreateInfos[i]); 6917 pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout); 6918 6919 // TODO: Add Compute Pipeline Verification 6920 skip |= !validate_compute_pipeline(dev_data->report_data, pPipeState[i], &dev_data->enabled_features, 6921 dev_data->shaderModuleMap); 6922 // skip |= verifyPipelineCreateState(dev_data, pPipeState[i]); 6923 } 6924 6925 if (skip) { 6926 for (i = 0; i < count; i++) { 6927 // Clean up any locally allocated data structures 6928 delete pPipeState[i]; 6929 pPipelines[i] = VK_NULL_HANDLE; 6930 } 6931 return VK_ERROR_VALIDATION_FAILED_EXT; 6932 } 6933 6934 lock.unlock(); 6935 auto result = dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines); 6936 lock.lock(); 6937 for (i = 0; i < count; i++) { 6938 if (pPipelines[i] == VK_NULL_HANDLE) { 6939 delete pPipeState[i]; 6940 } 6941 else { 6942 pPipeState[i]->pipeline = pPipelines[i]; 6943 dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i]; 6944 } 6945 } 6946 6947 return result; 6948} 6949 6950VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo, 6951 const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) { 6952 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6953 VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler); 6954 if (VK_SUCCESS == result) { 6955 std::lock_guard<std::mutex> lock(global_lock); 6956 dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo)); 6957 } 6958 return result; 6959} 6960 6961static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) { 6962 if (dev_data->instance_data->disabled.create_descriptor_set_layout) 6963 return false; 6964 return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info); 6965} 6966 6967static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info, 6968 VkDescriptorSetLayout set_layout) { 6969 // TODO: Convert this to unique_ptr to avoid leaks 6970 dev_data->descriptorSetLayoutMap[set_layout] = new cvdescriptorset::DescriptorSetLayout(create_info, set_layout); 6971} 6972 6973VKAPI_ATTR VkResult VKAPI_CALL 6974CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo, 6975 const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) { 6976 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6977 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 6978 std::unique_lock<std::mutex> lock(global_lock); 6979 bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo); 6980 if (!skip) { 6981 lock.unlock(); 6982 result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout); 6983 if (VK_SUCCESS == result) { 6984 lock.lock(); 6985 PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout); 6986 } 6987 } 6988 return result; 6989} 6990 6991// Used by CreatePipelineLayout and CmdPushConstants. 
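// The checks below mirror the push constant rules from the spec: offset and size must each be a multiple of 4,
// size must be non-zero, and offset + size must stay within VkPhysicalDeviceLimits::maxPushConstantsSize.
// For example, against the minimum required maxPushConstantsSize of 128, a range of {offset = 16, size = 64}
// passes every check, while {offset = 2, size = 6} fails both multiple-of-4 checks.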
6992// Note that the index argument is optional and only used by CreatePipelineLayout. 6993static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size, 6994 const char *caller_name, uint32_t index = 0) { 6995 if (dev_data->instance_data->disabled.push_constant_range) 6996 return false; 6997 uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize; 6998 bool skip_call = false; 6999 // Check that offset + size don't exceed the max. 7000 // Prevent arithetic overflow here by avoiding addition and testing in this order. 7001 if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) { 7002 // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem. 7003 if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) { 7004 if (offset >= maxPushConstantsSize) { 7005 skip_call |= 7006 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7007 VALIDATION_ERROR_00877, "DS", "%s call has push constants index %u with offset %u that " 7008 "exceeds this device's maxPushConstantSize of %u. %s", 7009 caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00877]); 7010 } 7011 if (size > maxPushConstantsSize - offset) { 7012 skip_call |= 7013 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7014 VALIDATION_ERROR_00880, "DS", "%s call has push constants index %u with offset %u and size %u that " 7015 "exceeds this device's maxPushConstantSize of %u. %s", 7016 caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00880]); 7017 } 7018 } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) { 7019 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7020 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that " 7021 "exceeds this device's maxPushConstantSize of %u.", 7022 caller_name, offset, size, maxPushConstantsSize); 7023 } else { 7024 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7025 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name); 7026 } 7027 } 7028 // size needs to be non-zero and a multiple of 4. 7029 if ((size == 0) || ((size & 0x3) != 0)) { 7030 if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) { 7031 if (size == 0) { 7032 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 7033 __LINE__, VALIDATION_ERROR_00878, "DS", "%s call has push constants index %u with " 7034 "size %u. Size must be greater than zero. %s", 7035 caller_name, index, size, validation_error_map[VALIDATION_ERROR_00878]); 7036 } 7037 if (size & 0x3) { 7038 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 7039 __LINE__, VALIDATION_ERROR_00879, "DS", "%s call has push constants index %u with " 7040 "size %u. Size must be a multiple of 4. 
%s", 7041 caller_name, index, size, validation_error_map[VALIDATION_ERROR_00879]); 7042 } 7043 } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) { 7044 skip_call |= 7045 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7046 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with " 7047 "size %u. Size must be greater than zero and a multiple of 4.", 7048 caller_name, size); 7049 } else { 7050 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7051 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name); 7052 } 7053 } 7054 // offset needs to be a multiple of 4. 7055 if ((offset & 0x3) != 0) { 7056 if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) { 7057 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7058 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with " 7059 "offset %u. Offset must be a multiple of 4.", 7060 caller_name, index, offset); 7061 } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) { 7062 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7063 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with " 7064 "offset %u. Offset must be a multiple of 4.", 7065 caller_name, offset); 7066 } else { 7067 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7068 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name); 7069 } 7070 } 7071 return skip_call; 7072} 7073 7074VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, 7075 const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) { 7076 bool skip_call = false; 7077 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 7078 // TODO : Add checks for VALIDATION_ERRORS 865-871 7079 // Push Constant Range checks 7080 uint32_t i, j; 7081 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { 7082 skip_call |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset, 7083 pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i); 7084 if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) { 7085 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7086 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has no stageFlags set."); 7087 } 7088 } 7089 if (skip_call) 7090 return VK_ERROR_VALIDATION_FAILED_EXT; 7091 7092 // Each range has been validated. Now check for overlap between ranges (if they are good). 7093 // There's no explicit Valid Usage language against this, so issue a warning instead of an error. 
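    // Two ranges [minA, maxA) and [minB, maxB) overlap exactly when each one starts before the other ends, which
    // is what the condition below tests. For example, {offset 0, size 16} and {offset 8, size 8} overlap on
    // [8, 16), while {offset 0, size 16} and {offset 16, size 16} touch but do not overlap, so no warning is issued.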
7094 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { 7095 for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) { 7096 const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset; 7097 const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size; 7098 const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset; 7099 const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size; 7100 if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) { 7101 skip_call |= 7102 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7103 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with " 7104 "overlapping ranges: %u:[%u, %u), %u:[%u, %u)", 7105 i, minA, maxA, j, minB, maxB); 7106 } 7107 } 7108 } 7109 7110 VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout); 7111 if (VK_SUCCESS == result) { 7112 std::lock_guard<std::mutex> lock(global_lock); 7113 PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout]; 7114 plNode.layout = *pPipelineLayout; 7115 plNode.set_layouts.resize(pCreateInfo->setLayoutCount); 7116 for (i = 0; i < pCreateInfo->setLayoutCount; ++i) { 7117 plNode.set_layouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]); 7118 } 7119 plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount); 7120 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { 7121 plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i]; 7122 } 7123 } 7124 return result; 7125} 7126 7127VKAPI_ATTR VkResult VKAPI_CALL 7128CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, 7129 VkDescriptorPool *pDescriptorPool) { 7130 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 7131 VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool); 7132 if (VK_SUCCESS == result) { 7133 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, 7134 (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64, 7135 (uint64_t)*pDescriptorPool)) 7136 return VK_ERROR_VALIDATION_FAILED_EXT; 7137 DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo); 7138 if (NULL == pNewNode) { 7139 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, 7140 (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", 7141 "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()")) 7142 return VK_ERROR_VALIDATION_FAILED_EXT; 7143 } else { 7144 std::lock_guard<std::mutex> lock(global_lock); 7145 dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode; 7146 } 7147 } else { 7148 // Need to do anything if pool create fails? 
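        // No layer state has been recorded at this point (the descriptorPoolMap entry is only added on
        // VK_SUCCESS above), so a failed create currently requires no cleanup here.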
7149 } 7150 return result; 7151} 7152 7153VKAPI_ATTR VkResult VKAPI_CALL 7154ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) { 7155 // TODO : Add checks for VALIDATION_ERROR_00928 7156 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 7157 VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags); 7158 if (VK_SUCCESS == result) { 7159 std::lock_guard<std::mutex> lock(global_lock); 7160 clearDescriptorPool(dev_data, device, descriptorPool, flags); 7161 } 7162 return result; 7163} 7164// Ensure the pool contains enough descriptors and descriptor sets to satisfy 7165// an allocation request. Fills common_data with the total number of descriptors of each type required, 7166// as well as DescriptorSetLayout ptrs used for later update. 7167static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo, 7168 cvdescriptorset::AllocateDescriptorSetsData *common_data) { 7169 if (dev_data->instance_data->disabled.allocate_descriptor_sets) 7170 return false; 7171 // All state checks for AllocateDescriptorSets is done in single function 7172 return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data); 7173} 7174// Allocation state was good and call down chain was made so update state based on allocating descriptor sets 7175static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo, 7176 VkDescriptorSet *pDescriptorSets, 7177 const cvdescriptorset::AllocateDescriptorSetsData *common_data) { 7178 // All the updates are contained in a single cvdescriptorset function 7179 cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap, 7180 &dev_data->setMap, dev_data); 7181} 7182 7183VKAPI_ATTR VkResult VKAPI_CALL 7184AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) { 7185 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 7186 std::unique_lock<std::mutex> lock(global_lock); 7187 cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount); 7188 bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data); 7189 lock.unlock(); 7190 7191 if (skip_call) 7192 return VK_ERROR_VALIDATION_FAILED_EXT; 7193 7194 VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets); 7195 7196 if (VK_SUCCESS == result) { 7197 lock.lock(); 7198 PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data); 7199 lock.unlock(); 7200 } 7201 return result; 7202} 7203// Verify state before freeing DescriptorSets 7204static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count, 7205 const VkDescriptorSet *descriptor_sets) { 7206 if (dev_data->instance_data->disabled.free_descriptor_sets) 7207 return false; 7208 bool skip_call = false; 7209 // First make sure sets being destroyed are not currently in-use 7210 for (uint32_t i = 0; i < count; ++i) 7211 skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets"); 7212 7213 DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool); 7214 if (pool_state && 
!(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) { 7215 // Can't Free from a NON_FREE pool 7216 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, 7217 reinterpret_cast<uint64_t &>(pool), __LINE__, VALIDATION_ERROR_00922, "DS", 7218 "It is invalid to call vkFreeDescriptorSets() with a pool created without setting " 7219 "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s", 7220 validation_error_map[VALIDATION_ERROR_00922]); 7221 } 7222 return skip_call; 7223} 7224// Sets have been removed from the pool so update underlying state 7225static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count, 7226 const VkDescriptorSet *descriptor_sets) { 7227 DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool); 7228 // Update available descriptor sets in pool 7229 pool_state->availableSets += count; 7230 7231 // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap 7232 for (uint32_t i = 0; i < count; ++i) { 7233 auto set_state = dev_data->setMap[descriptor_sets[i]]; 7234 uint32_t type_index = 0, descriptor_count = 0; 7235 for (uint32_t j = 0; j < set_state->GetBindingCount(); ++j) { 7236 type_index = static_cast<uint32_t>(set_state->GetTypeFromIndex(j)); 7237 descriptor_count = set_state->GetDescriptorCountFromIndex(j); 7238 pool_state->availableDescriptorTypeCount[type_index] += descriptor_count; 7239 } 7240 freeDescriptorSet(dev_data, set_state); 7241 pool_state->sets.erase(set_state); 7242 } 7243} 7244 7245VKAPI_ATTR VkResult VKAPI_CALL 7246FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) { 7247 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 7248 // Make sure that no sets being destroyed are in-flight 7249 std::unique_lock<std::mutex> lock(global_lock); 7250 bool skip_call = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets); 7251 lock.unlock(); 7252 7253 if (skip_call) 7254 return VK_ERROR_VALIDATION_FAILED_EXT; 7255 VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets); 7256 if (VK_SUCCESS == result) { 7257 lock.lock(); 7258 PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets); 7259 lock.unlock(); 7260 } 7261 return result; 7262} 7263// TODO : This is a Proof-of-concept for core validation architecture 7264// Really we'll want to break out these functions to separate files but 7265// keeping it all together here to prove out design 7266// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets() 7267static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount, 7268 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount, 7269 const VkCopyDescriptorSet *pDescriptorCopies) { 7270 if (dev_data->instance_data->disabled.update_descriptor_sets) 7271 return false; 7272 // First thing to do is perform map look-ups. 
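    // (Each VkWriteDescriptorSet names its own dstSet and each VkCopyDescriptorSet names a srcSet and dstSet,
    // so the handle-to-state look-ups happen per entry rather than once for the whole call.)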
7273 // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets 7274 // so we can't just do a single map look-up up-front, but do them individually in functions below 7275 7276 // Now make call(s) that validate state, but don't perform state updates in this function 7277 // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the 7278 // namespace which will parse params and make calls into specific class instances 7279 return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites, 7280 descriptorCopyCount, pDescriptorCopies); 7281} 7282// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets() 7283static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount, 7284 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount, 7285 const VkCopyDescriptorSet *pDescriptorCopies) { 7286 cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, 7287 pDescriptorCopies); 7288} 7289 7290VKAPI_ATTR void VKAPI_CALL 7291UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites, 7292 uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) { 7293 // Only map look-up at top level is for device-level layer_data 7294 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 7295 std::unique_lock<std::mutex> lock(global_lock); 7296 bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, 7297 pDescriptorCopies); 7298 lock.unlock(); 7299 if (!skip_call) { 7300 dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, 7301 pDescriptorCopies); 7302 lock.lock(); 7303 // Since UpdateDescriptorSets() is void, nothing to check prior to updating state 7304 PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, 7305 pDescriptorCopies); 7306 } 7307} 7308 7309VKAPI_ATTR VkResult VKAPI_CALL 7310AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) { 7311 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 7312 VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer); 7313 if (VK_SUCCESS == result) { 7314 std::unique_lock<std::mutex> lock(global_lock); 7315 auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool); 7316 7317 if (pPool) { 7318 for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) { 7319 // Add command buffer to its commandPool map 7320 pPool->commandBuffers.push_back(pCommandBuffer[i]); 7321 GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE; 7322 // Add command buffer to map 7323 dev_data->commandBufferMap[pCommandBuffer[i]] = pCB; 7324 resetCB(dev_data, pCommandBuffer[i]); 7325 pCB->createInfo = *pCreateInfo; 7326 pCB->device = device; 7327 } 7328 } 7329 lock.unlock(); 7330 } 7331 return result; 7332} 7333 7334// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children 7335static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) { 7336 addCommandBufferBinding(&fb_state->cb_bindings, 7337 
{reinterpret_cast<uint64_t &>(fb_state->framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT}, 7338 cb_state); 7339 for (auto attachment : fb_state->attachments) { 7340 auto view_state = attachment.view_state; 7341 if (view_state) { 7342 AddCommandBufferBindingImageView(dev_data, cb_state, view_state); 7343 } 7344 auto rp_state = getRenderPassState(dev_data, fb_state->createInfo.renderPass); 7345 if (rp_state) { 7346 addCommandBufferBinding( 7347 &rp_state->cb_bindings, 7348 {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state); 7349 } 7350 } 7351} 7352 7353VKAPI_ATTR VkResult VKAPI_CALL 7354BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) { 7355 bool skip_call = false; 7356 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7357 std::unique_lock<std::mutex> lock(global_lock); 7358 // Validate command buffer level 7359 GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer); 7360 if (cb_node) { 7361 // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references 7362 if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) { 7363 skip_call |= 7364 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 7365 (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM", 7366 "Calling vkBeginCommandBuffer() on active command buffer 0x%p before it has completed. " 7367 "You must check command buffer fence before this call.", 7368 commandBuffer); 7369 } 7370 clear_cmd_buf_and_mem_references(dev_data, cb_node); 7371 if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) { 7372 // Secondary Command Buffer 7373 const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo; 7374 if (!pInfo) { 7375 skip_call |= 7376 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 7377 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 7378 "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.", commandBuffer); 7379 } else { 7380 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) { 7381 if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB 7382 skip_call |= log_msg( 7383 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 7384 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 7385 "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must specify a valid renderpass parameter.", 7386 commandBuffer); 7387 } 7388 if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf 7389 skip_call |= log_msg( 7390 dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 7391 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 7392 "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) may perform better if a " 7393 "valid framebuffer parameter is specified.", 7394 commandBuffer); 7395 } else { 7396 string errorString = ""; 7397 auto framebuffer = getFramebufferState(dev_data, pInfo->framebuffer); 7398 if (framebuffer) { 7399 if ((framebuffer->createInfo.renderPass != pInfo->renderPass) && 7400 !verify_renderpass_compatibility(dev_data, 
framebuffer->renderPassCreateInfo.ptr(),
                                                              getRenderPassState(dev_data, pInfo->renderPass)->createInfo.ptr(),
                                                              errorString)) {
                            // renderPass that framebuffer was created with must be compatible with local renderPass
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                        reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
                                        "DS", "vkBeginCommandBuffer(): Secondary Command "
                                              "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
                                              "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                                        commandBuffer, reinterpret_cast<const uint64_t &>(pInfo->renderPass),
                                        reinterpret_cast<const uint64_t &>(pInfo->framebuffer),
                                        reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass), errorString.c_str());
                        }
                        // Connect this framebuffer and its children to this cmdBuffer
                        AddFramebufferBinding(dev_data, cb_node, framebuffer);
                    }
                }
            }
            if ((pInfo->occlusionQueryEnable == VK_FALSE ||
                 dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
                (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
                                     __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                                     "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
                                     "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device does not "
                                     "support precise occlusion queries.",
                                     commandBuffer);
            }
        }
        if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
            auto renderPass = getRenderPassState(dev_data, pInfo->renderPass);
            if (renderPass) {
                if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must have a subpass index (%d) "
                        "that is less than the number of subpasses (%d).",
                        commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount);
                }
            }
        }
    }
    if (CB_RECORDING == cb_node->state) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                    "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%p"
                    ") in the RECORDING state. 
Must first call vkEndCommandBuffer().", 7452 commandBuffer); 7453 } else if (CB_RECORDED == cb_node->state || (CB_INVALID == cb_node->state && CMD_END == cb_node->last_cmd)) { 7454 VkCommandPool cmdPool = cb_node->createInfo.commandPool; 7455 auto pPool = getCommandPoolNode(dev_data, cmdPool); 7456 if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) { 7457 skip_call |= 7458 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 7459 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS", 7460 "Call to vkBeginCommandBuffer() on command buffer (0x%p" 7461 ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64 7462 ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.", 7463 commandBuffer, (uint64_t)cmdPool); 7464 } 7465 resetCB(dev_data, commandBuffer); 7466 } 7467 // Set updated state here in case implicit reset occurs above 7468 cb_node->state = CB_RECORDING; 7469 cb_node->beginInfo = *pBeginInfo; 7470 if (cb_node->beginInfo.pInheritanceInfo) { 7471 cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo); 7472 cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo; 7473 // If we are a secondary command-buffer and inheriting. Update the items we should inherit. 7474 if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) && 7475 (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { 7476 cb_node->activeRenderPass = getRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass); 7477 cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass; 7478 cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer; 7479 cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer); 7480 } 7481 } 7482 } else { 7483 skip_call |= 7484 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 7485 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 7486 "In vkBeginCommandBuffer() and unable to find CommandBuffer Node for command buffer 0x%p!", commandBuffer); 7487 } 7488 lock.unlock(); 7489 if (skip_call) { 7490 return VK_ERROR_VALIDATION_FAILED_EXT; 7491 } 7492 VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo); 7493 7494 return result; 7495} 7496 7497VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) { 7498 bool skip_call = false; 7499 VkResult result = VK_SUCCESS; 7500 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7501 std::unique_lock<std::mutex> lock(global_lock); 7502 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7503 if (pCB) { 7504 if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) || !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { 7505 // This needs spec clarification to update valid usage, see comments in PR: 7506 // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756 7507 skip_call |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_00123); 7508 } 7509 skip_call |= ValidateCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()"); 7510 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_END); 7511 for (auto query : pCB->activeQueries) { 7512 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 
(VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7513 DRAWSTATE_INVALID_QUERY, "DS", 7514 "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d", 7515 (uint64_t)(query.pool), query.index); 7516 } 7517 } 7518 if (!skip_call) { 7519 lock.unlock(); 7520 result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer); 7521 lock.lock(); 7522 if (VK_SUCCESS == result) { 7523 pCB->state = CB_RECORDED; 7524 // Reset CB status flags 7525 pCB->status = 0; 7526 } 7527 } else { 7528 result = VK_ERROR_VALIDATION_FAILED_EXT; 7529 } 7530 lock.unlock(); 7531 return result; 7532} 7533 7534VKAPI_ATTR VkResult VKAPI_CALL 7535ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) { 7536 bool skip_call = false; 7537 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7538 std::unique_lock<std::mutex> lock(global_lock); 7539 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7540 VkCommandPool cmdPool = pCB->createInfo.commandPool; 7541 auto pPool = getCommandPoolNode(dev_data, cmdPool); 7542 if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) { 7543 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 7544 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS", 7545 "Attempt to reset command buffer (0x%p) created from command pool (0x%" PRIxLEAST64 7546 ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.", 7547 commandBuffer, (uint64_t)cmdPool); 7548 } 7549 skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_00092); 7550 lock.unlock(); 7551 if (skip_call) 7552 return VK_ERROR_VALIDATION_FAILED_EXT; 7553 VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags); 7554 if (VK_SUCCESS == result) { 7555 lock.lock(); 7556 dev_data->globalInFlightCmdBuffers.erase(commandBuffer); 7557 resetCB(dev_data, commandBuffer); 7558 lock.unlock(); 7559 } 7560 return result; 7561} 7562 7563VKAPI_ATTR void VKAPI_CALL 7564CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) { 7565 bool skip = false; 7566 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7567 std::unique_lock<std::mutex> lock(global_lock); 7568 GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer); 7569 if (cb_state) { 7570 skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()"); 7571 UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_BINDPIPELINE); 7572 if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (cb_state->activeRenderPass)) { 7573 skip |= 7574 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, 7575 (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS", 7576 "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")", 7577 (uint64_t)pipeline, (uint64_t)cb_state->activeRenderPass->renderPass); 7578 } 7579 7580 PIPELINE_STATE *pipe_state = getPipelineState(dev_data, pipeline); 7581 if (pipe_state) { 7582 cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state; 7583 set_cb_pso_status(cb_state, pipe_state); 7584 set_pipeline_state(pipe_state); 7585 } else { 7586 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, 7587 (uint64_t)pipeline, __LINE__, 
DRAWSTATE_INVALID_PIPELINE, "DS", 7588 "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline)); 7589 } 7590 addCommandBufferBinding(&pipe_state->cb_bindings, 7591 {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT}, cb_state); 7592 if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) { 7593 // Add binding for child renderpass 7594 auto rp_state = getRenderPassState(dev_data, pipe_state->graphicsPipelineCI.renderPass); 7595 if (rp_state) { 7596 addCommandBufferBinding( 7597 &rp_state->cb_bindings, 7598 {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state); 7599 } 7600 } 7601 } 7602 lock.unlock(); 7603 if (!skip) 7604 dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline); 7605} 7606 7607VKAPI_ATTR void VKAPI_CALL 7608CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) { 7609 bool skip_call = false; 7610 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7611 std::unique_lock<std::mutex> lock(global_lock); 7612 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7613 if (pCB) { 7614 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()"); 7615 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE); 7616 pCB->viewportMask |= ((1u<<viewportCount) - 1u) << firstViewport; 7617 } 7618 lock.unlock(); 7619 if (!skip_call) 7620 dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports); 7621} 7622 7623VKAPI_ATTR void VKAPI_CALL 7624CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) { 7625 bool skip_call = false; 7626 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7627 std::unique_lock<std::mutex> lock(global_lock); 7628 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7629 if (pCB) { 7630 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()"); 7631 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSCISSORSTATE); 7632 pCB->scissorMask |= ((1u<<scissorCount) - 1u) << firstScissor; 7633 } 7634 lock.unlock(); 7635 if (!skip_call) 7636 dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors); 7637} 7638 7639VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) { 7640 bool skip_call = false; 7641 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7642 std::unique_lock<std::mutex> lock(global_lock); 7643 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7644 if (pCB) { 7645 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()"); 7646 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE); 7647 pCB->status |= CBSTATUS_LINE_WIDTH_SET; 7648 7649 PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state; 7650 if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) { 7651 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 7652 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS", 7653 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH " 7654 "flag. 
This is undefined behavior and could be ignored."); 7655 } else { 7656 skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth); 7657 } 7658 } 7659 lock.unlock(); 7660 if (!skip_call) 7661 dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth); 7662} 7663 7664VKAPI_ATTR void VKAPI_CALL 7665CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) { 7666 bool skip_call = false; 7667 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7668 std::unique_lock<std::mutex> lock(global_lock); 7669 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7670 if (pCB) { 7671 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()"); 7672 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE); 7673 pCB->status |= CBSTATUS_DEPTH_BIAS_SET; 7674 } 7675 lock.unlock(); 7676 if (!skip_call) 7677 dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor); 7678} 7679 7680VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) { 7681 bool skip_call = false; 7682 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7683 std::unique_lock<std::mutex> lock(global_lock); 7684 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7685 if (pCB) { 7686 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()"); 7687 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETBLENDSTATE); 7688 pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET; 7689 } 7690 lock.unlock(); 7691 if (!skip_call) 7692 dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants); 7693} 7694 7695VKAPI_ATTR void VKAPI_CALL 7696CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) { 7697 bool skip_call = false; 7698 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7699 std::unique_lock<std::mutex> lock(global_lock); 7700 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7701 if (pCB) { 7702 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()"); 7703 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE); 7704 pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET; 7705 } 7706 lock.unlock(); 7707 if (!skip_call) 7708 dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds); 7709} 7710 7711VKAPI_ATTR void VKAPI_CALL 7712CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) { 7713 bool skip_call = false; 7714 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7715 std::unique_lock<std::mutex> lock(global_lock); 7716 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7717 if (pCB) { 7718 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()"); 7719 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE); 7720 pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET; 7721 } 7722 lock.unlock(); 7723 if (!skip_call) 7724 dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask); 7725} 7726 7727VKAPI_ATTR void VKAPI_CALL 7728CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) { 7729 bool skip_call = 
false; 7730 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7731 std::unique_lock<std::mutex> lock(global_lock); 7732 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7733 if (pCB) { 7734 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()"); 7735 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE); 7736 pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET; 7737 } 7738 lock.unlock(); 7739 if (!skip_call) 7740 dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask); 7741} 7742 7743VKAPI_ATTR void VKAPI_CALL 7744CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) { 7745 bool skip_call = false; 7746 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7747 std::unique_lock<std::mutex> lock(global_lock); 7748 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7749 if (pCB) { 7750 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()"); 7751 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE); 7752 pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET; 7753 } 7754 lock.unlock(); 7755 if (!skip_call) 7756 dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference); 7757} 7758 7759VKAPI_ATTR void VKAPI_CALL 7760CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, 7761 uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount, 7762 const uint32_t *pDynamicOffsets) { 7763 bool skip_call = false; 7764 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7765 std::unique_lock<std::mutex> lock(global_lock); 7766 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7767 if (pCB) { 7768 if (pCB->state == CB_RECORDING) { 7769 // Track total count of dynamic descriptor types to make sure we have an offset for each one 7770 uint32_t totalDynamicDescriptors = 0; 7771 string errorString = ""; 7772 uint32_t lastSetIndex = firstSet + setCount - 1; 7773 if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) { 7774 pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1); 7775 pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1); 7776 } 7777 auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex]; 7778 auto pipeline_layout = getPipelineLayout(dev_data, layout); 7779 for (uint32_t i = 0; i < setCount; i++) { 7780 cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]); 7781 if (pSet) { 7782 pCB->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout; 7783 pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet; 7784 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 7785 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__, 7786 DRAWSTATE_NONE, "DS", "Descriptor Set 0x%" PRIxLEAST64 " bound on pipeline %s", 7787 (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint)); 7788 if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) { 7789 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, 7790 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__, 7791 
                    if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                             DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                             "Descriptor Set 0x%" PRIxLEAST64
                                             " bound but it was never updated. You may want to either update it or not bind it.",
                                             (uint64_t)pDescriptorSets[i]);
                    }
                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
                    if (!verify_set_layout_compatibility(dev_data, pSet, pipeline_layout, i + firstSet, errorString)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                             DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                                             "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
                                             "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
                                             i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
                    }

                    auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();

                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();

                    if (setDynamicDescriptorCount) {
                        // First make sure we won't overstep bounds of pDynamicOffsets array
                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                                        "descriptorSet #%u (0x%" PRIxLEAST64
                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
                                        i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
                                        (dynamicOffsetCount - totalDynamicDescriptors));
                        } else { // Validate and store dynamic offsets with the set
                            // Validate Dynamic Offset Minimums
                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
                            for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
                                if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
                                        skip_call |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
                                    }
                                    cur_dyn_offset++;
                                } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
                                        skip_call |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
                                    }
                                    cur_dyn_offset++;
                                }
                            }

                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
                            // Keep running total of dynamic descriptor count to verify at the end
                            totalDynamicDescriptors += setDynamicDescriptorCount;
                        }
                    }
                } else {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                         DRAWSTATE_INVALID_SET, "DS", "Attempt to bind descriptor set 0x%" PRIxLEAST64
                                         " that doesn't exist!",
                                         (uint64_t)pDescriptorSets[i]);
                }
                skip_call |= ValidateCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
                UpdateCmdBufferLastCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS);
                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
                if (firstSet > 0) { // Check set #s below the first bound set
                    for (uint32_t i = 0; i < firstSet; ++i) {
                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
                                                             pipeline_layout, i, errorString)) {
                            skip_call |= log_msg(
                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
                                "DescriptorSet 0x%" PRIxLEAST64
                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
                        }
                    }
                }
                // Check if newly last bound set invalidates any remaining bound sets
                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
                    if (oldFinalBoundSet &&
                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, pipeline_layout, lastSetIndex, errorString)) {
                        auto old_set = oldFinalBoundSet->GetSet();
                        skip_call |=
                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
                                    " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
                                    " newly bound as set #%u so set #%u and any subsequent sets were "
                                    "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
                                    lastSetIndex + 1, (uint64_t)layout);
                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                    }
                }
            }
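            // Every dynamic uniform/storage buffer descriptor in the bound sets consumes exactly one entry of
            // pDynamicOffsets (each validated for alignment above), so after the loop the running total must
            // match dynamicOffsetCount exactly.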
            // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
            if (totalDynamicDescriptors != dynamicOffsetCount) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                            "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
                            "is %u. It should exactly match the number of dynamic descriptors.",
                            setCount, totalDynamicDescriptors, dynamicOffsetCount);
            }
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
                                                       pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}

VKAPI_ATTR void VKAPI_CALL
CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO : Somewhere need to verify that IBs have correct usage state flagged
    std::unique_lock<std::mutex> lock(global_lock);

    auto buffer_state = getBufferState(dev_data, buffer);
    auto cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node && buffer_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
        std::function<bool()> function = [=]() {
            return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER);
        VkDeviceSize offset_align = 0;
        switch (indexType) {
        case VK_INDEX_TYPE_UINT16:
            offset_align = 2;
            break;
        case VK_INDEX_TYPE_UINT32:
            offset_align = 4;
            break;
        default:
            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
            break;
        }
        if (!offset_align || (offset % offset_align)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
                                 "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
                                 offset, string_VkIndexType(indexType));
        }
        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}

void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
    uint32_t end = firstBinding + bindingCount;
    if (pCB->currentDrawData.buffers.size() < end) {
        pCB->currentDrawData.buffers.resize(end);
    }
    for (uint32_t i = 0; i < bindingCount; ++i) {
        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
    }
}

static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
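// updateResourceTracking() records the most recent vertex buffer bindings in currentDrawData; at each draw,
// updateResourceTrackingOnDraw() snapshots that state into the command buffer's drawData list so later
// validation passes can see which buffers each recorded draw consumed.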
VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
                                                uint32_t bindingCount, const VkBuffer *pBuffers,
                                                const VkDeviceSize *pOffsets) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO : Somewhere need to verify that VBs have correct usage state flagged
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node) {
        for (uint32_t i = 0; i < bindingCount; ++i) {
            auto buffer_state = getBufferState(dev_data, pBuffers[i]);
            assert(buffer_state);
            skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
            std::function<bool()> function = [=]() {
                return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
            };
            cb_node->validate_functions.push_back(function);
        }
        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER);
        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
    } else {
        skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}

/* expects global_lock to be held by caller */
static void MarkStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    for (auto imageView : pCB->updateImages) {
        auto view_state = getImageViewState(dev_data, imageView);
        if (!view_state)
            continue;

        auto image_state = getImageState(dev_data, view_state->create_info.image);
        assert(image_state);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, image_state, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    for (auto buffer : pCB->updateBuffers) {
        auto buffer_state = getBufferState(dev_data, buffer);
        assert(buffer_state);
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, buffer_state, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
}

static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
    MarkStoreImagesAndBuffersAsWritten(dev_data, cb_state);
    updateResourceTrackingOnDraw(cb_state);
    UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_DRAW);
    cb_state->drawCount[DRAW]++;
}

VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                   uint32_t firstVertex, uint32_t firstInstance) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
        // TODO : Split this into validate/state update. Also at state update time, set bool to note if/when
        // vtx buffers are consumed and only flag perf warning if bound vtx buffers have not been consumed
        skip_call |= ValidateAndUpdateDrawState(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
        // TODO : Do we need to do this anymore?
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_NONE, "DS",
                    "vkCmdDraw() call 0x%" PRIx64 ", reporting descriptor set state:", g_drawCount[DRAW]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        // TODO : This is only validation
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDraw()", VALIDATION_ERROR_01365);
    }
    lock.unlock();
    if (!skip_call) {
        dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
        lock.lock();
        PostCallRecordCmdDraw(dev_data, pCB);
        lock.unlock();
    }
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
                                          uint32_t firstInstance) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_DRAWINDEXED);
        pCB->drawCount[DRAW_INDEXED]++;
        skip_call |= ValidateAndUpdateDrawState(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
        MarkStoreImagesAndBuffersAsWritten(dev_data, pCB);
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_NONE, "DS",
                    "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting descriptor set state:", g_drawCount[DRAW_INDEXED]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(pCB);
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed()", VALIDATION_ERROR_01372);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
}

VKAPI_ATTR void VKAPI_CALL
CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto buffer_state = getBufferState(dev_data, buffer);
    if (cb_node && buffer_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdDrawIndirect()");
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
        skip_call |= ValidateCmd(dev_data, cb_node, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_DRAWINDIRECT);
        cb_node->drawCount[DRAW_INDIRECT]++;
        skip_call |= ValidateAndUpdateDrawState(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
        MarkStoreImagesAndBuffersAsWritten(dev_data, cb_node);
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_NONE, "DS",
                    "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting descriptor set state:", g_drawCount[DRAW_INDIRECT]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(cb_node);
        }
        skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndirect()", VALIDATION_ERROR_01381);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
}

VKAPI_ATTR void VKAPI_CALL
CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto buffer_state = getBufferState(dev_data, buffer);
    if (cb_node && buffer_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdDrawIndexedIndirect()");
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
        skip_call |= ValidateCmd(dev_data, cb_node, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_DRAWINDEXEDINDIRECT);
        cb_node->drawCount[DRAW_INDEXED_INDIRECT]++;
        skip_call |=
            ValidateAndUpdateDrawState(dev_data, cb_node, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
        MarkStoreImagesAndBuffersAsWritten(dev_data, cb_node);
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t &>(commandBuffer), __LINE__,
                             DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting descriptor set state:",
                             g_drawCount[DRAW_INDEXED_INDIRECT]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(cb_node);
        }
        skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndexedIndirect()", VALIDATION_ERROR_01393);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
}

VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateAndUpdateDrawState(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
        MarkStoreImagesAndBuffersAsWritten(dev_data, pCB);
        skip_call |= ValidateCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_DISPATCH);
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdDispatch()", VALIDATION_ERROR_01562);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
}
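// The indirect draw/dispatch variants (vkCmdDrawIndirect/vkCmdDrawIndexedIndirect above, vkCmdDispatchIndirect below)
// additionally look up the indirect parameter buffer, require that it is backed by device memory, and bind it to the
// command buffer so its lifetime is tracked with the recording.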
VKAPI_ATTR void VKAPI_CALL
CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto buffer_state = getBufferState(dev_data, buffer);
    if (cb_node && buffer_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdDispatchIndirect()");
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
        skip_call |= ValidateAndUpdateDrawState(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
        MarkStoreImagesAndBuffersAsWritten(dev_data, cb_node);
        skip_call |= ValidateCmd(dev_data, cb_node, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_DISPATCHINDIRECT);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdDispatchIndirect()", VALIDATION_ERROR_01569);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
}

VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_buff_state = getBufferState(dev_data, srcBuffer);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && src_buff_state && dst_buff_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_state, "vkCmdCopyBuffer()");
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyBuffer()");
        // Update bindings between buffers and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_state);
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that SRC & DST buffers have correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                              VALIDATION_ERROR_01164, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              VALIDATION_ERROR_01165, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");

        std::function<bool()> function = [=]() {
            return ValidateBufferMemoryIsValid(dev_data, src_buff_state, "vkCmdCopyBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYBUFFER);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer()", VALIDATION_ERROR_01172);
    } else {
        // Param_checker will flag errors on invalid objects, just assert here as debugging aid
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}

static bool VerifySourceImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage srcImage,
                                    VkImageSubresourceLayers subLayers,
                                    VkImageLayout srcImageLayout) {
    bool skip_call = false;

    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(cb_node, srcImage, sub, node)) {
            SetLayout(cb_node, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
            continue;
        }
        if (node.layout != srcImageLayout) {
            // TODO: Improve log message in the next pass
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
                        "and doesn't match the current layout %s.",
                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            // TODO : Can we deal with image node from the top of call tree and avoid map look-up here?
            auto image_state = getImageState(dev_data, srcImage);
            if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                     "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
            }
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
                                 "TRANSFER_SRC_OPTIMAL or GENERAL.",
                                 string_VkImageLayout(srcImageLayout));
        }
    }
    return skip_call;
}
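// Per-subresource layouts are tracked with FindLayout()/SetLayout(): the first reference to a subresource in a
// command buffer records the layout the caller claims, and any later reference with a different layout is
// reported as an error. GENERAL is accepted for copies but flagged as a performance warning for optimally
// tiled images.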
static bool VerifyDestImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage destImage,
                                  VkImageSubresourceLayers subLayers, VkImageLayout destImageLayout) {
    bool skip_call = false;

    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(cb_node, destImage, sub, node)) {
            SetLayout(cb_node, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
            continue;
        }
        if (node.layout != destImageLayout) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose dest layout is %s and "
                        "doesn't match the current layout %s.",
                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            auto image_state = getImageState(dev_data, destImage);
            if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                     "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
            }
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
                                 "TRANSFER_DST_OPTIMAL or GENERAL.",
                                 string_VkImageLayout(destImageLayout));
        }
    }
    return skip_call;
}

static bool VerifyClearImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage image, VkImageSubresourceRange range,
                                   VkImageLayout dest_image_layout, const char *func_name) {
    bool skip = false;

    VkImageSubresourceRange resolvedRange = range;
    ResolveRemainingLevelsLayers(dev_data, &resolvedRange, image);

    if (dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
        if (dest_image_layout == VK_IMAGE_LAYOUT_GENERAL) {
            auto image_state = getImageState(dev_data, image);
            if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "%s: Layout for cleared image should be TRANSFER_DST_OPTIMAL instead of GENERAL.", func_name);
            }
        } else {
            UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_01086;
            if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
                error_code = VALIDATION_ERROR_01101;
            } else {
                assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
            }
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            error_code, "DS", "%s: Layout for cleared image is %s but can only be "
                            "TRANSFER_DST_OPTIMAL or GENERAL. %s",
                            func_name, string_VkImageLayout(dest_image_layout), validation_error_map[error_code]);
        }
    }

    for (uint32_t levelIdx = 0; levelIdx < resolvedRange.levelCount; ++levelIdx) {
        uint32_t level = levelIdx + resolvedRange.baseMipLevel;
        for (uint32_t layerIdx = 0; layerIdx < resolvedRange.layerCount; ++layerIdx) {
            uint32_t layer = layerIdx + resolvedRange.baseArrayLayer;
            VkImageSubresource sub = {resolvedRange.aspectMask, level, layer};
            IMAGE_CMD_BUF_LAYOUT_NODE node;
            if (!FindLayout(cb_node, image, sub, node)) {
                SetLayout(cb_node, image, sub, IMAGE_CMD_BUF_LAYOUT_NODE(dest_image_layout, dest_image_layout));
                continue;
            }
            if (node.layout != dest_image_layout) {
                UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_01085;
                if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
                    error_code = VALIDATION_ERROR_01100;
                } else {
                    assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
                }
%s", 8385 func_name, string_VkImageLayout(dest_image_layout), string_VkImageLayout(node.layout), 8386 validation_error_map[error_code]); 8387 } 8388 } 8389 } 8390 8391 return skip; 8392} 8393 8394// Test if two VkExtent3D structs are equivalent 8395static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) { 8396 bool result = true; 8397 if ((extent->width != other_extent->width) || (extent->height != other_extent->height) || 8398 (extent->depth != other_extent->depth)) { 8399 result = false; 8400 } 8401 return result; 8402} 8403 8404// Returns the image extent of a specific subresource. 8405static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_STATE *img, const VkImageSubresourceLayers *subresource) { 8406 const uint32_t mip = subresource->mipLevel; 8407 VkExtent3D extent = img->createInfo.extent; 8408 extent.width = std::max(1U, extent.width >> mip); 8409 extent.height = std::max(1U, extent.height >> mip); 8410 extent.depth = std::max(1U, extent.depth >> mip); 8411 return extent; 8412} 8413 8414// Test if the extent argument has all dimensions set to 0. 8415static inline bool IsExtentZero(const VkExtent3D *extent) { 8416 return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0)); 8417} 8418 8419// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary. 8420static inline VkExtent3D GetScaledItg(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img) { 8421 // Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device. 8422 VkExtent3D granularity = { 0, 0, 0 }; 8423 auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool); 8424 if (pPool) { 8425 granularity = dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity; 8426 if (vk_format_is_compressed(img->createInfo.format)) { 8427 auto block_size = vk_format_compressed_block_size(img->createInfo.format); 8428 granularity.width *= block_size.width; 8429 granularity.height *= block_size.height; 8430 } 8431 } 8432 return granularity; 8433} 8434 8435// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure 8436static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) { 8437 bool valid = true; 8438 if ((vk_safe_modulo(extent->depth, granularity->depth) != 0) || (vk_safe_modulo(extent->width, granularity->width) != 0) || 8439 (vk_safe_modulo(extent->height, granularity->height) != 0)) { 8440 valid = false; 8441 } 8442 return valid; 8443} 8444 8445// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values 8446static inline bool CheckItgOffset(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset, 8447 const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member) { 8448 bool skip = false; 8449 VkExtent3D offset_extent = {}; 8450 offset_extent.width = static_cast<uint32_t>(abs(offset->x)); 8451 offset_extent.height = static_cast<uint32_t>(abs(offset->y)); 8452 offset_extent.depth = static_cast<uint32_t>(abs(offset->z)); 8453 if (IsExtentZero(granularity)) { 8454 // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0) 8455 if (IsExtentZero(&offset_extent) == false) { 8456 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 
// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
    bool valid = true;
    if ((vk_safe_modulo(extent->depth, granularity->depth) != 0) || (vk_safe_modulo(extent->width, granularity->width) != 0) ||
        (vk_safe_modulo(extent->height, granularity->height) != 0)) {
        valid = false;
    }
    return valid;
}

// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
static inline bool CheckItgOffset(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
                                  const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    VkExtent3D offset_extent = {};
    offset_extent.width = static_cast<uint32_t>(abs(offset->x));
    offset_extent.height = static_cast<uint32_t>(abs(offset->y));
    offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
    if (IsExtentZero(granularity)) {
        // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
        if (IsExtentZero(&offset_extent) == false) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) "
                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
                            function, i, member, offset->x, offset->y, offset->z);
        }
    } else {
        // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
        // integer multiples of the image transfer granularity.
        if (IsExtentAligned(&offset_extent, granularity) == false) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer "
                            "multiples of this command buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
                            function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
                            granularity->depth);
        }
    }
    return skip;
}

// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
static inline bool CheckItgExtent(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
                                  const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
                                  const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    if (IsExtentZero(granularity)) {
        // If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
        // subresource extent.
        if (IsExtentEqual(extent, subresource_extent) == false) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
                            function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
                            subresource_extent->height, subresource_extent->depth);
        }
    } else {
        // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even
        // integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
        // subresource extent dimensions.
        VkExtent3D offset_extent_sum = {};
        offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
        offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
        offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
        if ((IsExtentAligned(extent, granularity) == false) && (IsExtentEqual(&offset_extent_sum, subresource_extent) == false)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command buffer's "
                        "queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
                        "extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
                        function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
                        granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
                        subresource_extent->width, subresource_extent->height, subresource_extent->depth);
        }
    }
    return skip;
}

// Check a uint32_t width or stride value against a queue family's Image Transfer Granularity width value
static inline bool CheckItgInt(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const uint32_t value,
                               const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    if (vk_safe_modulo(value, granularity) != 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (%d) must be an even integer multiple of this command buffer's queue family image "
                        "transfer granularity width (%d).",
                        function, i, member, value, granularity);
    }
    return skip;
}

// Check a VkDeviceSize value against a queue family's Image Transfer Granularity width value
static inline bool CheckItgSize(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkDeviceSize value,
                                const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    if (vk_safe_modulo(value, granularity) != 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (%" PRIdLEAST64
                        ") must be an even integer multiple of this command buffer's queue family image transfer "
                        "granularity width (%d).",
                        function, i, member, value, granularity);
    }
    return skip;
}
// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
static inline bool ValidateCopyImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
                                                                    const IMAGE_STATE *img, const VkImageCopy *region,
                                                                    const uint32_t i, const char *function) {
    bool skip = false;
    VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
    skip |= CheckItgOffset(dev_data, cb_node, &region->srcOffset, &granularity, i, function, "srcOffset");
    skip |= CheckItgOffset(dev_data, cb_node, &region->dstOffset, &granularity, i, function, "dstOffset");
    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->dstSubresource);
    skip |= CheckItgExtent(dev_data, cb_node, &region->extent, &region->dstOffset, &granularity, &subresource_extent, i, function,
                           "extent");
    return skip;
}

// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
static inline bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
                                                                          const IMAGE_STATE *img, const VkBufferImageCopy *region,
                                                                          const uint32_t i, const char *function) {
    bool skip = false;
    if (vk_format_is_compressed(img->createInfo.format) == true) {
        // TODO: Add granularity checking for compressed formats

        // bufferRowLength must be a multiple of the compressed texel block width
        // bufferImageHeight must be a multiple of the compressed texel block height
        // all members of imageOffset must be a multiple of the corresponding dimensions of the compressed texel block
        // bufferOffset must be a multiple of the compressed texel block size in bytes
        // imageExtent.width must be a multiple of the compressed texel block width or (imageExtent.width + imageOffset.x)
        // must equal the image subresource width
        // imageExtent.height must be a multiple of the compressed texel block height or (imageExtent.height + imageOffset.y)
        // must equal the image subresource height
        // imageExtent.depth must be a multiple of the compressed texel block depth or (imageExtent.depth + imageOffset.z)
        // must equal the image subresource depth
    } else {
        VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
        skip |= CheckItgSize(dev_data, cb_node, region->bufferOffset, granularity.width, i, function, "bufferOffset");
        skip |= CheckItgInt(dev_data, cb_node, region->bufferRowLength, granularity.width, i, function, "bufferRowLength");
        skip |= CheckItgInt(dev_data, cb_node, region->bufferImageHeight, granularity.width, i, function, "bufferImageHeight");
        skip |= CheckItgOffset(dev_data, cb_node, &region->imageOffset, &granularity, i, function, "imageOffset");
        VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource);
        skip |= CheckItgExtent(dev_data, cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent, i,
                               function, "imageExtent");
    }
    return skip;
}
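// The vkCmdCopy*/vkCmdBlitImage entry points below largely share one validation flow: confirm each source and
// destination resource is bound to memory, record the resource-to-command-buffer binding, check the
// TRANSFER_SRC/TRANSFER_DST usage flags, queue deferred validate_functions (source memory must be valid,
// destination memory becomes valid), and reject the command inside an active render pass; the image copies
// additionally run the per-region layout and transfer-granularity checks above.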
VKAPI_ATTR void VKAPI_CALL
CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_image_state = getImageState(dev_data, srcImage);
    auto dst_image_state = getImageState(dev_data, dstImage);
    if (cb_node && src_image_state && dst_image_state) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImage()");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyImage()");
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
        // Validate that SRC & DST images have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                             VALIDATION_ERROR_01178, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                             VALIDATION_ERROR_01181, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImage()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage()", VALIDATION_ERROR_01194);
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].srcSubresource, srcImageLayout);
            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].dstSubresource, dstImageLayout);
            skip_call |= ValidateCopyImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i], i,
                                                                          "vkCmdCopyImage()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                              pRegions);
}

// Validate that an image's sampleCount matches the requirement for a specific API call
static inline bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
                                            const char *location) {
    bool skip = false;
    if (image_state->createInfo.samples != sample_count) {
        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                       reinterpret_cast<uint64_t &>(image_state->image), 0, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                       "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s.", location,
                       reinterpret_cast<uint64_t &>(image_state->image),
                       string_VkSampleCountFlagBits(image_state->createInfo.samples), string_VkSampleCountFlagBits(sample_count));
    }
    return skip;
}

VKAPI_ATTR void VKAPI_CALL
CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_image_state = getImageState(dev_data, srcImage);
    auto dst_image_state = getImageState(dev_data, dstImage);
    if (cb_node && src_image_state && dst_image_state) {
        skip_call |= ValidateImageSampleCount(dev_data, src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage");
        skip_call |= ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdBlitImage()");
        skip_call |=
            ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdBlitImage()");
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
        // Validate that SRC & DST images have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                             VALIDATION_ERROR_02182, "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                             VALIDATION_ERROR_02186, "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdBlitImage()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BLITIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBlitImage()", VALIDATION_ERROR_01300);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                              pRegions, filter);
}

VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
                                                VkImage dstImage, VkImageLayout dstImageLayout,
                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_buff_state = getBufferState(dev_data, srcBuffer);
    auto dst_image_state = getImageState(dev_data, dstImage);
    if (cb_node && src_buff_state && dst_image_state) {
        skip_call |=
            ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyBufferToImage(): dstImage");
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_state, "vkCmdCopyBufferToImage()");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyBufferToImage()");
        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_state);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
        skip_call |=
            ValidateBufferUsageFlags(dev_data, src_buff_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, VALIDATION_ERROR_01230,
                                     "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                             VALIDATION_ERROR_01231, "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, dst_image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() { return ValidateBufferMemoryIsValid(dev_data, src_buff_state, "vkCmdCopyBufferToImage()"); };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE,
"vkCmdCopyBufferToImage()"); 8729 UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE); 8730 skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_01242); 8731 for (uint32_t i = 0; i < regionCount; ++i) { 8732 skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].imageSubresource, dstImageLayout); 8733 skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i], i, 8734 "vkCmdCopyBufferToImage()"); 8735 } 8736 } else { 8737 assert(0); 8738 } 8739 lock.unlock(); 8740 if (!skip_call) 8741 dev_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions); 8742} 8743 8744VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, 8745 VkImageLayout srcImageLayout, VkBuffer dstBuffer, 8746 uint32_t regionCount, const VkBufferImageCopy *pRegions) { 8747 bool skip_call = false; 8748 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 8749 std::unique_lock<std::mutex> lock(global_lock); 8750 8751 auto cb_node = getCBNode(dev_data, commandBuffer); 8752 auto src_image_state = getImageState(dev_data, srcImage); 8753 auto dst_buff_state = getBufferState(dev_data, dstBuffer); 8754 if (cb_node && src_image_state && dst_buff_state) { 8755 skip_call |= 8756 ValidateImageSampleCount(dev_data, src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyImageToBuffer(): srcImage"); 8757 skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImageToBuffer()"); 8758 skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyImageToBuffer()"); 8759 // Update bindings between buffer/image and cmd buffer 8760 AddCommandBufferBindingImage(dev_data, cb_node, src_image_state); 8761 AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state); 8762 // Validate that SRC image & DST buffer have correct usage flags set 8763 skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, 8764 VALIDATION_ERROR_01248, "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT"); 8765 skip_call |= 8766 ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01252, 8767 "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); 8768 std::function<bool()> function = [=]() { 8769 return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImageToBuffer()"); 8770 }; 8771 cb_node->validate_functions.push_back(function); 8772 function = [=]() { 8773 SetBufferMemoryValid(dev_data, dst_buff_state, true); 8774 return false; 8775 }; 8776 cb_node->validate_functions.push_back(function); 8777 8778 skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()"); 8779 UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER); 8780 skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_01260); 8781 for (uint32_t i = 0; i < regionCount; ++i) { 8782 skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].imageSubresource, srcImageLayout); 8783 skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, src_image_state, &pRegions[i], i, 8784 "CmdCopyImageToBuffer"); 8785 } 8786 } else { 8787 assert(0); 8788 } 8789 lock.unlock(); 8790 if (!skip_call) 8791 dev_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, 
        dev_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
}

VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && dst_buff_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdUpdateBuffer()");
        // Update bindings between buffer and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that DST buffer has correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              VALIDATION_ERROR_01146, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_UPDATEBUFFER);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()", VALIDATION_ERROR_01155);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}

VKAPI_ATTR void VKAPI_CALL
CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && dst_buff_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdFillBuffer()");
        // Update bindings between buffer and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that DST buffer has correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              VALIDATION_ERROR_01137, "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_FILLBUFFER);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdFillBuffer()", VALIDATION_ERROR_01142);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}
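// vkCmdUpdateBuffer()/vkCmdFillBuffer() (like the copy commands above) never read the destination, so instead of
// checking the destination's memory contents they queue a deferred callback that marks the destination memory as
// valid once the command buffer's validate_functions are later run.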
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_CLEARATTACHMENTS);
        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
        if (!hasDrawCmd(pCB) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
            // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
            // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
            // call CmdClearAttachments
            // Otherwise this seems more like a performance warning.
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t &>(commandBuffer), 0,
                                 DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
                                 "vkCmdClearAttachments() issued on command buffer object 0x%p prior to any Draw Cmds."
                                 " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
                                 commandBuffer);
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments()", VALIDATION_ERROR_01122);
    }

    // Validate that attachment is in reference list of active subpass
    // (pCB may be null if the command buffer handle was unknown, so check it again before dereferencing)
    if (pCB && pCB->activeRenderPass) {
        const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->createInfo.ptr();
        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
        auto framebuffer = getFramebufferState(dev_data, pCB->activeFramebuffer);

        for (uint32_t i = 0; i < attachmentCount; i++) {
            auto clear_desc = &pAttachments[i];
            VkImageView image_view = VK_NULL_HANDLE;

            if (clear_desc->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
                if (clear_desc->colorAttachment >= pSD->colorAttachmentCount) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_01114, "DS",
                        "vkCmdClearAttachments() color attachment index %d out of range for active subpass %d. %s",
                        clear_desc->colorAttachment, pCB->activeSubpass, validation_error_map[VALIDATION_ERROR_01114]);
                } else if (pSD->pColorAttachments[clear_desc->colorAttachment].attachment == VK_ATTACHMENT_UNUSED) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() color attachment index %d is VK_ATTACHMENT_UNUSED; ignored.",
                        clear_desc->colorAttachment);
                } else {
                    image_view = framebuffer->createInfo.pAttachments[pSD->pColorAttachments[clear_desc->colorAttachment].attachment];
                }
            } else if (clear_desc->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
                    (pSD->pDepthStencilAttachment->attachment ==
                     VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored");
                } else {
                    image_view = framebuffer->createInfo.pAttachments[pSD->pDepthStencilAttachment->attachment];
                }
            }

            if (image_view) {
                auto image_view_state = getImageViewState(dev_data, image_view);
                auto aspects_present = image_view_state->create_info.subresourceRange.aspectMask;
                auto extra_aspects = clear_desc->aspectMask & ~aspects_present;

                if (extra_aspects) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                        reinterpret_cast<uint64_t &>(image_view), __LINE__, VALIDATION_ERROR_01125, "DS",
                        "vkCmdClearAttachments() with aspects not present in image view: %s. 
%s", 8938 string_VkImageAspectFlagBits((VkImageAspectFlagBits)extra_aspects), 8939 validation_error_map[VALIDATION_ERROR_01125]); 8940 } 8941 } 8942 } 8943 } 8944 lock.unlock(); 8945 if (!skip_call) 8946 dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects); 8947} 8948 8949VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, 8950 VkImageLayout imageLayout, const VkClearColorValue *pColor, 8951 uint32_t rangeCount, const VkImageSubresourceRange *pRanges) { 8952 bool skip_call = false; 8953 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 8954 std::unique_lock<std::mutex> lock(global_lock); 8955 // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state 8956 8957 auto cb_node = getCBNode(dev_data, commandBuffer); 8958 auto image_state = getImageState(dev_data, image); 8959 if (cb_node && image_state) { 8960 skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCmdClearColorImage()"); 8961 AddCommandBufferBindingImage(dev_data, cb_node, image_state); 8962 std::function<bool()> function = [=]() { 8963 SetImageMemoryValid(dev_data, image_state, true); 8964 return false; 8965 }; 8966 cb_node->validate_functions.push_back(function); 8967 8968 skip_call |= ValidateCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()"); 8969 UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE); 8970 skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearColorImage()", VALIDATION_ERROR_01096); 8971 } else { 8972 assert(0); 8973 } 8974 for (uint32_t i = 0; i < rangeCount; ++i) { 8975 skip_call |= VerifyClearImageLayout(dev_data, cb_node, image, pRanges[i], imageLayout, "vkCmdClearColorImage()"); 8976 } 8977 lock.unlock(); 8978 if (!skip_call) 8979 dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges); 8980} 8981 8982VKAPI_ATTR void VKAPI_CALL 8983CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, 8984 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount, 8985 const VkImageSubresourceRange *pRanges) { 8986 bool skip_call = false; 8987 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 8988 std::unique_lock<std::mutex> lock(global_lock); 8989 // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state 8990 8991 auto cb_node = getCBNode(dev_data, commandBuffer); 8992 auto image_state = getImageState(dev_data, image); 8993 if (cb_node && image_state) { 8994 skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCmdClearDepthStencilImage()"); 8995 AddCommandBufferBindingImage(dev_data, cb_node, image_state); 8996 std::function<bool()> function = [=]() { 8997 SetImageMemoryValid(dev_data, image_state, true); 8998 return false; 8999 }; 9000 cb_node->validate_functions.push_back(function); 9001 9002 skip_call |= ValidateCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()"); 9003 UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE); 9004 skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearDepthStencilImage()", VALIDATION_ERROR_01111); 9005 } else { 9006 assert(0); 9007 } 9008 for (uint32_t i = 0; i < rangeCount; ++i) { 9009 skip_call |= VerifyClearImageLayout(dev_data, cb_node, image, pRanges[i], imageLayout, "vkCmdClearDepthStencilImage()"); 9010 } 9011 lock.unlock(); 9012 if (!skip_call) 9013 
dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges); 9014} 9015 9016VKAPI_ATTR void VKAPI_CALL 9017CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, 9018 VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) { 9019 bool skip_call = false; 9020 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9021 std::unique_lock<std::mutex> lock(global_lock); 9022 9023 auto cb_node = getCBNode(dev_data, commandBuffer); 9024 auto src_image_state = getImageState(dev_data, srcImage); 9025 auto dst_image_state = getImageState(dev_data, dstImage); 9026 if (cb_node && src_image_state && dst_image_state) { 9027 skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdResolveImage()"); 9028 skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdResolveImage()"); 9029 // Update bindings between images and cmd buffer 9030 AddCommandBufferBindingImage(dev_data, cb_node, src_image_state); 9031 AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state); 9032 std::function<bool()> function = [=]() { 9033 return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdResolveImage()"); 9034 }; 9035 cb_node->validate_functions.push_back(function); 9036 function = [=]() { 9037 SetImageMemoryValid(dev_data, dst_image_state, true); 9038 return false; 9039 }; 9040 cb_node->validate_functions.push_back(function); 9041 9042 skip_call |= ValidateCmd(dev_data, cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()"); 9043 UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_RESOLVEIMAGE); 9044 skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdResolveImage()", VALIDATION_ERROR_01335); 9045 } else { 9046 assert(0); 9047 } 9048 lock.unlock(); 9049 if (!skip_call) 9050 dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, 9051 pRegions); 9052} 9053 9054bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { 9055 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9056 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 9057 if (pCB) { 9058 pCB->eventToStageMap[event] = stageMask; 9059 } 9060 auto queue_data = dev_data->queueMap.find(queue); 9061 if (queue_data != dev_data->queueMap.end()) { 9062 queue_data->second.eventToStageMap[event] = stageMask; 9063 } 9064 return false; 9065} 9066 9067VKAPI_ATTR void VKAPI_CALL 9068CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { 9069 bool skip_call = false; 9070 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9071 std::unique_lock<std::mutex> lock(global_lock); 9072 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 9073 if (pCB) { 9074 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()"); 9075 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETEVENT); 9076 skip_call |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_00238); 9077 auto event_state = getEventNode(dev_data, event); 9078 if (event_state) { 9079 addCommandBufferBinding(&event_state->cb_bindings, 9080 {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB); 9081 event_state->cb_bindings.insert(pCB); 9082 } 9083 pCB->events.push_back(event); 9084 if 
(!pCB->waitedEvents.count(event)) { 9085 pCB->writeEventsBeforeWait.push_back(event); 9086 } 9087 std::function<bool(VkQueue)> eventUpdate = 9088 std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask); 9089 pCB->eventUpdates.push_back(eventUpdate); 9090 } 9091 lock.unlock(); 9092 if (!skip_call) 9093 dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask); 9094} 9095 9096VKAPI_ATTR void VKAPI_CALL 9097CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { 9098 bool skip_call = false; 9099 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9100 std::unique_lock<std::mutex> lock(global_lock); 9101 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 9102 if (pCB) { 9103 skip_call |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()"); 9104 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_RESETEVENT); 9105 skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_00249); 9106 auto event_state = getEventNode(dev_data, event); 9107 if (event_state) { 9108 addCommandBufferBinding(&event_state->cb_bindings, 9109 {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB); 9110 event_state->cb_bindings.insert(pCB); 9111 } 9112 pCB->events.push_back(event); 9113 if (!pCB->waitedEvents.count(event)) { 9114 pCB->writeEventsBeforeWait.push_back(event); 9115 } 9116 std::function<bool(VkQueue)> eventUpdate = 9117 std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0)); 9118 pCB->eventUpdates.push_back(eventUpdate); 9119 } 9120 lock.unlock(); 9121 if (!skip_call) 9122 dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask); 9123} 9124 9125// TODO: Separate validation and layout state updates 9126static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount, 9127 const VkImageMemoryBarrier *pImgMemBarriers) { 9128 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map); 9129 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer); 9130 bool skip = false; 9131 uint32_t levelCount = 0; 9132 uint32_t layerCount = 0; 9133 9134 for (uint32_t i = 0; i < memBarrierCount; ++i) { 9135 auto mem_barrier = &pImgMemBarriers[i]; 9136 if (!mem_barrier) 9137 continue; 9138 // TODO: Do not iterate over every possibility - consolidate where 9139 // possible 9140 ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image); 9141 9142 for (uint32_t j = 0; j < levelCount; j++) { 9143 uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j; 9144 for (uint32_t k = 0; k < layerCount; k++) { 9145 uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k; 9146 VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer}; 9147 IMAGE_CMD_BUF_LAYOUT_NODE node; 9148 if (!FindLayout(pCB, mem_barrier->image, sub, node)) { 9149 SetLayout(pCB, mem_barrier->image, sub, 9150 IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout)); 9151 continue; 9152 } 9153 if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) { 9154 // TODO: Set memory invalid which is in mem_tracker currently 9155 } else if (node.layout != mem_barrier->oldLayout) { 9156 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 9157 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s " 9158 "when 
current layout is %s.", 9159 string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout)); 9160 } 9161 SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout); 9162 } 9163 } 9164 } 9165 return skip; 9166} 9167 9168// Print readable FlagBits in FlagMask 9169static std::string string_VkAccessFlags(VkAccessFlags accessMask) { 9170 std::string result; 9171 std::string separator; 9172 9173 if (accessMask == 0) { 9174 result = "[None]"; 9175 } else { 9176 result = "["; 9177 for (auto i = 0; i < 32; i++) { 9178 if (accessMask & (1 << i)) { 9179 result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1 << i)); 9180 separator = " | "; 9181 } 9182 } 9183 result = result + "]"; 9184 } 9185 return result; 9186} 9187 9188// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set. 9189// If required_bit is zero, accessMask must have at least one of 'optional_bits' set 9190// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions 9191static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask, 9192 const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits, 9193 const char *type) { 9194 bool skip_call = false; 9195 9196 if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) { 9197 if (accessMask & ~(required_bit | optional_bits)) { 9198 // TODO: Verify against Valid Use 9199 skip_call |= 9200 log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9201 DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.", 9202 type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout)); 9203 } 9204 } else { 9205 if (!required_bit) { 9206 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9207 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d " 9208 "%s when layout is %s, unless the app has previously added a " 9209 "barrier for this transition.", 9210 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits, 9211 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout)); 9212 } else { 9213 std::string opt_bits; 9214 if (optional_bits != 0) { 9215 std::stringstream ss; 9216 ss << optional_bits; 9217 opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits); 9218 } 9219 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9220 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when " 9221 "layout is %s, unless the app has previously added a barrier for " 9222 "this transition.", 9223 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit, 9224 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout)); 9225 } 9226 } 9227 return skip_call; 9228} 9229 9230static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask, 9231 const VkImageLayout &layout, const char *type) { 9232 bool skip_call = false; 9233 switch (layout) { 9234 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: { 9235 skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, 9236 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type); 9237 break; 9238 } 9239 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: { 9240 skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, 9241 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type); 9242 break; 9243 } 9244 case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: { 9245 skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type); 9246 break; 9247 } 9248 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: { 9249 skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0, 9250 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | 9251 VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type); 9252 break; 9253 } 9254 case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: { 9255 skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0, 9256 VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type); 9257 break; 9258 } 9259 case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: { 9260 skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type); 9261 break; 9262 } 9263 case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: { 9264 skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_MEMORY_READ_BIT, 0, type); 9265 break; 9266 } 9267 case VK_IMAGE_LAYOUT_UNDEFINED: { 9268 if (accessMask != 0) { 9269 // TODO: Verify against Valid Use section spec 9270 skip_call |= 9271 log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9272 DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.", 9273 type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout)); 9274 } 9275 break; 9276 } 9277 case VK_IMAGE_LAYOUT_GENERAL: 9278 default: { break; } 9279 } 9280 return skip_call; 9281} 9282 9283static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount, 9284 const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount, 9285 const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount, 9286 const VkImageMemoryBarrier *pImageMemBarriers) { 9287 bool skip = false; 9288 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map); 9289 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer); 9290 if (pCB->activeRenderPass && memBarrierCount) { 9291 if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) { 9292 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9293 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d " 9294 "with no self dependency specified.", 9295 funcName, pCB->activeSubpass); 9296 } 9297 } 9298 for (uint32_t i = 0; i < imageMemBarrierCount; ++i) { 9299 auto mem_barrier = &pImageMemBarriers[i]; 9300 auto image_data = getImageState(dev_data, mem_barrier->image); 9301 if (image_data) { 9302 uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex; 9303 uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex; 9304 if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) { 9305 // srcQueueFamilyIndex and dstQueueFamilyIndex must both 9306 // be VK_QUEUE_FAMILY_IGNORED 9307 if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || 
                    (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                    "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
                                    "VK_SHARING_MODE_CONCURRENT. Src and dst "
                                    "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                }
            } else {
                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
                // or both be a valid queue family
                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
                    (src_q_f_index != dst_q_f_index)) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
                                                                     "must be.",
                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                    "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                    "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
                                    " or dstQueueFamilyIndex %d is greater than the " PRINTF_SIZE_T_SPECIFIER
                                    " queueFamilies created for this device.",
                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index, dst_q_f_index,
                                    dev_data->phys_dev_properties.queue_family_properties.size());
                }
            }
        }

        if (mem_barrier) {
            if (mem_barrier->oldLayout != mem_barrier->newLayout) {
                skip |=
                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
                skip |=
                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
            }
            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
                                                                 "PREINITIALIZED.",
                                funcName);
            }
            auto image_data = getImageState(dev_data, mem_barrier->image);
            VkFormat format = VK_FORMAT_UNDEFINED;
            uint32_t arrayLayers = 0, mipLevels = 0;
            bool imageFound = false;
            if (image_data) {
                format = image_data->createInfo.format;
                arrayLayers = image_data->createInfo.arrayLayers;
                mipLevels = image_data->createInfo.mipLevels;
                imageFound = true;
            } else if (dev_data->device_extensions.wsi_enabled) {
                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
                if (imageswap_data) {
                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
                    if (swapchain_data) {
                        format =
swapchain_data->createInfo.imageFormat; 9371 arrayLayers = swapchain_data->createInfo.imageArrayLayers; 9372 mipLevels = 1; 9373 imageFound = true; 9374 } 9375 } 9376 } 9377 if (imageFound) { 9378 auto aspect_mask = mem_barrier->subresourceRange.aspectMask; 9379 skip |= ValidateImageAspectMask(dev_data, image_data->image, format, aspect_mask, funcName); 9380 int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS) 9381 ? 1 9382 : mem_barrier->subresourceRange.layerCount; 9383 if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) { 9384 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 9385 __LINE__, DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the " 9386 "baseArrayLayer (%d) and layerCount (%d) be less " 9387 "than or equal to the total number of layers (%d).", 9388 funcName, mem_barrier->subresourceRange.baseArrayLayer, 9389 mem_barrier->subresourceRange.layerCount, arrayLayers); 9390 } 9391 int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS) 9392 ? 1 9393 : mem_barrier->subresourceRange.levelCount; 9394 if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) { 9395 skip |= log_msg( 9396 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9397 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the baseMipLevel " 9398 "(%d) and levelCount (%d) be less than or equal to " 9399 "the total number of levels (%d).", 9400 funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount, mipLevels); 9401 } 9402 } 9403 } 9404 } 9405 for (uint32_t i = 0; i < bufferBarrierCount; ++i) { 9406 auto mem_barrier = &pBufferMemBarriers[i]; 9407 if (pCB->activeRenderPass) { 9408 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9409 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName); 9410 } 9411 if (!mem_barrier) 9412 continue; 9413 9414 // Validate buffer barrier queue family indices 9415 if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED && 9416 mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) || 9417 (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED && 9418 mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) { 9419 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9420 DRAWSTATE_INVALID_QUEUE_INDEX, "DS", 9421 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater " 9422 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.", 9423 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer), 9424 dev_data->phys_dev_properties.queue_family_properties.size()); 9425 } 9426 9427 auto buffer_state = getBufferState(dev_data, mem_barrier->buffer); 9428 if (buffer_state) { 9429 auto buffer_size = buffer_state->requirements.size; 9430 if (mem_barrier->offset >= buffer_size) { 9431 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9432 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 9433 " which is not less than total size 0x%" PRIx64 ".", 9434 funcName, reinterpret_cast<const uint64_t 
                                &>(mem_barrier->buffer),
                                reinterpret_cast<const uint64_t &>(mem_barrier->offset),
                                reinterpret_cast<const uint64_t &>(buffer_size));
            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
                    reinterpret_cast<const uint64_t &>(buffer_size));
            }
        }
    }
    return skip;
}

bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                            VkPipelineStageFlags sourceStageMask) {
    bool skip_call = false;
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event = pCB->events[firstEventIndex + i];
        auto queue_data = dev_data->queueMap.find(queue);
        if (queue_data == dev_data->queueMap.end())
            return false;
        auto event_data = queue_data->second.eventToStageMap.find(event);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        } else {
            auto global_event_data = getEventNode(dev_data, event);
            if (!global_event_data) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
                                     reinterpret_cast<const uint64_t &>(event));
            } else {
                stageMask |= global_event_data->stageMask;
            }
        }
    }
    // TODO: Need to validate that host_bit is only set if set event is called
    // but set event can be called at any time.
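    // The check below enforces that the srcStageMask the app passes to vkCmdWaitEvents() is exactly the bitwise OR of the
    // stageMask values used when the waited events were set (optionally with VK_PIPELINE_STAGE_HOST_BIT added when an event is
    // signaled from the host with vkSetEvent). Illustrative example with hypothetical values: if event A was set with
    // VK_PIPELINE_STAGE_TRANSFER_BIT and event B with VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, stageMask accumulates to
    // (TRANSFER | COMPUTE_SHADER), and srcStageMask must equal either that value or that value | HOST_BIT.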
9477 if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) { 9478 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9479 DRAWSTATE_INVALID_EVENT, "DS", "Submitting cmdbuffer with call to VkCmdWaitEvents " 9480 "using srcStageMask 0x%X which must be the bitwise " 9481 "OR of the stageMask parameters used in calls to " 9482 "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if " 9483 "used with vkSetEvent but instead is 0x%X.", 9484 sourceStageMask, stageMask); 9485 } 9486 return skip_call; 9487} 9488 9489// Note that we only check bits that HAVE required queueflags -- don't care entries are skipped 9490static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = { 9491 {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT}, 9492 {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT}, 9493 {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT}, 9494 {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT}, 9495 {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT}, 9496 {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT}, 9497 {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT}, 9498 {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT}, 9499 {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT}, 9500 {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT}, 9501 {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT}, 9502 {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT}, 9503 {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT}, 9504 {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}}; 9505 9506static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, 9507 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, 9508 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 9509 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 9510 VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, 9511 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, 9512 VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, 9513 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 9514 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, 9515 VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, 9516 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 9517 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 9518 VK_PIPELINE_STAGE_TRANSFER_BIT, 9519 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT}; 9520 9521bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask, 9522 VkQueueFlags queue_flags, const char *function, const char *src_or_dest, 9523 UNIQUE_VALIDATION_ERROR_CODE error_code) { 9524 bool skip = false; 9525 // Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags 9526 for (const auto &item : stage_flag_bit_array) { 9527 if (stage_mask & item) { 9528 if ((supported_pipeline_stages_table[item] & queue_flags) == 0) { 9529 skip |= 9530 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 9531 reinterpret_cast<uint64_t &>(command_buffer), __LINE__, error_code, "DL", 9532 "%s(): %s flag %s is not compatible with the queue family properties of this " 9533 "command buffer. 
%s", 9534 function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)), 9535 validation_error_map[error_code]); 9536 } 9537 } 9538 } 9539 return skip; 9540} 9541 9542bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, 9543 VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask, 9544 const char *function, UNIQUE_VALIDATION_ERROR_CODE error_code) { 9545 bool skip = false; 9546 uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex; 9547 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map); 9548 auto physical_device_state = getPhysicalDeviceState(instance_data, dev_data->physical_device); 9549 9550 // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family 9551 // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool 9552 // that commandBuffer was allocated from, as specified in the table of supported pipeline stages. 9553 9554 if (queue_family_index < physical_device_state->queue_family_properties.size()) { 9555 VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags; 9556 9557 if ((source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) { 9558 skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags, 9559 function, "srcStageMask", error_code); 9560 } 9561 if ((dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) { 9562 skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags, 9563 function, "dstStageMask", error_code); 9564 } 9565 } 9566 return skip; 9567} 9568 9569VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, 9570 VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask, 9571 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, 9572 uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, 9573 uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { 9574 bool skip = false; 9575 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9576 std::unique_lock<std::mutex> lock(global_lock); 9577 GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer); 9578 if (cb_state) { 9579 skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, "vkCmdWaitEvents", 9580 VALIDATION_ERROR_02510); 9581 auto first_event_index = cb_state->events.size(); 9582 for (uint32_t i = 0; i < eventCount; ++i) { 9583 auto event_state = getEventNode(dev_data, pEvents[i]); 9584 if (event_state) { 9585 addCommandBufferBinding(&event_state->cb_bindings, 9586 {reinterpret_cast<const uint64_t &>(pEvents[i]), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, 9587 cb_state); 9588 event_state->cb_bindings.insert(cb_state); 9589 } 9590 cb_state->waitedEvents.insert(pEvents[i]); 9591 cb_state->events.push_back(pEvents[i]); 9592 } 9593 std::function<bool(VkQueue)> event_update = 9594 std::bind(validateEventStageMask, std::placeholders::_1, cb_state, eventCount, first_event_index, sourceStageMask); 9595 cb_state->eventUpdates.push_back(event_update); 9596 if 
(cb_state->state == CB_RECORDING) { 9597 skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()"); 9598 UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_WAITEVENTS); 9599 } else { 9600 skip |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()"); 9601 } 9602 skip |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers); 9603 skip |= ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, 9604 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); 9605 } 9606 lock.unlock(); 9607 if (!skip) 9608 dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask, 9609 memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, 9610 imageMemoryBarrierCount, pImageMemoryBarriers); 9611} 9612 9613VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, 9614 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, 9615 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, 9616 uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, 9617 uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { 9618 bool skip = false; 9619 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9620 std::unique_lock<std::mutex> lock(global_lock); 9621 GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer); 9622 if (cb_state) { 9623 skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, srcStageMask, dstStageMask, "vkCmdPipelineBarrier", 9624 VALIDATION_ERROR_02513); 9625 skip |= ValidateCmd(dev_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()"); 9626 UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_PIPELINEBARRIER); 9627 skip |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers); 9628 skip |= ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, 9629 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); 9630 } 9631 lock.unlock(); 9632 if (!skip) 9633 dev_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, 9634 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, 9635 imageMemoryBarrierCount, pImageMemoryBarriers); 9636} 9637 9638bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) { 9639 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9640 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 9641 if (pCB) { 9642 pCB->queryToStateMap[object] = value; 9643 } 9644 auto queue_data = dev_data->queueMap.find(queue); 9645 if (queue_data != dev_data->queueMap.end()) { 9646 queue_data->second.queryToStateMap[object] = value; 9647 } 9648 return false; 9649} 9650 9651VKAPI_ATTR void VKAPI_CALL 9652CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) { 9653 bool skip_call = false; 9654 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9655 std::unique_lock<std::mutex> lock(global_lock); 9656 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 9657 if (pCB) { 9658 QueryObject query = {queryPool, slot}; 9659 pCB->activeQueries.insert(query); 9660 if 
(!pCB->startedQueries.count(query)) { 9661 pCB->startedQueries.insert(query); 9662 } 9663 skip_call |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()"); 9664 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_BEGINQUERY); 9665 addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings, 9666 {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB); 9667 } 9668 lock.unlock(); 9669 if (!skip_call) 9670 dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags); 9671} 9672 9673VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) { 9674 bool skip_call = false; 9675 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9676 std::unique_lock<std::mutex> lock(global_lock); 9677 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 9678 if (pCB) { 9679 QueryObject query = {queryPool, slot}; 9680 if (!pCB->activeQueries.count(query)) { 9681 skip_call |= 9682 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9683 DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d", 9684 (uint64_t)(queryPool), slot); 9685 } else { 9686 pCB->activeQueries.erase(query); 9687 } 9688 std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true); 9689 pCB->queryUpdates.push_back(queryUpdate); 9690 if (pCB->state == CB_RECORDING) { 9691 skip_call |= ValidateCmd(dev_data, pCB, CMD_ENDQUERY, "VkCmdEndQuery()"); 9692 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_ENDQUERY); 9693 } else { 9694 skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()"); 9695 } 9696 addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings, 9697 {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB); 9698 } 9699 lock.unlock(); 9700 if (!skip_call) 9701 dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot); 9702} 9703 9704VKAPI_ATTR void VKAPI_CALL 9705CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) { 9706 bool skip_call = false; 9707 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9708 std::unique_lock<std::mutex> lock(global_lock); 9709 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 9710 if (pCB) { 9711 for (uint32_t i = 0; i < queryCount; i++) { 9712 QueryObject query = {queryPool, firstQuery + i}; 9713 pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents; 9714 std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false); 9715 pCB->queryUpdates.push_back(queryUpdate); 9716 } 9717 if (pCB->state == CB_RECORDING) { 9718 skip_call |= ValidateCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "VkCmdResetQueryPool()"); 9719 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_RESETQUERYPOOL); 9720 } else { 9721 skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()"); 9722 } 9723 skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool()", VALIDATION_ERROR_01025); 9724 addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings, 9725 {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB); 9726 } 9727 lock.unlock(); 9728 if (!skip_call) 9729 
dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount); 9730} 9731 9732bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) { 9733 bool skip_call = false; 9734 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map); 9735 auto queue_data = dev_data->queueMap.find(queue); 9736 if (queue_data == dev_data->queueMap.end()) 9737 return false; 9738 for (uint32_t i = 0; i < queryCount; i++) { 9739 QueryObject query = {queryPool, firstQuery + i}; 9740 auto query_data = queue_data->second.queryToStateMap.find(query); 9741 bool fail = false; 9742 if (query_data != queue_data->second.queryToStateMap.end()) { 9743 if (!query_data->second) { 9744 fail = true; 9745 } 9746 } else { 9747 auto global_query_data = dev_data->queryToStateMap.find(query); 9748 if (global_query_data != dev_data->queryToStateMap.end()) { 9749 if (!global_query_data->second) { 9750 fail = true; 9751 } 9752 } else { 9753 fail = true; 9754 } 9755 } 9756 if (fail) { 9757 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9758 DRAWSTATE_INVALID_QUERY, "DS", 9759 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d", 9760 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i); 9761 } 9762 } 9763 return skip_call; 9764} 9765 9766VKAPI_ATTR void VKAPI_CALL 9767CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, 9768 VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) { 9769 bool skip_call = false; 9770 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9771 std::unique_lock<std::mutex> lock(global_lock); 9772 9773 auto cb_node = getCBNode(dev_data, commandBuffer); 9774 auto dst_buff_state = getBufferState(dev_data, dstBuffer); 9775 if (cb_node && dst_buff_state) { 9776 skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()"); 9777 // Update bindings between buffer and cmd buffer 9778 AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state); 9779 // Validate that DST buffer has correct usage flags set 9780 skip_call |= 9781 ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01066, 9782 "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); 9783 std::function<bool()> function = [=]() { 9784 SetBufferMemoryValid(dev_data, dst_buff_state, true); 9785 return false; 9786 }; 9787 cb_node->validate_functions.push_back(function); 9788 std::function<bool(VkQueue)> queryUpdate = 9789 std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery); 9790 cb_node->queryUpdates.push_back(queryUpdate); 9791 if (cb_node->state == CB_RECORDING) { 9792 skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()"); 9793 UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS); 9794 } else { 9795 skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()"); 9796 } 9797 skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_01074); 9798 addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings, 9799 {reinterpret_cast<uint64_t &>(queryPool), 
VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, cb_node); 9800 } else { 9801 assert(0); 9802 } 9803 lock.unlock(); 9804 if (!skip_call) 9805 dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, 9806 stride, flags); 9807} 9808 9809VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, 9810 VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, 9811 const void *pValues) { 9812 bool skip_call = false; 9813 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9814 std::unique_lock<std::mutex> lock(global_lock); 9815 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 9816 if (pCB) { 9817 if (pCB->state == CB_RECORDING) { 9818 skip_call |= ValidateCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()"); 9819 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_PUSHCONSTANTS); 9820 } else { 9821 skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()"); 9822 } 9823 } 9824 skip_call |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()"); 9825 if (0 == stageFlags) { 9826 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9827 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set."); 9828 } 9829 9830 // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout. 9831 auto pipeline_layout = getPipelineLayout(dev_data, layout); 9832 // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is 9833 // contained in the pipeline ranges. 9834 // Build a {start, end} span list for ranges with matching stage flags. 9835 const auto &ranges = pipeline_layout->push_constant_ranges; 9836 struct span { 9837 uint32_t start; 9838 uint32_t end; 9839 }; 9840 std::vector<span> spans; 9841 spans.reserve(ranges.size()); 9842 for (const auto &iter : ranges) { 9843 if (iter.stageFlags == stageFlags) { 9844 spans.push_back({iter.offset, iter.offset + iter.size}); 9845 } 9846 } 9847 if (spans.size() == 0) { 9848 // There were no ranges that matched the stageFlags. 9849 skip_call |= 9850 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9851 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match " 9852 "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".", 9853 (uint32_t)stageFlags, (uint64_t)layout); 9854 } else { 9855 // Sort span list by start value. 9856 struct comparer { 9857 bool operator()(struct span i, struct span j) { return i.start < j.start; } 9858 } my_comparer; 9859 std::sort(spans.begin(), spans.end(), my_comparer); 9860 9861 // Examine two spans at a time. 9862 std::vector<span>::iterator current = spans.begin(); 9863 std::vector<span>::iterator next = current + 1; 9864 while (next != spans.end()) { 9865 if (current->end < next->start) { 9866 // There is a gap; cannot coalesce. Move to the next two spans. 9867 ++current; 9868 ++next; 9869 } else { 9870 // Coalesce the two spans. The start of the next span 9871 // is within the current span, so pick the larger of 9872 // the end values to extend the current span. 9873 // Then delete the next span and set next to the span after it. 
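                // Illustrative example (hypothetical offsets): spans {[0,16), [8,32), [48,64)}, already sorted by start,
                // coalesce to {[0,32), [48,64)}; the incoming push constant update must then fall entirely inside one of
                // the coalesced spans to pass the range check further below.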
9874 current->end = max(current->end, next->end); 9875 next = spans.erase(next); 9876 } 9877 } 9878 9879 // Now we can check if the incoming range is within any of the spans. 9880 bool contained_in_a_range = false; 9881 for (uint32_t i = 0; i < spans.size(); ++i) { 9882 if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) { 9883 contained_in_a_range = true; 9884 break; 9885 } 9886 } 9887 if (!contained_in_a_range) { 9888 skip_call |= 9889 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9890 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Push constant range [%d, %d) " 9891 "with stageFlags = 0x%" PRIx32 " " 9892 "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ".", 9893 offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout); 9894 } 9895 } 9896 lock.unlock(); 9897 if (!skip_call) 9898 dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues); 9899} 9900 9901VKAPI_ATTR void VKAPI_CALL 9902CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) { 9903 bool skip_call = false; 9904 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9905 std::unique_lock<std::mutex> lock(global_lock); 9906 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 9907 if (pCB) { 9908 QueryObject query = {queryPool, slot}; 9909 std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true); 9910 pCB->queryUpdates.push_back(queryUpdate); 9911 if (pCB->state == CB_RECORDING) { 9912 skip_call |= ValidateCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()"); 9913 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_WRITETIMESTAMP); 9914 } else { 9915 skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()"); 9916 } 9917 } 9918 lock.unlock(); 9919 if (!skip_call) 9920 dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot); 9921} 9922 9923static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments, 9924 const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag) { 9925 bool skip_call = false; 9926 9927 for (uint32_t attach = 0; attach < count; attach++) { 9928 if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) { 9929 // Attachment counts are verified elsewhere, but prevent an invalid access 9930 if (attachments[attach].attachment < fbci->attachmentCount) { 9931 const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment]; 9932 auto view_state = getImageViewState(dev_data, *image_view); 9933 if (view_state) { 9934 const VkImageCreateInfo *ici = &getImageState(dev_data, view_state->create_info.image)->createInfo; 9935 if (ici != nullptr) { 9936 if ((ici->usage & usage_flag) == 0) { 9937 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 9938 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_USAGE, "DS", 9939 "vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's " 9940 "IMAGE_USAGE flags (%s).", 9941 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag)); 9942 } 9943 } 9944 } 9945 } 9946 } 9947 } 9948 return skip_call; 9949} 9950 9951// Validate VkFramebufferCreateInfo which includes: 9952// 1. attachmentCount equals renderPass attachmentCount 9953// 2. 
corresponding framebuffer and renderpass attachments have matching formats
//  3. corresponding framebuffer and renderpass attachments have matching sample counts
//  4. fb attachments only have a single mip level
//  5. fb attachment dimensions are each at least as large as the fb
//  6. fb attachments use identity swizzle
//  7. fb attachments used by renderPass for color/input/ds have correct usage bit set
//  8. fb dimensions are within physical device limits
static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    bool skip_call = false;

    auto rp_state = getRenderPassState(dev_data, pCreateInfo->renderPass);
    if (rp_state) {
        const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer.",
                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
        } else {
            // attachmentCounts match, so make sure corresponding attachment details line up
            const VkImageView *image_views = pCreateInfo->pAttachments;
            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = getImageViewState(dev_data, image_views[i]);
                auto &ivci = view_state->create_info;
                if (ivci.format != rpci->pAttachments[i].format) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
                              "the format of "
                              "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
                }
                const VkImageCreateInfo *ici = &getImageState(dev_data, ivci.image)->createInfo;
                if (ici->samples != rpci->pAttachments[i].samples) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
                              "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
                }
                // Verify that view only has a single mip level
                if (ivci.subresourceRange.levelCount != 1) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                         __LINE__, DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
                                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
                                         "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
                                         i, ivci.subresourceRange.levelCount);
                }
                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
                    (mip_height < pCreateInfo->height)) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                                DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
                                "than the corresponding "
                                "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
                                "dimensions for "
                                "attachment #%u, framebuffer:\n"
                                "width: %u, %u\n"
                                "height: %u, %u\n"
                                "layerCount: %u, %u\n",
                                i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
                                pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
                }
                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identity swizzle. All framebuffer "
                        "attachments must have been created with the identity swizzle. 
Here are the actual swizzle values:\n" 10035 "r swizzle = %s\n" 10036 "g swizzle = %s\n" 10037 "b swizzle = %s\n" 10038 "a swizzle = %s\n", 10039 i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g), 10040 string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a)); 10041 } 10042 } 10043 } 10044 // Verify correct attachment usage flags 10045 for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) { 10046 // Verify input attachments: 10047 skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, 10048 rpci->pSubpasses[subpass].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT); 10049 // Verify color attachments: 10050 skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, 10051 rpci->pSubpasses[subpass].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT); 10052 // Verify depth/stencil attachments: 10053 if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) { 10054 skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo, 10055 VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT); 10056 } 10057 } 10058 } else { 10059 skip_call |= 10060 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, 10061 reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 10062 "vkCreateFramebuffer(): Attempt to create framebuffer with invalid renderPass (0x%" PRIxLEAST64 ").", 10063 reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass)); 10064 } 10065 // Verify FB dimensions are within physical device limits 10066 if ((pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) || 10067 (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) || 10068 (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers)) { 10069 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__, 10070 DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS", 10071 "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo dimensions exceed physical device limits. 
" 10072 "Here are the respective dimensions: requested, device max:\n" 10073 "width: %u, %u\n" 10074 "height: %u, %u\n" 10075 "layerCount: %u, %u\n", 10076 pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth, 10077 pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight, 10078 pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers); 10079 } 10080 return skip_call; 10081} 10082 10083// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object 10084// Return true if an error is encountered and callback returns true to skip call down chain 10085// false indicates that call down chain should proceed 10086static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) { 10087 // TODO : Verify that renderPass FB is created with is compatible with FB 10088 bool skip_call = false; 10089 skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo); 10090 return skip_call; 10091} 10092 10093// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object 10094static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) { 10095 // Shadow create info and store in map 10096 std::unique_ptr<FRAMEBUFFER_STATE> fb_state( 10097 new FRAMEBUFFER_STATE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr())); 10098 10099 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { 10100 VkImageView view = pCreateInfo->pAttachments[i]; 10101 auto view_state = getImageViewState(dev_data, view); 10102 if (!view_state) { 10103 continue; 10104 } 10105 MT_FB_ATTACHMENT_INFO fb_info; 10106 fb_info.mem = getImageState(dev_data, view_state->create_info.image)->binding.mem; 10107 fb_info.view_state = view_state; 10108 fb_info.image = view_state->create_info.image; 10109 fb_state->attachments.push_back(fb_info); 10110 } 10111 dev_data->frameBufferMap[fb] = std::move(fb_state); 10112} 10113 10114VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo, 10115 const VkAllocationCallbacks *pAllocator, 10116 VkFramebuffer *pFramebuffer) { 10117 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 10118 std::unique_lock<std::mutex> lock(global_lock); 10119 bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo); 10120 lock.unlock(); 10121 10122 if (skip_call) 10123 return VK_ERROR_VALIDATION_FAILED_EXT; 10124 10125 VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer); 10126 10127 if (VK_SUCCESS == result) { 10128 lock.lock(); 10129 PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer); 10130 lock.unlock(); 10131 } 10132 return result; 10133} 10134 10135static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node, 10136 std::unordered_set<uint32_t> &processed_nodes) { 10137 // If we have already checked this node we have not found a dependency path so return false. 10138 if (processed_nodes.count(index)) 10139 return false; 10140 processed_nodes.insert(index); 10141 const DAGNode &node = subpass_to_node[index]; 10142 // Look for a dependency path. If one exists return true else recurse on the previous nodes. 
10143 if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) { 10144 for (auto elem : node.prev) { 10145 if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) 10146 return true; 10147 } 10148 } else { 10149 return true; 10150 } 10151 return false; 10152} 10153 10154static bool CheckDependencyExists(const layer_data *dev_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses, 10155 const std::vector<DAGNode> &subpass_to_node, bool &skip_call) { 10156 bool result = true; 10157 // Loop through all subpasses that share the same attachment and make sure a dependency exists 10158 for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) { 10159 if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) 10160 continue; 10161 const DAGNode &node = subpass_to_node[subpass]; 10162 // Check for a specified dependency between the two nodes. If one exists we are done. 10163 auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]); 10164 auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]); 10165 if (prev_elem == node.prev.end() && next_elem == node.next.end()) { 10166 // If no dependency exits an implicit dependency still might. If not, throw an error. 10167 std::unordered_set<uint32_t> processed_nodes; 10168 if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) || 10169 FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) { 10170 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 10171 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 10172 "A dependency between subpasses %d and %d must exist but one is not specified.", subpass, 10173 dependent_subpasses[k]); 10174 result = false; 10175 } 10176 } 10177 } 10178 return result; 10179} 10180 10181static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index, 10182 const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) { 10183 const DAGNode &node = subpass_to_node[index]; 10184 // If this node writes to the attachment return true as next nodes need to preserve the attachment. 10185 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index]; 10186 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 10187 if (attachment == subpass.pColorAttachments[j].attachment) 10188 return true; 10189 } 10190 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { 10191 if (attachment == subpass.pDepthStencilAttachment->attachment) 10192 return true; 10193 } 10194 bool result = false; 10195 // Loop through previous nodes and see if any of them write to the attachment. 10196 for (auto elem : node.prev) { 10197 result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call); 10198 } 10199 // If the attachment was written to by a previous node than this node needs to preserve it. 
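    // Illustrative example (assumed render pass, not part of the layer): with dependencies
    // 0 -> 1 -> 2, subpass 0 writes color attachment 2, subpass 2 reads it as an input attachment,
    // and subpass 1 uses it neither as an input nor as an output. Subpass 1 must then carry it as a
    // preserve attachment, which the application declares as, e.g.:
    //
    //     const uint32_t preserve[] = {2};
    //     subpass_descriptions[1].preserveAttachmentCount = 1;
    //     subpass_descriptions[1].pPreserveAttachments = preserve;
    //
    // Without that declaration the attachment's contents are not guaranteed to survive subpass 1,
    // so the check below reports an error against the subpass that dropped it.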
10200 if (result && depth > 0) { 10201 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index]; 10202 bool has_preserved = false; 10203 for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) { 10204 if (subpass.pPreserveAttachments[j] == attachment) { 10205 has_preserved = true; 10206 break; 10207 } 10208 } 10209 if (!has_preserved) { 10210 skip_call |= 10211 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 10212 DRAWSTATE_INVALID_RENDERPASS, "DS", 10213 "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index); 10214 } 10215 } 10216 return result; 10217} 10218 10219template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) { 10220 return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) || 10221 ((offset1 > offset2) && (offset1 < (offset2 + size2))); 10222} 10223 10224bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) { 10225 return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) && 10226 isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount)); 10227} 10228 10229static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer, 10230 RENDER_PASS_STATE const *renderPass) { 10231 bool skip_call = false; 10232 auto const pFramebufferInfo = framebuffer->createInfo.ptr(); 10233 auto const pCreateInfo = renderPass->createInfo.ptr(); 10234 auto const & subpass_to_node = renderPass->subpassToNode; 10235 std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount); 10236 std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount); 10237 std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount); 10238 // Find overlapping attachments 10239 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { 10240 for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) { 10241 VkImageView viewi = pFramebufferInfo->pAttachments[i]; 10242 VkImageView viewj = pFramebufferInfo->pAttachments[j]; 10243 if (viewi == viewj) { 10244 overlapping_attachments[i].push_back(j); 10245 overlapping_attachments[j].push_back(i); 10246 continue; 10247 } 10248 auto view_state_i = getImageViewState(dev_data, viewi); 10249 auto view_state_j = getImageViewState(dev_data, viewj); 10250 if (!view_state_i || !view_state_j) { 10251 continue; 10252 } 10253 auto view_ci_i = view_state_i->create_info; 10254 auto view_ci_j = view_state_j->create_info; 10255 if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) { 10256 overlapping_attachments[i].push_back(j); 10257 overlapping_attachments[j].push_back(i); 10258 continue; 10259 } 10260 auto image_data_i = getImageState(dev_data, view_ci_i.image); 10261 auto image_data_j = getImageState(dev_data, view_ci_j.image); 10262 if (!image_data_i || !image_data_j) { 10263 continue; 10264 } 10265 if (image_data_i->binding.mem == image_data_j->binding.mem && 10266 isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset, 10267 image_data_j->binding.size)) { 10268 overlapping_attachments[i].push_back(j); 10269 overlapping_attachments[j].push_back(i); 10270 } 10271 } 10272 } 10273 for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) { 10274 
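        // Illustrative note (application-side sketch, not part of the layer): a pair of attachments
        // lands in overlapping_attachments when the loop above found them sharing storage, e.g. two
        // views of the same VkImage with intersecting subresource ranges, or two images bound to
        // intersecting ranges of one VkDeviceMemory allocation. Both attachment descriptions must
        // then opt in to aliasing, e.g.:
        //
        //     attachment_descriptions[attachment].flags |= VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT;
        //     attachment_descriptions[other_attachment].flags |= VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT;
        //
        // The checks below report whichever of the two descriptions is missing the bit.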
uint32_t attachment = i; 10275 for (auto other_attachment : overlapping_attachments[i]) { 10276 if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) { 10277 skip_call |= 10278 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 10279 DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't " 10280 "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.", 10281 attachment, other_attachment); 10282 } 10283 if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) { 10284 skip_call |= 10285 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 10286 DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't " 10287 "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.", 10288 other_attachment, attachment); 10289 } 10290 } 10291 } 10292 // Find for each attachment the subpasses that use them. 10293 unordered_set<uint32_t> attachmentIndices; 10294 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 10295 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i]; 10296 attachmentIndices.clear(); 10297 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 10298 uint32_t attachment = subpass.pInputAttachments[j].attachment; 10299 if (attachment == VK_ATTACHMENT_UNUSED) 10300 continue; 10301 input_attachment_to_subpass[attachment].push_back(i); 10302 for (auto overlapping_attachment : overlapping_attachments[attachment]) { 10303 input_attachment_to_subpass[overlapping_attachment].push_back(i); 10304 } 10305 } 10306 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 10307 uint32_t attachment = subpass.pColorAttachments[j].attachment; 10308 if (attachment == VK_ATTACHMENT_UNUSED) 10309 continue; 10310 output_attachment_to_subpass[attachment].push_back(i); 10311 for (auto overlapping_attachment : overlapping_attachments[attachment]) { 10312 output_attachment_to_subpass[overlapping_attachment].push_back(i); 10313 } 10314 attachmentIndices.insert(attachment); 10315 } 10316 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { 10317 uint32_t attachment = subpass.pDepthStencilAttachment->attachment; 10318 output_attachment_to_subpass[attachment].push_back(i); 10319 for (auto overlapping_attachment : overlapping_attachments[attachment]) { 10320 output_attachment_to_subpass[overlapping_attachment].push_back(i); 10321 } 10322 10323 if (attachmentIndices.count(attachment)) { 10324 skip_call |= 10325 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 10326 DRAWSTATE_INVALID_RENDERPASS, "DS", 10327 "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i); 10328 } 10329 } 10330 } 10331 // If there is a dependency needed make sure one exists 10332 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 10333 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i]; 10334 // If the attachment is an input then all subpasses that output must have a dependency relationship 10335 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 10336 uint32_t attachment = subpass.pInputAttachments[j].attachment; 10337 if (attachment == VK_ATTACHMENT_UNUSED) 10338 continue; 10339 CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call); 10340 } 10341 // If 
the attachment is an output then all subpasses that use the attachment must have a dependency relationship 10342 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 10343 uint32_t attachment = subpass.pColorAttachments[j].attachment; 10344 if (attachment == VK_ATTACHMENT_UNUSED) 10345 continue; 10346 CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call); 10347 CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call); 10348 } 10349 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { 10350 const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment; 10351 CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call); 10352 CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call); 10353 } 10354 } 10355 // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was 10356 // written. 10357 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 10358 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i]; 10359 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 10360 CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call); 10361 } 10362 } 10363 return skip_call; 10364} 10365// ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the 10366// VkAttachmentDescription structs that are used by the sub-passes of a renderpass. Initial check is to make sure that 10367// READ_ONLY layout attachments don't have CLEAR as their loadOp. 10368static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout, 10369 const uint32_t attachment, 10370 const VkAttachmentDescription &attachment_description) { 10371 bool skip_call = false; 10372 // Verify that initial loadOp on READ_ONLY attachments is not CLEAR 10373 if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) { 10374 if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) || 10375 (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) { 10376 skip_call |= 10377 log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 10378 VkDebugReportObjectTypeEXT(0), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 10379 "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout)); 10380 } 10381 } 10382 return skip_call; 10383} 10384 10385static bool ValidateLayouts(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) { 10386 bool skip = false; 10387 10388 // Track when we're observing the first use of an attachment 10389 std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true); 10390 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 10391 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i]; 10392 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 10393 auto attach_index = subpass.pColorAttachments[j].attachment; 10394 if (attach_index == VK_ATTACHMENT_UNUSED) 10395 continue; 10396 10397 switch (subpass.pColorAttachments[j].layout) { 10398 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: 10399 /* This is ideal. 
*/ 10400 break; 10401 10402 case VK_IMAGE_LAYOUT_GENERAL: 10403 /* May not be optimal; TODO: reconsider this warning based on 10404 * other constraints? 10405 */ 10406 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, 10407 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 10408 "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL."); 10409 break; 10410 10411 default: 10412 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, 10413 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 10414 "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.", 10415 string_VkImageLayout(subpass.pColorAttachments[j].layout)); 10416 } 10417 10418 if (attach_first_use[attach_index]) { 10419 skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pColorAttachments[j].layout, 10420 attach_index, pCreateInfo->pAttachments[attach_index]); 10421 } 10422 attach_first_use[attach_index] = false; 10423 } 10424 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { 10425 switch (subpass.pDepthStencilAttachment->layout) { 10426 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: 10427 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: 10428 /* These are ideal. */ 10429 break; 10430 10431 case VK_IMAGE_LAYOUT_GENERAL: 10432 /* May not be optimal; TODO: reconsider this warning based on 10433 * other constraints? GENERAL can be better than doing a bunch 10434 * of transitions. 10435 */ 10436 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, 10437 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 10438 "GENERAL layout for depth attachment may not give optimal performance."); 10439 break; 10440 10441 default: 10442 /* No other layouts are acceptable */ 10443 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, 10444 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 10445 "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, " 10446 "DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.", 10447 string_VkImageLayout(subpass.pDepthStencilAttachment->layout)); 10448 } 10449 10450 auto attach_index = subpass.pDepthStencilAttachment->attachment; 10451 if (attach_first_use[attach_index]) { 10452 skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pDepthStencilAttachment->layout, 10453 attach_index, pCreateInfo->pAttachments[attach_index]); 10454 } 10455 attach_first_use[attach_index] = false; 10456 } 10457 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 10458 auto attach_index = subpass.pInputAttachments[j].attachment; 10459 if (attach_index == VK_ATTACHMENT_UNUSED) 10460 continue; 10461 10462 switch (subpass.pInputAttachments[j].layout) { 10463 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: 10464 case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: 10465 /* These are ideal. */ 10466 break; 10467 10468 case VK_IMAGE_LAYOUT_GENERAL: 10469 /* May not be optimal. TODO: reconsider this warning based on 10470 * other constraints. 
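                 * One case that legitimately needs GENERAL: a subpass that reads an attachment as
                 * an input attachment while also writing it as its color or depth/stencil
                 * attachment (a framebuffer feedback loop, typically declared together with a
                 * subpass self-dependency) must use VK_IMAGE_LAYOUT_GENERAL for that attachment,
                 * so this remains a performance warning rather than an error.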
10471 */ 10472 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, 10473 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 10474 "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL."); 10475 break; 10476 10477 default: 10478 /* No other layouts are acceptable */ 10479 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 10480 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 10481 "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.", 10482 string_VkImageLayout(subpass.pInputAttachments[j].layout)); 10483 } 10484 10485 if (attach_first_use[attach_index]) { 10486 skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pInputAttachments[j].layout, 10487 attach_index, pCreateInfo->pAttachments[attach_index]); 10488 } 10489 attach_first_use[attach_index] = false; 10490 } 10491 } 10492 return skip; 10493} 10494 10495static bool CreatePassDAG(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, 10496 std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) { 10497 bool skip_call = false; 10498 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 10499 DAGNode &subpass_node = subpass_to_node[i]; 10500 subpass_node.pass = i; 10501 } 10502 for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) { 10503 const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i]; 10504 if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) { 10505 if (dependency.srcSubpass == dependency.dstSubpass) { 10506 skip_call |= 10507 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 10508 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external."); 10509 } 10510 } else if (dependency.srcSubpass > dependency.dstSubpass) { 10511 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 10512 DRAWSTATE_INVALID_RENDERPASS, "DS", 10513 "Depedency graph must be specified such that an earlier pass cannot depend on a later pass."); 10514 } else if (dependency.srcSubpass == dependency.dstSubpass) { 10515 has_self_dependency[dependency.srcSubpass] = true; 10516 } else { 10517 subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass); 10518 subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass); 10519 } 10520 } 10521 return skip_call; 10522} 10523 10524 10525VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo, 10526 const VkAllocationCallbacks *pAllocator, 10527 VkShaderModule *pShaderModule) { 10528 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 10529 bool skip_call = false; 10530 10531 /* Use SPIRV-Tools validator to try and catch any issues with the module itself */ 10532 spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0); 10533 spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) }; 10534 spv_diagnostic diag = nullptr; 10535 10536 auto result = spvValidate(ctx, &binary, &diag); 10537 if (result != SPV_SUCCESS) { 10538 skip_call |= 10539 log_msg(dev_data->report_data, result == SPV_WARNING ? 
VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT, 10540 VkDebugReportObjectTypeEXT(0), 0, __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC", 10541 "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)"); 10542 } 10543 10544 spvDiagnosticDestroy(diag); 10545 spvContextDestroy(ctx); 10546 10547 if (skip_call) 10548 return VK_ERROR_VALIDATION_FAILED_EXT; 10549 10550 VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule); 10551 10552 if (res == VK_SUCCESS) { 10553 std::lock_guard<std::mutex> lock(global_lock); 10554 dev_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo)); 10555 } 10556 return res; 10557} 10558 10559static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) { 10560 bool skip_call = false; 10561 if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) { 10562 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 10563 VALIDATION_ERROR_00325, "DS", 10564 "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d. %s", 10565 type, attachment, attachment_count, validation_error_map[VALIDATION_ERROR_00325]); 10566 } 10567 return skip_call; 10568} 10569 10570static bool IsPowerOfTwo(unsigned x) { 10571 return x && !(x & (x-1)); 10572} 10573 10574static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) { 10575 bool skip_call = false; 10576 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 10577 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i]; 10578 if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) { 10579 skip_call |= 10580 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 10581 DRAWSTATE_INVALID_RENDERPASS, "DS", 10582 "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i); 10583 } 10584 for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) { 10585 uint32_t attachment = subpass.pPreserveAttachments[j]; 10586 if (attachment == VK_ATTACHMENT_UNUSED) { 10587 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 10588 __LINE__, DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS", 10589 "CreateRenderPass: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j); 10590 } else { 10591 skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve"); 10592 } 10593 } 10594 10595 auto subpass_performs_resolve = subpass.pResolveAttachments && std::any_of( 10596 subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount, 10597 [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; }); 10598 10599 unsigned sample_count = 0; 10600 10601 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 10602 uint32_t attachment; 10603 if (subpass.pResolveAttachments) { 10604 attachment = subpass.pResolveAttachments[j].attachment; 10605 skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve"); 10606 10607 if (!skip_call && attachment != VK_ATTACHMENT_UNUSED && 10608 pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) { 10609 skip_call |= log_msg(dev_data->report_data, 
VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 10610 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 10611 "CreateRenderPass: Subpass %u requests multisample resolve into attachment %u, " 10612 "which must have VK_SAMPLE_COUNT_1_BIT but has %s", 10613 i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples)); 10614 } 10615 } 10616 attachment = subpass.pColorAttachments[j].attachment; 10617 skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color"); 10618 10619 if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) { 10620 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples; 10621 10622 if (subpass_performs_resolve && 10623 pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) { 10624 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 10625 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 10626 "CreateRenderPass: Subpass %u requests multisample resolve from attachment %u " 10627 "which has VK_SAMPLE_COUNT_1_BIT", 10628 i, attachment); 10629 } 10630 } 10631 } 10632 10633 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { 10634 uint32_t attachment = subpass.pDepthStencilAttachment->attachment; 10635 skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil"); 10636 10637 if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) { 10638 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples; 10639 } 10640 } 10641 10642 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 10643 uint32_t attachment = subpass.pInputAttachments[j].attachment; 10644 skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input"); 10645 } 10646 10647 if (sample_count && !IsPowerOfTwo(sample_count)) { 10648 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 10649 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 10650 "CreateRenderPass: Subpass %u attempts to render to " 10651 "attachments with inconsistent sample counts", 10652 i); 10653 } 10654 } 10655 return skip_call; 10656} 10657 10658VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, 10659 const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) { 10660 bool skip_call = false; 10661 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 10662 10663 std::unique_lock<std::mutex> lock(global_lock); 10664 10665 // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with 10666 // ValidateLayouts. 
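    // For reference, a minimal application-side VkRenderPassCreateInfo that passes both checks below
    // might look like this (sketch with an assumed color format, not part of the layer):
    //
    //     VkAttachmentDescription color = {};
    //     color.format = VK_FORMAT_B8G8R8A8_UNORM;
    //     color.samples = VK_SAMPLE_COUNT_1_BIT;
    //     color.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    //     color.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    //     color.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    //     color.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    //     VkAttachmentReference color_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    //     VkSubpassDescription subpass = {};
    //     subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
    //     subpass.colorAttachmentCount = 1;
    //     subpass.pColorAttachments = &color_ref;
    //     VkRenderPassCreateInfo rp_ci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO};
    //     rp_ci.attachmentCount = 1;
    //     rp_ci.pAttachments = &color;
    //     rp_ci.subpassCount = 1;
    //     rp_ci.pSubpasses = &subpass;
    //
    // ValidateRenderpassAttachmentUsage covers the structural rules (graphics bind point, attachment
    // indices in range, no VK_ATTACHMENT_UNUSED preserve attachments, consistent sample counts within
    // a subpass), while ValidateLayouts covers the per-reference layouts, e.g. a color reference
    // using VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL would be rejected there.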
10667 skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo); 10668 if (!skip_call) { 10669 skip_call |= ValidateLayouts(dev_data, device, pCreateInfo); 10670 } 10671 lock.unlock(); 10672 10673 if (skip_call) { 10674 return VK_ERROR_VALIDATION_FAILED_EXT; 10675 } 10676 10677 VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass); 10678 10679 if (VK_SUCCESS == result) { 10680 lock.lock(); 10681 10682 std::vector<bool> has_self_dependency(pCreateInfo->subpassCount); 10683 std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount); 10684 skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency); 10685 10686 auto render_pass = unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo)); 10687 render_pass->renderPass = *pRenderPass; 10688 render_pass->hasSelfDependency = has_self_dependency; 10689 render_pass->subpassToNode = subpass_to_node; 10690 10691 // TODO: Maybe fill list and then copy instead of locking 10692 std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read; 10693 std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout; 10694 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 10695 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i]; 10696 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 10697 uint32_t attachment = subpass.pColorAttachments[j].attachment; 10698 if (!attachment_first_read.count(attachment)) { 10699 attachment_first_read.insert(std::make_pair(attachment, false)); 10700 attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout)); 10701 } 10702 } 10703 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { 10704 uint32_t attachment = subpass.pDepthStencilAttachment->attachment; 10705 if (!attachment_first_read.count(attachment)) { 10706 attachment_first_read.insert(std::make_pair(attachment, false)); 10707 attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout)); 10708 } 10709 } 10710 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 10711 uint32_t attachment = subpass.pInputAttachments[j].attachment; 10712 if (!attachment_first_read.count(attachment)) { 10713 attachment_first_read.insert(std::make_pair(attachment, true)); 10714 attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout)); 10715 } 10716 } 10717 } 10718 10719 dev_data->renderPassMap[*pRenderPass] = std::move(render_pass); 10720 } 10721 return result; 10722} 10723 10724static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) { 10725 bool skip_call = false; 10726 auto const pRenderPassInfo = getRenderPassState(dev_data, pRenderPassBegin->renderPass)->createInfo.ptr(); 10727 auto const & framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo; 10728 if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) { 10729 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 10730 DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer " 10731 "with a different number of attachments."); 10732 } 10733 for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) { 10734 const 
VkImageView &image_view = framebufferInfo.pAttachments[i]; 10735 auto view_state = getImageViewState(dev_data, image_view); 10736 assert(view_state); 10737 const VkImage &image = view_state->create_info.image; 10738 const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange; 10739 IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout, 10740 pRenderPassInfo->pAttachments[i].initialLayout}; 10741 // TODO: Do not iterate over every possibility - consolidate where possible 10742 for (uint32_t j = 0; j < subRange.levelCount; j++) { 10743 uint32_t level = subRange.baseMipLevel + j; 10744 for (uint32_t k = 0; k < subRange.layerCount; k++) { 10745 uint32_t layer = subRange.baseArrayLayer + k; 10746 VkImageSubresource sub = {subRange.aspectMask, level, layer}; 10747 IMAGE_CMD_BUF_LAYOUT_NODE node; 10748 if (!FindLayout(pCB, image, sub, node)) { 10749 SetLayout(pCB, image, sub, newNode); 10750 continue; 10751 } 10752 if (newNode.layout != VK_IMAGE_LAYOUT_UNDEFINED && 10753 newNode.layout != node.layout) { 10754 skip_call |= 10755 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 10756 DRAWSTATE_INVALID_RENDERPASS, "DS", 10757 "You cannot start a render pass using attachment %u " 10758 "where the render pass initial layout is %s and the previous " 10759 "known layout of the attachment is %s. The layouts must match, or " 10760 "the render pass initial layout for the attachment must be " 10761 "VK_IMAGE_LAYOUT_UNDEFINED", 10762 i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout)); 10763 } 10764 } 10765 } 10766 } 10767 return skip_call; 10768} 10769 10770static void TransitionAttachmentRefLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB, FRAMEBUFFER_STATE *pFramebuffer, 10771 VkAttachmentReference ref) { 10772 if (ref.attachment != VK_ATTACHMENT_UNUSED) { 10773 auto image_view = pFramebuffer->createInfo.pAttachments[ref.attachment]; 10774 SetLayout(dev_data, pCB, image_view, ref.layout); 10775 } 10776} 10777 10778static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin, 10779 const int subpass_index) { 10780 auto renderPass = getRenderPassState(dev_data, pRenderPassBegin->renderPass); 10781 if (!renderPass) 10782 return; 10783 10784 auto framebuffer = getFramebufferState(dev_data, pRenderPassBegin->framebuffer); 10785 if (!framebuffer) 10786 return; 10787 10788 auto const &subpass = renderPass->createInfo.pSubpasses[subpass_index]; 10789 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 10790 TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pInputAttachments[j]); 10791 } 10792 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 10793 TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pColorAttachments[j]); 10794 } 10795 if (subpass.pDepthStencilAttachment) { 10796 TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, *subpass.pDepthStencilAttachment); 10797 } 10798} 10799 10800static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) { 10801 bool skip_call = false; 10802 if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) { 10803 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 10804 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.", 10805 
cmd_name.c_str()); 10806 } 10807 return skip_call; 10808} 10809 10810static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) { 10811 auto renderPass = getRenderPassState(dev_data, pRenderPassBegin->renderPass); 10812 if (!renderPass) 10813 return; 10814 10815 const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->createInfo.ptr(); 10816 auto framebuffer = getFramebufferState(dev_data, pRenderPassBegin->framebuffer); 10817 if (!framebuffer) 10818 return; 10819 10820 for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) { 10821 auto image_view = framebuffer->createInfo.pAttachments[i]; 10822 SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout); 10823 } 10824} 10825 10826static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) { 10827 bool skip_call = false; 10828 const safe_VkFramebufferCreateInfo *pFramebufferInfo = 10829 &getFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo; 10830 if (pRenderPassBegin->renderArea.offset.x < 0 || 10831 (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width || 10832 pRenderPassBegin->renderArea.offset.y < 0 || 10833 (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) { 10834 skip_call |= static_cast<bool>(log_msg( 10835 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 10836 DRAWSTATE_INVALID_RENDER_AREA, "CORE", 10837 "Cannot execute a render pass with renderArea not within the bound of the " 10838 "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, " 10839 "height %d.", 10840 pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width, 10841 pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height)); 10842 } 10843 return skip_call; 10844} 10845 10846// If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the 10847// [load|store]Op flag must be checked 10848// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately. 10849template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) { 10850 if (color_depth_op != op && stencil_op != op) { 10851 return false; 10852 } 10853 bool check_color_depth_load_op = !vk_format_is_stencil_only(format); 10854 bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op; 10855 10856 return (((check_color_depth_load_op == true) && (color_depth_op == op)) || 10857 ((check_stencil_load_op == true) && (stencil_op == op))); 10858} 10859 10860VKAPI_ATTR void VKAPI_CALL 10861CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) { 10862 bool skip_call = false; 10863 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 10864 std::unique_lock<std::mutex> lock(global_lock); 10865 GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer); 10866 auto renderPass = pRenderPassBegin ? getRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr; 10867 auto framebuffer = pRenderPassBegin ? 
getFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr; 10868 if (cb_node) { 10869 if (renderPass) { 10870 uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR 10871 cb_node->activeFramebuffer = pRenderPassBegin->framebuffer; 10872 for (uint32_t i = 0; i < renderPass->createInfo.attachmentCount; ++i) { 10873 MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i]; 10874 auto pAttachment = &renderPass->createInfo.pAttachments[i]; 10875 if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, 10876 pAttachment->stencilLoadOp, 10877 VK_ATTACHMENT_LOAD_OP_CLEAR)) { 10878 clear_op_size = static_cast<uint32_t>(i) + 1; 10879 std::function<bool()> function = [=]() { 10880 SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true); 10881 return false; 10882 }; 10883 cb_node->validate_functions.push_back(function); 10884 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, 10885 pAttachment->stencilLoadOp, 10886 VK_ATTACHMENT_LOAD_OP_DONT_CARE)) { 10887 std::function<bool()> function = [=]() { 10888 SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false); 10889 return false; 10890 }; 10891 cb_node->validate_functions.push_back(function); 10892 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, 10893 pAttachment->stencilLoadOp, 10894 VK_ATTACHMENT_LOAD_OP_LOAD)) { 10895 std::function<bool()> function = [=]() { 10896 return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image), 10897 "vkCmdBeginRenderPass()"); 10898 }; 10899 cb_node->validate_functions.push_back(function); 10900 } 10901 if (renderPass->attachment_first_read[i]) { 10902 std::function<bool()> function = [=]() { 10903 return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image), 10904 "vkCmdBeginRenderPass()"); 10905 }; 10906 cb_node->validate_functions.push_back(function); 10907 } 10908 } 10909 if (clear_op_size > pRenderPassBegin->clearValueCount) { 10910 skip_call |= log_msg( 10911 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, 10912 reinterpret_cast<uint64_t &>(renderPass), __LINE__, VALIDATION_ERROR_00442, 10913 "DS", "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there must " 10914 "be at least %u entries in pClearValues array to account for the highest index attachment in renderPass " 10915 "0x%" PRIx64 " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array " 10916 "is indexed by attachment number so even if some pClearValues entries between 0 and %u correspond to " 10917 "attachments that aren't cleared they will be ignored. %s", 10918 pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass), clear_op_size, 10919 clear_op_size - 1, validation_error_map[VALIDATION_ERROR_00442]); 10920 } 10921 if (clear_op_size < pRenderPassBegin->clearValueCount) { 10922 skip_call |= log_msg( 10923 dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, 10924 reinterpret_cast<uint64_t &>(renderPass), __LINE__, DRAWSTATE_RENDERPASS_TOO_MANY_CLEAR_VALUES, "DS", 10925 "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but only first %u " 10926 "entries in pClearValues array are used. 
The highest index attachment in renderPass 0x%" PRIx64 10927 " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u - other pClearValues are ignored.", 10928 pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass), clear_op_size); 10929 } 10930 skip_call |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin); 10931 skip_call |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin); 10932 skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_00440); 10933 skip_call |= ValidateDependencies(dev_data, framebuffer, renderPass); 10934 skip_call |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass"); 10935 skip_call |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()"); 10936 UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BEGINRENDERPASS); 10937 cb_node->activeRenderPass = renderPass; 10938 // This is a shallow copy as that is all that is needed for now 10939 cb_node->activeRenderPassBeginInfo = *pRenderPassBegin; 10940 cb_node->activeSubpass = 0; 10941 cb_node->activeSubpassContents = contents; 10942 cb_node->framebuffers.insert(pRenderPassBegin->framebuffer); 10943 // Connect this framebuffer and its children to this cmdBuffer 10944 AddFramebufferBinding(dev_data, cb_node, framebuffer); 10945 // transition attachments to the correct layouts for the first subpass 10946 TransitionSubpassLayouts(dev_data, cb_node, &cb_node->activeRenderPassBeginInfo, cb_node->activeSubpass); 10947 } else { 10948 skip_call |= 10949 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 10950 DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()"); 10951 } 10952 } 10953 lock.unlock(); 10954 if (!skip_call) { 10955 dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents); 10956 } 10957} 10958 10959VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) { 10960 bool skip_call = false; 10961 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 10962 std::unique_lock<std::mutex> lock(global_lock); 10963 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 10964 if (pCB) { 10965 skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass"); 10966 skip_call |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()"); 10967 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_NEXTSUBPASS); 10968 skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_00458); 10969 10970 auto subpassCount = pCB->activeRenderPass->createInfo.subpassCount; 10971 if (pCB->activeSubpass == subpassCount - 1) { 10972 skip_call |= 10973 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 10974 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SUBPASS_INDEX, "DS", 10975 "vkCmdNextSubpass(): Attempted to advance beyond final subpass"); 10976 } 10977 } 10978 lock.unlock(); 10979 10980 if (skip_call) 10981 return; 10982 10983 dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents); 10984 10985 if (pCB) { 10986 lock.lock(); 10987 pCB->activeSubpass++; 10988 pCB->activeSubpassContents = contents; 10989 TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass); 10990 } 10991} 10992 10993VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer 
commandBuffer) { 10994 bool skip_call = false; 10995 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 10996 std::unique_lock<std::mutex> lock(global_lock); 10997 auto pCB = getCBNode(dev_data, commandBuffer); 10998 if (pCB) { 10999 RENDER_PASS_STATE *rp_state = pCB->activeRenderPass; 11000 auto framebuffer = getFramebufferState(dev_data, pCB->activeFramebuffer); 11001 if (rp_state) { 11002 if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) { 11003 skip_call |= 11004 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 11005 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SUBPASS_INDEX, "DS", 11006 "vkCmdEndRenderPass(): Called before reaching final subpass"); 11007 } 11008 11009 for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) { 11010 MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i]; 11011 auto pAttachment = &rp_state->createInfo.pAttachments[i]; 11012 if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp, 11013 pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_STORE)) { 11014 std::function<bool()> function = [=]() { 11015 SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true); 11016 return false; 11017 }; 11018 pCB->validate_functions.push_back(function); 11019 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp, 11020 pAttachment->stencilStoreOp, 11021 VK_ATTACHMENT_STORE_OP_DONT_CARE)) { 11022 std::function<bool()> function = [=]() { 11023 SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false); 11024 return false; 11025 }; 11026 pCB->validate_functions.push_back(function); 11027 } 11028 } 11029 } 11030 skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderpass()", VALIDATION_ERROR_00464); 11031 skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass"); 11032 skip_call |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()"); 11033 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_ENDRENDERPASS); 11034 } 11035 lock.unlock(); 11036 11037 if (skip_call) 11038 return; 11039 11040 dev_data->dispatch_table.CmdEndRenderPass(commandBuffer); 11041 11042 if (pCB) { 11043 lock.lock(); 11044 TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo); 11045 pCB->activeRenderPass = nullptr; 11046 pCB->activeSubpass = 0; 11047 pCB->activeFramebuffer = VK_NULL_HANDLE; 11048 } 11049} 11050 11051static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach, 11052 uint32_t secondaryAttach, const char *msg) { 11053 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 11054 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", 11055 "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64 " which has a render pass " 11056 "that is not compatible with the Primary Cmd Buffer current render pass. 
" 11057 "Attachment %u is not compatible with %u: %s", 11058 reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg); 11059} 11060 11061static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, 11062 VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach, 11063 VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI, 11064 uint32_t secondaryAttach, bool is_multi) { 11065 bool skip_call = false; 11066 if (primaryPassCI->attachmentCount <= primaryAttach) { 11067 primaryAttach = VK_ATTACHMENT_UNUSED; 11068 } 11069 if (secondaryPassCI->attachmentCount <= secondaryAttach) { 11070 secondaryAttach = VK_ATTACHMENT_UNUSED; 11071 } 11072 if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) { 11073 return skip_call; 11074 } 11075 if (primaryAttach == VK_ATTACHMENT_UNUSED) { 11076 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, 11077 "The first is unused while the second is not."); 11078 return skip_call; 11079 } 11080 if (secondaryAttach == VK_ATTACHMENT_UNUSED) { 11081 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, 11082 "The second is unused while the first is not."); 11083 return skip_call; 11084 } 11085 if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) { 11086 skip_call |= 11087 logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats."); 11088 } 11089 if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) { 11090 skip_call |= 11091 logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples."); 11092 } 11093 if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) { 11094 skip_call |= 11095 logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags."); 11096 } 11097 return skip_call; 11098} 11099 11100static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, 11101 VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer, 11102 VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) { 11103 bool skip_call = false; 11104 const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass]; 11105 const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass]; 11106 uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount); 11107 for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) { 11108 uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED; 11109 if (i < primary_desc.inputAttachmentCount) { 11110 primary_input_attach = primary_desc.pInputAttachments[i].attachment; 11111 } 11112 if (i < secondary_desc.inputAttachmentCount) { 11113 secondary_input_attach = secondary_desc.pInputAttachments[i].attachment; 11114 } 11115 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer, 11116 secondaryPassCI, secondary_input_attach, is_multi); 11117 } 11118 uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, 
secondary_desc.colorAttachmentCount); 11119 for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) { 11120 uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED; 11121 if (i < primary_desc.colorAttachmentCount) { 11122 primary_color_attach = primary_desc.pColorAttachments[i].attachment; 11123 } 11124 if (i < secondary_desc.colorAttachmentCount) { 11125 secondary_color_attach = secondary_desc.pColorAttachments[i].attachment; 11126 } 11127 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer, 11128 secondaryPassCI, secondary_color_attach, is_multi); 11129 uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED; 11130 if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) { 11131 primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment; 11132 } 11133 if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) { 11134 secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment; 11135 } 11136 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach, 11137 secondaryBuffer, secondaryPassCI, secondary_resolve_attach, is_multi); 11138 } 11139 uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED; 11140 if (primary_desc.pDepthStencilAttachment) { 11141 primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment; 11142 } 11143 if (secondary_desc.pDepthStencilAttachment) { 11144 secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment; 11145 } 11146 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach, 11147 secondaryBuffer, secondaryPassCI, secondary_depthstencil_attach, is_multi); 11148 return skip_call; 11149} 11150 11151// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible. 
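
// Note: the attachment checks above compare only format, sample count, and (when the render passes contain
// more than one subpass) the attachment description flags; differing load/store ops or layouts do not by
// themselves make two render passes incompatible here.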
// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
// This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
// will then feed into this function
static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
                                            VkRenderPassCreateInfo const *secondaryPassCI) {
    bool skip_call = false;

    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
                             " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
                             " that has a subpassCount of %u.",
                             reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount,
                             reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount);
    } else {
        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
            skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
                                                      primaryPassCI->subpassCount > 1);
        }
    }
    return skip_call;
}

static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
    bool skip_call = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip_call;
    }
    VkFramebuffer primary_fb = pCB->activeFramebuffer;
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb != VK_NULL_HANDLE) {
        if (primary_fb != secondary_fb) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
                                 "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64
                                 " which has a framebuffer 0x%" PRIx64
                                 " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ".",
                                 reinterpret_cast<uint64_t &>(secondaryBuffer), reinterpret_cast<uint64_t &>(secondary_fb),
                                 reinterpret_cast<uint64_t &>(primary_fb));
        }
        auto fb = getFramebufferState(dev_data, secondary_fb);
        if (!fb) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
            return skip_call;
        }
        auto cb_renderpass = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
        if (cb_renderpass->renderPass != fb->createInfo.renderPass) {
            skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
                                                         cb_renderpass->createInfo.ptr());
        }
    }
    return skip_call;
}

static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
    bool skip_call = false;
    unordered_set<int> activeTypes;
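    // For each query active in the primary command buffer: if it is a pipeline-statistics query, every statistic
    // requested by the secondary buffer's inheritance info must be present in the query pool's pipelineStatistics;
    // also record the active query types so a query of the same type started in the secondary buffer is flagged below.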
    for (auto queryObject : pCB->activeQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end()) {
            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
                pSubCB->beginInfo.pInheritanceInfo) {
                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                         "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                                         "which has invalid active query pool 0x%" PRIx64
                                         ". Pipeline statistics is being queried so the command "
                                         "buffer must have all bits set on the queryPool.",
                                         pCB->commandBuffer, reinterpret_cast<const uint64_t &>(queryPoolData->first));
                }
            }
            activeTypes.insert(queryPoolData->second.createInfo.queryType);
        }
    }
    for (auto queryObject : pSubCB->startedQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64 " of type %d but a query of that type has been started on "
                        "secondary Cmd Buffer 0x%p.",
                        pCB->commandBuffer, reinterpret_cast<const uint64_t &>(queryPoolData->first),
                        queryPoolData->second.createInfo.queryType, pSubCB->commandBuffer);
        }
    }

    auto primary_pool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
    auto secondary_pool = getCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    reinterpret_cast<uint64_t>(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
                    "vkCmdExecuteCommands(): Primary command buffer 0x%p"
                    " created in queue family %d has secondary command buffer 0x%p created in queue family %d.",
                    pCB->commandBuffer, primary_pool->queueFamilyIndex, pSubCB->commandBuffer, secondary_pool->queueFamilyIndex);
    }

    return skip_call;
}

VKAPI_ATTR void VKAPI_CALL
CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        GLOBAL_CB_NODE *pSubCB = NULL;
        for (uint32_t i = 0; i < commandBuffersCount; i++) {
            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
            if (!pSubCB) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
VALIDATION_ERROR_00160, "DS", 11276 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array. %s", 11277 pCommandBuffers[i], i, validation_error_map[VALIDATION_ERROR_00160]); 11278 } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) { 11279 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 11280 __LINE__, VALIDATION_ERROR_00153, "DS", 11281 "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers " 11282 "array. All cmd buffers in pCommandBuffers array must be secondary. %s", 11283 pCommandBuffers[i], i, validation_error_map[VALIDATION_ERROR_00153]); 11284 } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set 11285 auto secondary_rp_state = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass); 11286 if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { 11287 skip_call |= log_msg( 11288 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 11289 (uint64_t)pCommandBuffers[i], __LINE__, VALIDATION_ERROR_02057, "DS", 11290 "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64 11291 ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set. %s", 11292 pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass, 11293 validation_error_map[VALIDATION_ERROR_02057]); 11294 } else { 11295 // Make sure render pass is compatible with parent command buffer pass if has continue 11296 if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) { 11297 skip_call |= 11298 validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(), 11299 pCommandBuffers[i], secondary_rp_state->createInfo.ptr()); 11300 } 11301 // If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB 11302 skip_call |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB); 11303 } 11304 string errorString = ""; 11305 // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass 11306 if ((pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) && 11307 !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(), 11308 secondary_rp_state->createInfo.ptr(), errorString)) { 11309 skip_call |= log_msg( 11310 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 11311 (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS", 11312 "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 11313 ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s", 11314 pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, commandBuffer, 11315 (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str()); 11316 } 11317 } 11318 // TODO(mlentine): Move more logic into this method 11319 skip_call |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB); 11320 skip_call |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()"); 11321 // Secondary cmdBuffers are considered pending execution starting w/ 11322 // being recorded 11323 if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) { 11324 if 
(dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) { 11325 skip_call |= 11326 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 11327 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)(pCB->commandBuffer), __LINE__, 11328 VALIDATION_ERROR_00154, "DS", "Attempt to simultaneously execute command buffer 0x%p" 11329 " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set! %s", 11330 pCB->commandBuffer, validation_error_map[VALIDATION_ERROR_00154]); 11331 } 11332 if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) { 11333 // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous 11334 skip_call |= log_msg( 11335 dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 11336 (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS", 11337 "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) " 11338 "does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer " 11339 "(0x%p) to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT " 11340 "set, even though it does.", 11341 pCommandBuffers[i], pCB->commandBuffer); 11342 pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT; 11343 } 11344 } 11345 if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) { 11346 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 11347 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(pCommandBuffers[i]), 11348 __LINE__, VALIDATION_ERROR_02062, "DS", "vkCmdExecuteCommands(): Secondary Command Buffer " 11349 "(0x%p) cannot be submitted with a query in " 11350 "flight and inherited queries not " 11351 "supported on this device. 
%s", 11352 pCommandBuffers[i], validation_error_map[VALIDATION_ERROR_02062]); 11353 } 11354 // Propagate layout transitions to the primary cmd buffer 11355 for (auto ilm_entry : pSubCB->imageLayoutMap) { 11356 SetLayout(pCB, ilm_entry.first, ilm_entry.second); 11357 } 11358 pSubCB->primaryCommandBuffer = pCB->commandBuffer; 11359 pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer); 11360 dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer); 11361 for (auto &function : pSubCB->queryUpdates) { 11362 pCB->queryUpdates.push_back(function); 11363 } 11364 } 11365 skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteComands"); 11366 skip_call |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteComands()"); 11367 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_EXECUTECOMMANDS); 11368 } 11369 lock.unlock(); 11370 if (!skip_call) 11371 dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers); 11372} 11373 11374// For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL 11375static bool ValidateMapImageLayouts(VkDevice device, DEVICE_MEM_INFO const *mem_info, VkDeviceSize offset, 11376 VkDeviceSize end_offset) { 11377 bool skip_call = false; 11378 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 11379 // Iterate over all bound image ranges and verify that for any that overlap the 11380 // map ranges, the layouts are VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL 11381 // TODO : This can be optimized if we store ranges based on starting address and early exit when we pass our range 11382 for (auto image_handle : mem_info->bound_images) { 11383 auto img_it = mem_info->bound_ranges.find(image_handle); 11384 if (img_it != mem_info->bound_ranges.end()) { 11385 if (rangesIntersect(dev_data, &img_it->second, offset, end_offset)) { 11386 std::vector<VkImageLayout> layouts; 11387 if (FindLayouts(dev_data, VkImage(image_handle), layouts)) { 11388 for (auto layout : layouts) { 11389 if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) { 11390 skip_call |= 11391 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 11392 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only " 11393 "GENERAL or PREINITIALIZED are supported.", 11394 string_VkImageLayout(layout)); 11395 } 11396 } 11397 } 11398 } 11399 } 11400 } 11401 return skip_call; 11402} 11403 11404VKAPI_ATTR VkResult VKAPI_CALL 11405MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) { 11406 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 11407 11408 bool skip_call = false; 11409 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 11410 std::unique_lock<std::mutex> lock(global_lock); 11411 DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem); 11412 if (mem_info) { 11413 // TODO : This could me more fine-grained to track just region that is valid 11414 mem_info->global_valid = true; 11415 auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1; 11416 skip_call |= ValidateMapImageLayouts(device, mem_info, offset, end_offset); 11417 // TODO : Do we need to create new "bound_range" for the mapped range? 
11418 SetMemRangesValid(dev_data, mem_info, offset, end_offset); 11419 if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags & 11420 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { 11421 skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 11422 (uint64_t)mem, __LINE__, VALIDATION_ERROR_00629, "MEM", 11423 "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64 ". %s", 11424 (uint64_t)mem, validation_error_map[VALIDATION_ERROR_00629]); 11425 } 11426 } 11427 skip_call |= ValidateMapMemRange(dev_data, mem, offset, size); 11428 lock.unlock(); 11429 11430 if (!skip_call) { 11431 result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData); 11432 if (VK_SUCCESS == result) { 11433 lock.lock(); 11434 // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this 11435 storeMemRanges(dev_data, mem, offset, size); 11436 initializeAndTrackMemory(dev_data, mem, offset, size, ppData); 11437 lock.unlock(); 11438 } 11439 } 11440 return result; 11441} 11442 11443VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) { 11444 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 11445 bool skip_call = false; 11446 11447 std::unique_lock<std::mutex> lock(global_lock); 11448 skip_call |= deleteMemRanges(dev_data, mem); 11449 lock.unlock(); 11450 if (!skip_call) { 11451 dev_data->dispatch_table.UnmapMemory(device, mem); 11452 } 11453} 11454 11455static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount, 11456 const VkMappedMemoryRange *pMemRanges) { 11457 bool skip = false; 11458 for (uint32_t i = 0; i < memRangeCount; ++i) { 11459 auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory); 11460 if (mem_info) { 11461 if (pMemRanges[i].size == VK_WHOLE_SIZE) { 11462 if (mem_info->mem_range.offset > pMemRanges[i].offset) { 11463 skip |= 11464 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 11465 (uint64_t)pMemRanges[i].memory, __LINE__, VALIDATION_ERROR_00643, "MEM", 11466 "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset " 11467 "(" PRINTF_SIZE_T_SPECIFIER "). %s", 11468 funcName, static_cast<size_t>(pMemRanges[i].offset), 11469 static_cast<size_t>(mem_info->mem_range.offset), validation_error_map[VALIDATION_ERROR_00643]); 11470 } 11471 } else { 11472 const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE) 11473 ? mem_info->alloc_info.allocationSize 11474 : (mem_info->mem_range.offset + mem_info->mem_range.size); 11475 if ((mem_info->mem_range.offset > pMemRanges[i].offset) || 11476 (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) { 11477 skip |= 11478 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 11479 (uint64_t)pMemRanges[i].memory, __LINE__, VALIDATION_ERROR_00642, "MEM", 11480 "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER 11481 ") exceed the Memory Object's upper-bound " 11482 "(" PRINTF_SIZE_T_SPECIFIER "). 
%s", 11483 funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size), 11484 static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end), 11485 validation_error_map[VALIDATION_ERROR_00642]); 11486 } 11487 } 11488 } 11489 } 11490 return skip; 11491} 11492 11493static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count, 11494 const VkMappedMemoryRange *mem_ranges) { 11495 bool skip = false; 11496 for (uint32_t i = 0; i < mem_range_count; ++i) { 11497 auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory); 11498 if (mem_info) { 11499 if (mem_info->shadow_copy) { 11500 VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE) 11501 ? mem_info->mem_range.size 11502 : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset); 11503 char *data = static_cast<char *>(mem_info->shadow_copy); 11504 for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) { 11505 if (data[j] != NoncoherentMemoryFillValue) { 11506 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 11507 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem_ranges[i].memory, __LINE__, 11508 MEMTRACK_INVALID_MAP, "MEM", "Memory underflow was detected on mem obj 0x%" PRIxLEAST64, 11509 (uint64_t)mem_ranges[i].memory); 11510 } 11511 } 11512 for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) { 11513 if (data[j] != NoncoherentMemoryFillValue) { 11514 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 11515 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem_ranges[i].memory, __LINE__, 11516 MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64, 11517 (uint64_t)mem_ranges[i].memory); 11518 } 11519 } 11520 memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size)); 11521 } 11522 } 11523 } 11524 return skip; 11525} 11526 11527static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) { 11528 for (uint32_t i = 0; i < mem_range_count; ++i) { 11529 auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory); 11530 if (mem_info && mem_info->shadow_copy) { 11531 VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE) 11532 ? mem_info->mem_range.size 11533 : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset); 11534 char *data = static_cast<char *>(mem_info->shadow_copy); 11535 memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size)); 11536 } 11537 } 11538} 11539 11540static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count, 11541 const VkMappedMemoryRange *mem_ranges) { 11542 bool skip = false; 11543 for (uint32_t i = 0; i < mem_range_count; ++i) { 11544 uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize; 11545 if (vk_safe_modulo(mem_ranges[i].offset, atom_size) != 0) { 11546 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, 11547 __LINE__, VALIDATION_ERROR_00644, "MEM", 11548 "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64 11549 ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). 
%s", 11550 func_name, i, mem_ranges[i].offset, atom_size, validation_error_map[VALIDATION_ERROR_00644]); 11551 } 11552 if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (vk_safe_modulo(mem_ranges[i].size, atom_size) != 0)) { 11553 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, 11554 __LINE__, VALIDATION_ERROR_00645, "MEM", 11555 "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64 11556 ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s", 11557 func_name, i, mem_ranges[i].size, atom_size, validation_error_map[VALIDATION_ERROR_00645]); 11558 } 11559 } 11560 return skip; 11561} 11562 11563static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count, 11564 const VkMappedMemoryRange *mem_ranges) { 11565 bool skip = false; 11566 std::lock_guard<std::mutex> lock(global_lock); 11567 skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges); 11568 skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges); 11569 return skip; 11570} 11571 11572VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, 11573 const VkMappedMemoryRange *pMemRanges) { 11574 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 11575 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 11576 11577 if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) { 11578 result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges); 11579 } 11580 return result; 11581} 11582 11583static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count, 11584 const VkMappedMemoryRange *mem_ranges) { 11585 bool skip = false; 11586 std::lock_guard<std::mutex> lock(global_lock); 11587 skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges); 11588 return skip; 11589} 11590 11591static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count, 11592 const VkMappedMemoryRange *mem_ranges) { 11593 std::lock_guard<std::mutex> lock(global_lock); 11594 // Update our shadow copy with modified driver data 11595 CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges); 11596} 11597 11598VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, 11599 const VkMappedMemoryRange *pMemRanges) { 11600 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 11601 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 11602 11603 if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) { 11604 result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges); 11605 if (result == VK_SUCCESS) { 11606 PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges); 11607 } 11608 } 11609 return result; 11610} 11611 11612VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) { 11613 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 11614 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 11615 bool skip_call = false; 11616 std::unique_lock<std::mutex> lock(global_lock); 11617 auto image_state = getImageState(dev_data, image); 11618 if (image_state) { 11619 // Track 
objects tied to memory 11620 uint64_t image_handle = reinterpret_cast<uint64_t &>(image); 11621 skip_call = SetMemBinding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory"); 11622 if (!image_state->memory_requirements_checked) { 11623 // There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling 11624 // BindImageMemory but it's implied in that memory being bound must conform with VkMemoryRequirements from 11625 // vkGetImageMemoryRequirements() 11626 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 11627 image_handle, __LINE__, DRAWSTATE_INVALID_IMAGE, "DS", 11628 "vkBindImageMemory(): Binding memory to image 0x%" PRIxLEAST64 11629 " but vkGetImageMemoryRequirements() has not been called on that image.", 11630 image_handle); 11631 // Make the call for them so we can verify the state 11632 lock.unlock(); 11633 dev_data->dispatch_table.GetImageMemoryRequirements(device, image, &image_state->requirements); 11634 lock.lock(); 11635 } 11636 11637 // Track and validate bound memory range information 11638 auto mem_info = getMemObjInfo(dev_data, mem); 11639 if (mem_info) { 11640 skip_call |= InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements, 11641 image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR); 11642 skip_call |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, "vkBindImageMemory"); 11643 } 11644 11645 lock.unlock(); 11646 if (!skip_call) { 11647 result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset); 11648 lock.lock(); 11649 image_state->binding.mem = mem; 11650 image_state->binding.offset = memoryOffset; 11651 image_state->binding.size = image_state->requirements.size; 11652 lock.unlock(); 11653 } 11654 } else { 11655 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 11656 reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT", 11657 "vkBindImageMemory: Cannot find invalid image 0x%" PRIx64 ", has it already been deleted?", 11658 reinterpret_cast<const uint64_t &>(image)); 11659 } 11660 return result; 11661} 11662 11663VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) { 11664 bool skip_call = false; 11665 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 11666 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 11667 std::unique_lock<std::mutex> lock(global_lock); 11668 auto event_state = getEventNode(dev_data, event); 11669 if (event_state) { 11670 event_state->needsSignaled = false; 11671 event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT; 11672 if (event_state->write_in_use) { 11673 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, 11674 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS", 11675 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.", 11676 reinterpret_cast<const uint64_t &>(event)); 11677 } 11678 } 11679 lock.unlock(); 11680 // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event 11681 // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the 11682 // ordering of this command in relation to vkCmd[Set|Reset]Events (see 
GH297) 11683 for (auto queue_data : dev_data->queueMap) { 11684 auto event_entry = queue_data.second.eventToStageMap.find(event); 11685 if (event_entry != queue_data.second.eventToStageMap.end()) { 11686 event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT; 11687 } 11688 } 11689 if (!skip_call) 11690 result = dev_data->dispatch_table.SetEvent(device, event); 11691 return result; 11692} 11693 11694VKAPI_ATTR VkResult VKAPI_CALL 11695QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) { 11696 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map); 11697 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 11698 bool skip_call = false; 11699 std::unique_lock<std::mutex> lock(global_lock); 11700 auto pFence = getFenceNode(dev_data, fence); 11701 auto pQueue = getQueueState(dev_data, queue); 11702 11703 // First verify that fence is not in use 11704 skip_call |= ValidateFenceForSubmit(dev_data, pFence); 11705 11706 if (pFence) { 11707 SubmitFence(pQueue, pFence, bindInfoCount); 11708 } 11709 11710 for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) { 11711 const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx]; 11712 // Track objects tied to memory 11713 for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) { 11714 for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) { 11715 auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k]; 11716 if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size}, 11717 (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, 11718 "vkQueueBindSparse")) 11719 skip_call = true; 11720 } 11721 } 11722 for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) { 11723 for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) { 11724 auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k]; 11725 if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size}, 11726 (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 11727 "vkQueueBindSparse")) 11728 skip_call = true; 11729 } 11730 } 11731 for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) { 11732 for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) { 11733 auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k]; 11734 // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data 11735 VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4; 11736 if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size}, 11737 (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 11738 "vkQueueBindSparse")) 11739 skip_call = true; 11740 } 11741 } 11742 11743 std::vector<SEMAPHORE_WAIT> semaphore_waits; 11744 std::vector<VkSemaphore> semaphore_signals; 11745 for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) { 11746 VkSemaphore semaphore = bindInfo.pWaitSemaphores[i]; 11747 auto pSemaphore = getSemaphoreNode(dev_data, semaphore); 11748 if (pSemaphore) { 11749 if (pSemaphore->signaled) { 11750 if (pSemaphore->signaler.first != VK_NULL_HANDLE) { 11751 semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second}); 11752 pSemaphore->in_use.fetch_add(1); 11753 } 11754 pSemaphore->signaler.first = VK_NULL_HANDLE; 11755 pSemaphore->signaled = false; 11756 } 
else { 11757 skip_call |= log_msg( 11758 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 11759 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS", 11760 "vkQueueBindSparse: Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", 11761 queue, reinterpret_cast<const uint64_t &>(semaphore)); 11762 } 11763 } 11764 } 11765 for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) { 11766 VkSemaphore semaphore = bindInfo.pSignalSemaphores[i]; 11767 auto pSemaphore = getSemaphoreNode(dev_data, semaphore); 11768 if (pSemaphore) { 11769 if (pSemaphore->signaled) { 11770 skip_call = 11771 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 11772 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS", 11773 "vkQueueBindSparse: Queue 0x%p is signaling semaphore 0x%" PRIx64 11774 ", but that semaphore is already signaled.", 11775 queue, reinterpret_cast<const uint64_t &>(semaphore)); 11776 } 11777 else { 11778 pSemaphore->signaler.first = queue; 11779 pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1; 11780 pSemaphore->signaled = true; 11781 pSemaphore->in_use.fetch_add(1); 11782 semaphore_signals.push_back(semaphore); 11783 } 11784 } 11785 } 11786 11787 pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), 11788 semaphore_waits, 11789 semaphore_signals, 11790 bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE); 11791 } 11792 11793 if (pFence && !bindInfoCount) { 11794 // No work to do, just dropping a fence in the queue by itself. 11795 pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), 11796 std::vector<SEMAPHORE_WAIT>(), 11797 std::vector<VkSemaphore>(), 11798 fence); 11799 } 11800 11801 lock.unlock(); 11802 11803 if (!skip_call) 11804 return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence); 11805 11806 return result; 11807} 11808 11809VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo, 11810 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) { 11811 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 11812 VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore); 11813 if (result == VK_SUCCESS) { 11814 std::lock_guard<std::mutex> lock(global_lock); 11815 SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore]; 11816 sNode->signaler.first = VK_NULL_HANDLE; 11817 sNode->signaler.second = 0; 11818 sNode->signaled = false; 11819 } 11820 return result; 11821} 11822 11823VKAPI_ATTR VkResult VKAPI_CALL 11824CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) { 11825 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 11826 VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent); 11827 if (result == VK_SUCCESS) { 11828 std::lock_guard<std::mutex> lock(global_lock); 11829 dev_data->eventMap[*pEvent].needsSignaled = false; 11830 dev_data->eventMap[*pEvent].write_in_use = 0; 11831 dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0); 11832 } 11833 return result; 11834} 11835 11836static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, VkSwapchainCreateInfoKHR const *pCreateInfo, 11837 SURFACE_STATE 
*surface_state, SWAPCHAIN_NODE *old_swapchain_state) { 11838 auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain; 11839 11840 // TODO: revisit this. some of these rules are being relaxed. 11841 if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) { 11842 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 11843 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS", 11844 "vkCreateSwapchainKHR(): surface has an existing swapchain other than oldSwapchain")) 11845 return true; 11846 } 11847 if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) { 11848 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, 11849 reinterpret_cast<uint64_t const &>(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE, 11850 "DS", "vkCreateSwapchainKHR(): pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface")) 11851 return true; 11852 } 11853 auto physical_device_state = getPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device); 11854 if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) { 11855 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 11856 reinterpret_cast<uint64_t>(dev_data->physical_device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS", 11857 "vkCreateSwapchainKHR(): surface capabilities not retrieved for this physical device")) 11858 return true; 11859 } else { // have valid capabilities 11860 auto &capabilities = physical_device_state->surfaceCapabilities; 11861 // Validate pCreateInfo->minImageCount against 11862 // VkSurfaceCapabilitiesKHR::{min|max}ImageCount: 11863 11864 if (pCreateInfo->minImageCount < capabilities.minImageCount) { 11865 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 11866 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02331, "DS", 11867 "vkCreateSwapchainKHR() called with pCreateInfo->minImageCount = %d, which is outside the bounds returned " 11868 "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s", 11869 pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount, 11870 validation_error_map[VALIDATION_ERROR_02331])) 11871 return true; 11872 } 11873 11874 if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) { 11875 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 11876 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02332, "DS", 11877 "vkCreateSwapchainKHR() called with pCreateInfo->minImageCount = %d, which is outside the bounds returned " 11878 "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). 
%s", 11879 pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount, 11880 validation_error_map[VALIDATION_ERROR_02332])) 11881 return true; 11882 } 11883 11884 // Validate pCreateInfo->imageExtent against 11885 // VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent: 11886 if ((capabilities.currentExtent.width == kSurfaceSizeFromSwapchain) && 11887 ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) || 11888 (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) || 11889 (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) || 11890 (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height))) { 11891 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 11892 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02334, "DS", 11893 "vkCreateSwapchainKHR() called with pCreateInfo->imageExtent = (%d,%d), which is outside the " 11894 "bounds returned by vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), " 11895 "minImageExtent = (%d,%d), maxImageExtent = (%d,%d). %s", 11896 pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, capabilities.currentExtent.width, 11897 capabilities.currentExtent.height, capabilities.minImageExtent.width, capabilities.minImageExtent.height, 11898 capabilities.maxImageExtent.width, capabilities.maxImageExtent.height, 11899 validation_error_map[VALIDATION_ERROR_02334])) 11900 return true; 11901 } 11902 if ((capabilities.currentExtent.width != kSurfaceSizeFromSwapchain) && 11903 ((pCreateInfo->imageExtent.width != capabilities.currentExtent.width) || 11904 (pCreateInfo->imageExtent.height != capabilities.currentExtent.height))) { 11905 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 11906 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02334, "DS", 11907 "vkCreateSwapchainKHR() called with pCreateInfo->imageExtent = (%d,%d), which is not equal to the " 11908 "currentExtent = (%d,%d) returned by vkGetPhysicalDeviceSurfaceCapabilitiesKHR(). %s", 11909 pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, capabilities.currentExtent.width, 11910 capabilities.currentExtent.height, 11911 validation_error_map[VALIDATION_ERROR_02334])) 11912 return true; 11913 } 11914 // pCreateInfo->preTransform should have exactly one bit set, and that 11915 // bit must also be set in VkSurfaceCapabilitiesKHR::supportedTransforms. 11916 if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) || 11917 !(pCreateInfo->preTransform & capabilities.supportedTransforms)) { 11918 // This is an error situation; one for which we'd like to give 11919 // the developer a helpful, multi-line error message. Build it 11920 // up a little at a time, and then log it: 11921 std::string errorString = ""; 11922 char str[1024]; 11923 // Here's the first part of the message: 11924 sprintf(str, "vkCreateSwapchainKHR() called with a non-supported " 11925 "pCreateInfo->preTransform (i.e. %s). 
" 11926 "Supported values are:\n", 11927 string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform)); 11928 errorString += str; 11929 for (int i = 0; i < 32; i++) { 11930 // Build up the rest of the message: 11931 if ((1 << i) & capabilities.supportedTransforms) { 11932 const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i)); 11933 sprintf(str, " %s\n", newStr); 11934 errorString += str; 11935 } 11936 } 11937 // Log the message that we've built up: 11938 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 11939 reinterpret_cast<uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_02339, "DS", "%s. %s", 11940 errorString.c_str(), validation_error_map[VALIDATION_ERROR_02339])) 11941 return true; 11942 } 11943 11944 // pCreateInfo->compositeAlpha should have exactly one bit set, and that 11945 // bit must also be set in VkSurfaceCapabilitiesKHR::supportedCompositeAlpha 11946 if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) || 11947 !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) { 11948 // This is an error situation; one for which we'd like to give 11949 // the developer a helpful, multi-line error message. Build it 11950 // up a little at a time, and then log it: 11951 std::string errorString = ""; 11952 char str[1024]; 11953 // Here's the first part of the message: 11954 sprintf(str, "vkCreateSwapchainKHR() called with a non-supported " 11955 "pCreateInfo->compositeAlpha (i.e. %s). " 11956 "Supported values are:\n", 11957 string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha)); 11958 errorString += str; 11959 for (int i = 0; i < 32; i++) { 11960 // Build up the rest of the message: 11961 if ((1 << i) & capabilities.supportedCompositeAlpha) { 11962 const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i)); 11963 sprintf(str, " %s\n", newStr); 11964 errorString += str; 11965 } 11966 } 11967 // Log the message that we've built up: 11968 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 11969 reinterpret_cast<uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_02340, "DS", "%s. %s", 11970 errorString.c_str(), validation_error_map[VALIDATION_ERROR_02340])) 11971 return true; 11972 } 11973 // Validate pCreateInfo->imageArrayLayers against 11974 // VkSurfaceCapabilitiesKHR::maxImageArrayLayers: 11975 if ((pCreateInfo->imageArrayLayers < 1) || (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers)) { 11976 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 11977 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02335, "DS", 11978 "vkCreateSwapchainKHR() called with a non-supported pCreateInfo->imageArrayLayers (i.e. %d). " 11979 "Minimum value is 1, maximum value is %d. 
%s", 11980 pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers, 11981 validation_error_map[VALIDATION_ERROR_02335])) 11982 return true; 11983 } 11984 // Validate pCreateInfo->imageUsage against 11985 // VkSurfaceCapabilitiesKHR::supportedUsageFlags: 11986 if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) { 11987 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 11988 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02336, "DS", 11989 "vkCreateSwapchainKHR() called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). " 11990 "Supported flag bits are 0x%08x. %s", 11991 pCreateInfo->imageUsage, capabilities.supportedUsageFlags, validation_error_map[VALIDATION_ERROR_02336])) 11992 return true; 11993 } 11994 } 11995 11996 // Validate pCreateInfo values with the results of 11997 // vkGetPhysicalDeviceSurfaceFormatsKHR(): 11998 if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) { 11999 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 12000 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS", 12001 "vkCreateSwapchainKHR() called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().")) 12002 return true; 12003 } else { 12004 // Validate pCreateInfo->imageFormat against 12005 // VkSurfaceFormatKHR::format: 12006 bool foundFormat = false; 12007 bool foundColorSpace = false; 12008 bool foundMatch = false; 12009 for (auto const &format : physical_device_state->surface_formats) { 12010 if (pCreateInfo->imageFormat == format.format) { 12011 // Validate pCreateInfo->imageColorSpace against 12012 // VkSurfaceFormatKHR::colorSpace: 12013 foundFormat = true; 12014 if (pCreateInfo->imageColorSpace == format.colorSpace) { 12015 foundMatch = true; 12016 break; 12017 } 12018 } else { 12019 if (pCreateInfo->imageColorSpace == format.colorSpace) { 12020 foundColorSpace = true; 12021 } 12022 } 12023 } 12024 if (!foundMatch) { 12025 if (!foundFormat) { 12026 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 12027 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02333, "DS", 12028 "vkCreateSwapchainKHR() called with a non-supported pCreateInfo->imageFormat (i.e. %d). %s", 12029 pCreateInfo->imageFormat, validation_error_map[VALIDATION_ERROR_02333])) 12030 return true; 12031 } 12032 if (!foundColorSpace) { 12033 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 12034 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02333, "DS", 12035 "vkCreateSwapchainKHR() called with a non-supported pCreateInfo->imageColorSpace (i.e. %d). 
%s", 12036 pCreateInfo->imageColorSpace, validation_error_map[VALIDATION_ERROR_02333])) 12037 return true; 12038 } 12039 } 12040 } 12041 12042 // Validate pCreateInfo values with the results of 12043 // vkGetPhysicalDeviceSurfacePresentModesKHR(): 12044 if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) { 12045 /* FIFO is required to always be supported */ 12046 if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) { 12047 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 12048 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, 12049 "DS", "vkCreateSwapchainKHR() called before calling " 12050 "vkGetPhysicalDeviceSurfacePresentModesKHR().")) 12051 return true; 12052 } 12053 } else { 12054 // Validate pCreateInfo->presentMode against 12055 // vkGetPhysicalDeviceSurfacePresentModesKHR(): 12056 bool foundMatch = std::find(physical_device_state->present_modes.begin(), 12057 physical_device_state->present_modes.end(), 12058 pCreateInfo->presentMode) != physical_device_state->present_modes.end(); 12059 if (!foundMatch) { 12060 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 12061 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02341, "DS", 12062 "vkCreateSwapchainKHR() called with a non-supported pCreateInfo->presentMode (i.e. %s). %s", 12063 string_VkPresentModeKHR(pCreateInfo->presentMode), validation_error_map[VALIDATION_ERROR_02341])) 12064 return true; 12065 } 12066 } 12067 12068 12069 return false; 12070} 12071 12072VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, 12073 const VkAllocationCallbacks *pAllocator, 12074 VkSwapchainKHR *pSwapchain) { 12075 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 12076 auto surface_state = getSurfaceState(dev_data->instance_data, pCreateInfo->surface); 12077 auto old_swapchain_state = getSwapchainNode(dev_data, pCreateInfo->oldSwapchain); 12078 12079 if (PreCallValidateCreateSwapchainKHR(dev_data, pCreateInfo, surface_state, old_swapchain_state)) 12080 return VK_ERROR_VALIDATION_FAILED_EXT; 12081 12082 VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain); 12083 12084 if (VK_SUCCESS == result) { 12085 std::lock_guard<std::mutex> lock(global_lock); 12086 auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain)); 12087 surface_state->swapchain = swapchain_state.get(); 12088 dev_data->device_extensions.swapchainMap[*pSwapchain] = std::move(swapchain_state); 12089 } else { 12090 surface_state->swapchain = nullptr; 12091 } 12092 12093 // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced. 
12094 if (old_swapchain_state) { 12095 old_swapchain_state->replaced = true; 12096 } 12097 surface_state->old_swapchain = old_swapchain_state; 12098 12099 return result; 12100} 12101 12102VKAPI_ATTR void VKAPI_CALL 12103DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) { 12104 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 12105 bool skip_call = false; 12106 12107 std::unique_lock<std::mutex> lock(global_lock); 12108 auto swapchain_data = getSwapchainNode(dev_data, swapchain); 12109 if (swapchain_data) { 12110 if (swapchain_data->images.size() > 0) { 12111 for (auto swapchain_image : swapchain_data->images) { 12112 auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image); 12113 if (image_sub != dev_data->imageSubresourceMap.end()) { 12114 for (auto imgsubpair : image_sub->second) { 12115 auto image_item = dev_data->imageLayoutMap.find(imgsubpair); 12116 if (image_item != dev_data->imageLayoutMap.end()) { 12117 dev_data->imageLayoutMap.erase(image_item); 12118 } 12119 } 12120 dev_data->imageSubresourceMap.erase(image_sub); 12121 } 12122 skip_call = 12123 ClearMemoryObjectBindings(dev_data, (uint64_t)swapchain_image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT); 12124 dev_data->imageMap.erase(swapchain_image); 12125 } 12126 } 12127 12128 auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface); 12129 if (surface_state) { 12130 if (surface_state->swapchain == swapchain_data) 12131 surface_state->swapchain = nullptr; 12132 if (surface_state->old_swapchain == swapchain_data) 12133 surface_state->old_swapchain = nullptr; 12134 } 12135 12136 dev_data->device_extensions.swapchainMap.erase(swapchain); 12137 } 12138 lock.unlock(); 12139 if (!skip_call) 12140 dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator); 12141} 12142 12143VKAPI_ATTR VkResult VKAPI_CALL 12144GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) { 12145 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 12146 VkResult result = dev_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages); 12147 12148 if (result == VK_SUCCESS && pSwapchainImages != NULL) { 12149 // This should never happen and is checked by param checker. 
12150 if (!pCount) 12151 return result; 12152 std::lock_guard<std::mutex> lock(global_lock); 12153 const size_t count = *pCount; 12154 auto swapchain_node = getSwapchainNode(dev_data, swapchain); 12155 if (swapchain_node && !swapchain_node->images.empty()) { 12156 // TODO : Not sure I like the memcmp here, but it works 12157 const bool mismatch = (swapchain_node->images.size() != count || 12158 memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count)); 12159 if (mismatch) { 12160 // TODO: Verify against Valid Usage section of extension 12161 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, 12162 (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN", 12163 "vkGetSwapchainInfoKHR(0x%" PRIx64 12164 ", VK_SWAP_CHAIN_INFO_TYPE_PERSISTENT_IMAGES_KHR) returned mismatching data", 12165 (uint64_t)(swapchain)); 12166 } 12167 } 12168 for (uint32_t i = 0; i < *pCount; ++i) { 12169 IMAGE_LAYOUT_NODE image_layout_node; 12170 image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED; 12171 image_layout_node.format = swapchain_node->createInfo.imageFormat; 12172 // Add imageMap entries for each swapchain image 12173 VkImageCreateInfo image_ci = {}; 12174 image_ci.mipLevels = 1; 12175 image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers; 12176 image_ci.usage = swapchain_node->createInfo.imageUsage; 12177 image_ci.format = swapchain_node->createInfo.imageFormat; 12178 image_ci.samples = VK_SAMPLE_COUNT_1_BIT; 12179 image_ci.extent.width = swapchain_node->createInfo.imageExtent.width; 12180 image_ci.extent.height = swapchain_node->createInfo.imageExtent.height; 12181 image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode; 12182 dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci)); 12183 auto &image_state = dev_data->imageMap[pSwapchainImages[i]]; 12184 image_state->valid = false; 12185 image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY; 12186 swapchain_node->images.push_back(pSwapchainImages[i]); 12187 ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()}; 12188 dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair); 12189 dev_data->imageLayoutMap[subpair] = image_layout_node; 12190 dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain; 12191 } 12192 } 12193 return result; 12194} 12195 12196VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) { 12197 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map); 12198 bool skip_call = false; 12199 12200 std::lock_guard<std::mutex> lock(global_lock); 12201 auto queue_state = getQueueState(dev_data, queue); 12202 12203 for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) { 12204 auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]); 12205 if (pSemaphore && !pSemaphore->signaled) { 12206 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 12207 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, 12208 "DS", "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue, 12209 reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i])); 12210 } 12211 } 12212 12213 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) { 12214 auto swapchain_data = getSwapchainNode(dev_data, 
        if (swapchain_data) {
            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                     reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE,
                                     "DS", "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
                                     pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
            } else {
                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
                auto image_state = getImageState(dev_data, image);
                skip_call |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");

                if (!image_state->acquired) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                         reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED,
                                         "DS", "vkQueuePresentKHR: Swapchain image index %u has not been acquired.",
                                         pPresentInfo->pImageIndices[i]);
                }

                vector<VkImageLayout> layouts;
                if (FindLayouts(dev_data, image, layouts)) {
                    for (auto layout : layouts) {
                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                                        reinterpret_cast<uint64_t &>(queue), __LINE__, VALIDATION_ERROR_01964, "DS",
                                        "Images passed to present must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but this image is in %s. %s",
                                        string_VkImageLayout(layout), validation_error_map[VALIDATION_ERROR_01964]);
                        }
                    }
                }
            }

            // All physical devices and queue families are required to be able
            // to present to any native window on Android; require the
            // application to have established support on any other platform.
            if (!dev_data->instance_data->androidSurfaceExtensionEnabled) {
                auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
                auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});

                if (support_it == surface_state->gpu_queue_support.end()) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__,
                                DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE, "DS", "vkQueuePresentKHR: Presenting image without calling "
                                                                             "vkGetPhysicalDeviceSurfaceSupportKHR");
                } else if (!support_it->second) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, VALIDATION_ERROR_01961,
                                "DS", "vkQueuePresentKHR: Presenting image on queue that cannot "
                                      "present to this surface. %s",
                                validation_error_map[VALIDATION_ERROR_01961]);
                }
            }
        }
    }

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);

    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
        // Semaphore waits occur before error generation, if the call reached
        // the ICD. (Confirm?)
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
            auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
            if (pSemaphore) {
                pSemaphore->signaler.first = VK_NULL_HANDLE;
                pSemaphore->signaled = false;
            }
        }

        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
            // Note: this is imperfect, in that we can get confused about what
            // did or didn't succeed -- but if the app does that, it's confused
            // itself just as much.
            auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;

            if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR)
                continue; // this present didn't actually happen.

            // Mark the image as having been released to the WSI
            auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
            auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
            auto image_state = getImageState(dev_data, image);
            image_state->acquired = false;
        }

        // Note: even though presentation is directed to a queue, there is no
        // direct ordering between QP and subsequent work, so QP (and its
        // semaphore waits) /never/ participate in any completion proof.
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    VkResult result =
        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
    return result;
}
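
// Illustrative only: the checks in QueuePresentKHR above and AcquireNextImageKHR below
// enforce the usual acquire -> render -> present protocol. A minimal application-side
// loop looks roughly like the sketch here (names such as `acquire_semaphore`,
// `render_semaphore`, and `present_queue` are placeholders; error handling omitted):
//
//     uint32_t image_index = 0;
//     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, acquire_semaphore, VK_NULL_HANDLE, &image_index);
//     // ... submit work that waits on acquire_semaphore and signals render_semaphore ...
//     VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
//     present_info.waitSemaphoreCount = 1;
//     present_info.pWaitSemaphores = &render_semaphore;
//     present_info.swapchainCount = 1;
//     present_info.pSwapchains = &swapchain;
//     present_info.pImageIndices = &image_index;
//     vkQueuePresentKHR(present_queue, &present_info);
//
// Presenting an index that was never acquired, or an image in a layout other than
// VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, is flagged by the QueuePresentKHR checks above.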
VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    std::unique_lock<std::mutex> lock(global_lock);

    if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                             reinterpret_cast<uint64_t &>(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
                             "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
                             "to determine the completion of this operation.");
    }

    auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
    if (pSemaphore && pSemaphore->signaled) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                             reinterpret_cast<const uint64_t &>(semaphore), __LINE__, VALIDATION_ERROR_01952, "DS",
                             "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state. %s",
                             validation_error_map[VALIDATION_ERROR_01952]);
    }

    auto pFence = getFenceNode(dev_data, fence);
    if (pFence) {
        skip_call |= ValidateFenceForSubmit(dev_data, pFence);
    }

    auto swapchain_data = getSwapchainNode(dev_data, swapchain);

    if (swapchain_data->replaced) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                             reinterpret_cast<uint64_t &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_REPLACED, "DS",
                             "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still "
                             "present any images it has acquired, but cannot acquire any more.");
    }

    auto physical_device_state = getPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
        uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
                                                 [=](VkImage image) { return getImageState(dev_data, image)->acquired; });
        if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        reinterpret_cast<uint64_t const &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES, "DS",
                        "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
                        acquired_images);
        }
    }
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);

    lock.lock();
    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
        if (pFence) {
            pFence->state = FENCE_INFLIGHT;
            pFence->signaler.first = VK_NULL_HANDLE; // ANI isn't on a queue, so this can't participate in a completion proof.
        }

        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
        if (pSemaphore) {
            pSemaphore->signaled = true;
            pSemaphore->signaler.first = VK_NULL_HANDLE;
        }

        // Mark the image as acquired.
12394 auto image = swapchain_data->images[*pImageIndex]; 12395 auto image_state = getImageState(dev_data, image); 12396 image_state->acquired = true; 12397 } 12398 lock.unlock(); 12399 12400 return result; 12401} 12402 12403VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, 12404 VkPhysicalDevice *pPhysicalDevices) { 12405 bool skip_call = false; 12406 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map); 12407 12408 if (instance_data) { 12409 // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS 12410 if (NULL == pPhysicalDevices) { 12411 instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT; 12412 } else { 12413 if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) { 12414 // Flag warning here. You can call this without having queried the count, but it may not be 12415 // robust on platforms with multiple physical devices. 12416 skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 12417 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL", 12418 "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first " 12419 "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount."); 12420 } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state 12421 else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) { 12422 // Having actual count match count from app is not a requirement, so this can be a warning 12423 skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, 12424 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL", 12425 "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count " 12426 "supported by this instance is %u.", 12427 *pPhysicalDeviceCount, instance_data->physical_devices_count); 12428 } 12429 instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS; 12430 } 12431 if (skip_call) { 12432 return VK_ERROR_VALIDATION_FAILED_EXT; 12433 } 12434 VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices); 12435 if (NULL == pPhysicalDevices) { 12436 instance_data->physical_devices_count = *pPhysicalDeviceCount; 12437 } else if (result == VK_SUCCESS){ // Save physical devices 12438 for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) { 12439 auto & phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]]; 12440 phys_device_state.phys_device = pPhysicalDevices[i]; 12441 // Init actual features for each physical device 12442 instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features); 12443 } 12444 } 12445 return result; 12446 } else { 12447 // This seems redundant with object_tracker 12448 log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, 12449 VALIDATION_ERROR_00023, "DL", "Invalid instance (0x%p) passed into vkEnumeratePhysicalDevices(). 
%s", instance, 12450 validation_error_map[VALIDATION_ERROR_00023]); 12451 } 12452 return VK_ERROR_VALIDATION_FAILED_EXT; 12453} 12454 12455VKAPI_ATTR void VKAPI_CALL 12456GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, 12457 VkQueueFamilyProperties *pQueueFamilyProperties) { 12458 bool skip_call = false; 12459 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map); 12460 auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice); 12461 if (physical_device_state) { 12462 if (!pQueueFamilyProperties) { 12463 physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT; 12464 } 12465 else { 12466 // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to 12467 // get count 12468 if (UNCALLED == physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) { 12469 skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, 12470 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL", 12471 "Call sequence has vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL " 12472 "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ " 12473 "NULL pQueueFamilyProperties to query pCount."); 12474 } 12475 // Then verify that pCount that is passed in on second call matches what was returned 12476 if (physical_device_state->queueFamilyPropertiesCount != *pCount) { 12477 12478 // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so 12479 // provide as warning 12480 skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, 12481 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL", 12482 "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count " 12483 "supported by this physicalDevice is %u.", 12484 *pCount, physical_device_state->queueFamilyPropertiesCount); 12485 } 12486 physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS; 12487 } 12488 if (skip_call) { 12489 return; 12490 } 12491 instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, pQueueFamilyProperties); 12492 if (!pQueueFamilyProperties) { 12493 physical_device_state->queueFamilyPropertiesCount = *pCount; 12494 } 12495 else { // Save queue family properties 12496 if (physical_device_state->queue_family_properties.size() < *pCount) 12497 physical_device_state->queue_family_properties.resize(*pCount); 12498 for (uint32_t i = 0; i < *pCount; i++) { 12499 physical_device_state->queue_family_properties[i] = pQueueFamilyProperties[i]; 12500 } 12501 } 12502 } 12503 else { 12504 log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, 12505 __LINE__, VALIDATION_ERROR_00028, "DL", 12506 "Invalid physicalDevice (0x%p) passed into vkGetPhysicalDeviceQueueFamilyProperties(). 
%s", physicalDevice, 12507 validation_error_map[VALIDATION_ERROR_00028]); 12508 } 12509} 12510 12511template<typename TCreateInfo, typename FPtr> 12512static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, 12513 VkAllocationCallbacks const *pAllocator, VkSurfaceKHR *pSurface, 12514 FPtr fptr) 12515{ 12516 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map); 12517 12518 // Call down the call chain: 12519 VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface); 12520 12521 if (result == VK_SUCCESS) { 12522 std::unique_lock<std::mutex> lock(global_lock); 12523 instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface); 12524 lock.unlock(); 12525 } 12526 12527 return result; 12528} 12529 12530VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) { 12531 bool skip_call = false; 12532 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map); 12533 std::unique_lock<std::mutex> lock(global_lock); 12534 auto surface_state = getSurfaceState(instance_data, surface); 12535 12536 if (surface_state) { 12537 // TODO: track swapchains created from this surface. 12538 instance_data->surface_map.erase(surface); 12539 } 12540 lock.unlock(); 12541 12542 if (!skip_call) { 12543 // Call down the call chain: 12544 instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator); 12545 } 12546} 12547 12548VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo, 12549 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { 12550 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR); 12551} 12552 12553#ifdef VK_USE_PLATFORM_ANDROID_KHR 12554VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo, 12555 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { 12556 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR); 12557} 12558#endif // VK_USE_PLATFORM_ANDROID_KHR 12559 12560#ifdef VK_USE_PLATFORM_MIR_KHR 12561VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo, 12562 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { 12563 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR); 12564} 12565#endif // VK_USE_PLATFORM_MIR_KHR 12566 12567#ifdef VK_USE_PLATFORM_WAYLAND_KHR 12568VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo, 12569 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { 12570 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR); 12571} 12572#endif // VK_USE_PLATFORM_WAYLAND_KHR 12573 12574#ifdef VK_USE_PLATFORM_WIN32_KHR 12575VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo, 12576 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { 12577 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, 
&VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR); 12578} 12579#endif // VK_USE_PLATFORM_WIN32_KHR 12580 12581#ifdef VK_USE_PLATFORM_XCB_KHR 12582VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo, 12583 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { 12584 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR); 12585} 12586#endif // VK_USE_PLATFORM_XCB_KHR 12587 12588#ifdef VK_USE_PLATFORM_XLIB_KHR 12589VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo, 12590 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { 12591 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR); 12592} 12593#endif // VK_USE_PLATFORM_XLIB_KHR 12594 12595 12596VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, 12597 VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) { 12598 auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map); 12599 12600 std::unique_lock<std::mutex> lock(global_lock); 12601 auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice); 12602 lock.unlock(); 12603 12604 auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, 12605 pSurfaceCapabilities); 12606 12607 if (result == VK_SUCCESS) { 12608 physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS; 12609 physical_device_state->surfaceCapabilities = *pSurfaceCapabilities; 12610 } 12611 12612 return result; 12613} 12614 12615 12616VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, 12617 VkSurfaceKHR surface, VkBool32 *pSupported) { 12618 auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map); 12619 std::unique_lock<std::mutex> lock(global_lock); 12620 auto surface_state = getSurfaceState(instance_data, surface); 12621 lock.unlock(); 12622 12623 auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, 12624 pSupported); 12625 12626 if (result == VK_SUCCESS) { 12627 surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported != 0); 12628 } 12629 12630 return result; 12631} 12632 12633 12634VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, 12635 uint32_t *pPresentModeCount, 12636 VkPresentModeKHR *pPresentModes) { 12637 bool skip_call = false; 12638 auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map); 12639 std::unique_lock<std::mutex> lock(global_lock); 12640 // TODO: this isn't quite right. available modes may differ by surface AND physical device. 
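
// Illustrative only: the call-state tracking below (UNCALLED -> QUERY_COUNT -> QUERY_DETAILS)
// models the standard two-call query pattern. Application-side usage, with placeholder
// handles and error handling omitted, looks roughly like:
//
//     uint32_t mode_count = 0;
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, nullptr);
//     std::vector<VkPresentModeKHR> modes(mode_count);
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, modes.data());
//
// Skipping the count query, or passing back a count that differs from the one previously
// returned, produces the DEVLIMITS warnings emitted below.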
12641 auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice); 12642 auto & call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState; 12643 12644 if (pPresentModes) { 12645 // Compare the preliminary value of *pPresentModeCount with the value this time: 12646 auto prev_mode_count = (uint32_t) physical_device_state->present_modes.size(); 12647 switch (call_state) { 12648 case UNCALLED: 12649 skip_call |= log_msg( 12650 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 12651 reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL", 12652 "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModeCount; but no prior positive " 12653 "value has been seen for pPresentModeCount."); 12654 break; 12655 default: 12656 // both query count and query details 12657 if (*pPresentModeCount != prev_mode_count) { 12658 skip_call |= log_msg( 12659 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 12660 reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL", 12661 "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that differs from the value " 12662 "(%u) that was returned when pPresentModes was NULL.", 12663 *pPresentModeCount, prev_mode_count); 12664 } 12665 break; 12666 } 12667 } 12668 lock.unlock(); 12669 12670 if (skip_call) 12671 return VK_ERROR_VALIDATION_FAILED_EXT; 12672 12673 auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount, pPresentModes); 12674 12675 if (result == VK_SUCCESS || result == VK_INCOMPLETE) { 12676 12677 lock.lock(); 12678 12679 if (*pPresentModeCount) { 12680 if (call_state < QUERY_COUNT) call_state = QUERY_COUNT; 12681 if (*pPresentModeCount > physical_device_state->present_modes.size()) 12682 physical_device_state->present_modes.resize(*pPresentModeCount); 12683 } 12684 if (pPresentModes) { 12685 if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS; 12686 for (uint32_t i = 0; i < *pPresentModeCount; i++) { 12687 physical_device_state->present_modes[i] = pPresentModes[i]; 12688 } 12689 } 12690 } 12691 12692 return result; 12693} 12694 12695 12696VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, 12697 uint32_t *pSurfaceFormatCount, 12698 VkSurfaceFormatKHR *pSurfaceFormats) { 12699 bool skip_call = false; 12700 auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map); 12701 std::unique_lock<std::mutex> lock(global_lock); 12702 auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice); 12703 auto & call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState; 12704 12705 if (pSurfaceFormats) { 12706 auto prev_format_count = (uint32_t) physical_device_state->surface_formats.size(); 12707 12708 switch (call_state) { 12709 case UNCALLED: 12710 // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means that the application didn't 12711 // previously call this function with a NULL value of pSurfaceFormats: 12712 skip_call |= log_msg( 12713 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 12714 reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL", 12715 
"vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount; but no prior positive " 12716 "value has been seen for pSurfaceFormats."); 12717 break; 12718 default: 12719 if (prev_format_count != *pSurfaceFormatCount) { 12720 skip_call |= log_msg( 12721 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 12722 reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL", 12723 "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount, and with pSurfaceFormats set to " 12724 "a value (%u) that is greater than the value (%u) that was returned when pSurfaceFormatCount was NULL.", 12725 *pSurfaceFormatCount, prev_format_count); 12726 } 12727 break; 12728 } 12729 } 12730 lock.unlock(); 12731 12732 if (skip_call) 12733 return VK_ERROR_VALIDATION_FAILED_EXT; 12734 12735 // Call down the call chain: 12736 auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount, 12737 pSurfaceFormats); 12738 12739 if (result == VK_SUCCESS || result == VK_INCOMPLETE) { 12740 12741 lock.lock(); 12742 12743 if (*pSurfaceFormatCount) { 12744 if (call_state < QUERY_COUNT) call_state = QUERY_COUNT; 12745 if (*pSurfaceFormatCount > physical_device_state->surface_formats.size()) 12746 physical_device_state->surface_formats.resize(*pSurfaceFormatCount); 12747 } 12748 if (pSurfaceFormats) { 12749 if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS; 12750 for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) { 12751 physical_device_state->surface_formats[i] = pSurfaceFormats[i]; 12752 } 12753 } 12754 } 12755 return result; 12756} 12757 12758 12759VKAPI_ATTR VkResult VKAPI_CALL 12760CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo, 12761 const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) { 12762 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map); 12763 VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback); 12764 if (VK_SUCCESS == res) { 12765 std::lock_guard<std::mutex> lock(global_lock); 12766 res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback); 12767 } 12768 return res; 12769} 12770 12771VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, 12772 VkDebugReportCallbackEXT msgCallback, 12773 const VkAllocationCallbacks *pAllocator) { 12774 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map); 12775 instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator); 12776 std::lock_guard<std::mutex> lock(global_lock); 12777 layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator); 12778} 12779 12780VKAPI_ATTR void VKAPI_CALL 12781DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object, 12782 size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) { 12783 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map); 12784 instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg); 12785} 12786 12787VKAPI_ATTR VkResult VKAPI_CALL 
12788EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) { 12789 return util_GetLayerProperties(1, &global_layer, pCount, pProperties); 12790} 12791 12792VKAPI_ATTR VkResult VKAPI_CALL 12793EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) { 12794 return util_GetLayerProperties(1, &global_layer, pCount, pProperties); 12795} 12796 12797VKAPI_ATTR VkResult VKAPI_CALL 12798EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) { 12799 if (pLayerName && !strcmp(pLayerName, global_layer.layerName)) 12800 return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties); 12801 12802 return VK_ERROR_LAYER_NOT_PRESENT; 12803} 12804 12805VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, 12806 const char *pLayerName, uint32_t *pCount, 12807 VkExtensionProperties *pProperties) { 12808 if (pLayerName && !strcmp(pLayerName, global_layer.layerName)) 12809 return util_GetExtensionProperties(0, NULL, pCount, pProperties); 12810 12811 assert(physicalDevice); 12812 12813 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map); 12814 return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties); 12815} 12816 12817static PFN_vkVoidFunction 12818intercept_core_instance_command(const char *name); 12819 12820static PFN_vkVoidFunction 12821intercept_core_device_command(const char *name); 12822 12823static PFN_vkVoidFunction 12824intercept_khr_swapchain_command(const char *name, VkDevice dev); 12825 12826static PFN_vkVoidFunction 12827intercept_khr_surface_command(const char *name, VkInstance instance); 12828 12829VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) { 12830 PFN_vkVoidFunction proc = intercept_core_device_command(funcName); 12831 if (proc) 12832 return proc; 12833 12834 assert(dev); 12835 12836 proc = intercept_khr_swapchain_command(funcName, dev); 12837 if (proc) 12838 return proc; 12839 12840 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map); 12841 12842 auto &table = dev_data->dispatch_table; 12843 if (!table.GetDeviceProcAddr) 12844 return nullptr; 12845 return table.GetDeviceProcAddr(dev, funcName); 12846} 12847 12848VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) { 12849 PFN_vkVoidFunction proc = intercept_core_instance_command(funcName); 12850 if (!proc) 12851 proc = intercept_core_device_command(funcName); 12852 if (!proc) 12853 proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE); 12854 if (!proc) 12855 proc = intercept_khr_surface_command(funcName, instance); 12856 if (proc) 12857 return proc; 12858 12859 assert(instance); 12860 12861 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map); 12862 proc = debug_report_get_instance_proc_addr(instance_data->report_data, funcName); 12863 if (proc) 12864 return proc; 12865 12866 auto &table = instance_data->dispatch_table; 12867 if (!table.GetInstanceProcAddr) 12868 return nullptr; 12869 return table.GetInstanceProcAddr(instance, funcName); 12870} 12871 12872static PFN_vkVoidFunction 12873intercept_core_instance_command(const char *name) { 12874 static const struct { 12875 const char *name; 12876 PFN_vkVoidFunction proc; 12877 } 
core_instance_commands[] = { 12878 { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) }, 12879 { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) }, 12880 { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) }, 12881 { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) }, 12882 { "vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices) }, 12883 { "vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties) }, 12884 { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) }, 12885 { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) }, 12886 { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) }, 12887 { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) }, 12888 { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) }, 12889 }; 12890 12891 for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) { 12892 if (!strcmp(core_instance_commands[i].name, name)) 12893 return core_instance_commands[i].proc; 12894 } 12895 12896 return nullptr; 12897} 12898 12899static PFN_vkVoidFunction 12900intercept_core_device_command(const char *name) { 12901 static const struct { 12902 const char *name; 12903 PFN_vkVoidFunction proc; 12904 } core_device_commands[] = { 12905 {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)}, 12906 {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)}, 12907 {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)}, 12908 {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)}, 12909 {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)}, 12910 {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)}, 12911 {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)}, 12912 {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)}, 12913 {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)}, 12914 {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)}, 12915 {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)}, 12916 {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)}, 12917 {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)}, 12918 {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)}, 12919 {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)}, 12920 {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)}, 12921 {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)}, 12922 {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)}, 12923 {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)}, 12924 {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)}, 12925 {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)}, 12926 {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)}, 12927 {"vkDestroyDescriptorSetLayout", 
reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)}, 12928 {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)}, 12929 {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)}, 12930 {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)}, 12931 {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)}, 12932 {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)}, 12933 {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)}, 12934 {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)}, 12935 {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)}, 12936 {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)}, 12937 {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)}, 12938 {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)}, 12939 {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)}, 12940 {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)}, 12941 {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)}, 12942 {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)}, 12943 {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)}, 12944 {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)}, 12945 {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)}, 12946 {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)}, 12947 {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)}, 12948 {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)}, 12949 {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)}, 12950 {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)}, 12951 {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)}, 12952 {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)}, 12953 {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)}, 12954 {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)}, 12955 {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)}, 12956 {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)}, 12957 {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)}, 12958 {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)}, 12959 {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)}, 12960 {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)}, 12961 {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)}, 12962 {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)}, 12963 {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)}, 12964 {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)}, 12965 {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)}, 12966 {"vkCmdSetStencilCompareMask", 
reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)}, 12967 {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)}, 12968 {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)}, 12969 {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)}, 12970 {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)}, 12971 {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)}, 12972 {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)}, 12973 {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)}, 12974 {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)}, 12975 {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)}, 12976 {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)}, 12977 {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)}, 12978 {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)}, 12979 {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)}, 12980 {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)}, 12981 {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)}, 12982 {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)}, 12983 {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)}, 12984 {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)}, 12985 {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)}, 12986 {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)}, 12987 {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)}, 12988 {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)}, 12989 {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)}, 12990 {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)}, 12991 {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)}, 12992 {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)}, 12993 {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)}, 12994 {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)}, 12995 {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)}, 12996 {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)}, 12997 {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)}, 12998 {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)}, 12999 {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)}, 13000 {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)}, 13001 {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)}, 13002 {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)}, 13003 {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)}, 13004 {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)}, 13005 {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)}, 13006 {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)}, 13007 {"vkMapMemory", 
reinterpret_cast<PFN_vkVoidFunction>(MapMemory)}, 13008 {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)}, 13009 {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)}, 13010 {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)}, 13011 {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)}, 13012 {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)}, 13013 {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)}, 13014 {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)}, 13015 {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)}, 13016 {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)}, 13017 {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)}, 13018 {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)}, 13019 {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)}, 13020 {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)}, 13021 }; 13022 13023 for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) { 13024 if (!strcmp(core_device_commands[i].name, name)) 13025 return core_device_commands[i].proc; 13026 } 13027 13028 return nullptr; 13029} 13030 13031static PFN_vkVoidFunction 13032intercept_khr_swapchain_command(const char *name, VkDevice dev) { 13033 static const struct { 13034 const char *name; 13035 PFN_vkVoidFunction proc; 13036 } khr_swapchain_commands[] = { 13037 { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) }, 13038 { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) }, 13039 { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) }, 13040 { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) }, 13041 { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) }, 13042 }; 13043 layer_data *dev_data = nullptr; 13044 13045 if (dev) { 13046 dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map); 13047 if (!dev_data->device_extensions.wsi_enabled) 13048 return nullptr; 13049 } 13050 13051 for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) { 13052 if (!strcmp(khr_swapchain_commands[i].name, name)) 13053 return khr_swapchain_commands[i].proc; 13054 } 13055 13056 if (dev_data) { 13057 if (!dev_data->device_extensions.wsi_display_swapchain_enabled) 13058 return nullptr; 13059 } 13060 13061 if (!strcmp("vkCreateSharedSwapchainsKHR", name)) 13062 return reinterpret_cast<PFN_vkVoidFunction>(CreateSharedSwapchainsKHR); 13063 13064 return nullptr; 13065} 13066 13067static PFN_vkVoidFunction 13068intercept_khr_surface_command(const char *name, VkInstance instance) { 13069 static const struct { 13070 const char *name; 13071 PFN_vkVoidFunction proc; 13072 bool instance_layer_data::*enable; 13073 } khr_surface_commands[] = { 13074#ifdef VK_USE_PLATFORM_ANDROID_KHR 13075 {"vkCreateAndroidSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateAndroidSurfaceKHR), 13076 &instance_layer_data::androidSurfaceExtensionEnabled}, 13077#endif // VK_USE_PLATFORM_ANDROID_KHR 13078#ifdef VK_USE_PLATFORM_MIR_KHR 13079 {"vkCreateMirSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateMirSurfaceKHR), 13080 &instance_layer_data::mirSurfaceExtensionEnabled}, 
13081#endif // VK_USE_PLATFORM_MIR_KHR 13082#ifdef VK_USE_PLATFORM_WAYLAND_KHR 13083 {"vkCreateWaylandSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWaylandSurfaceKHR), 13084 &instance_layer_data::waylandSurfaceExtensionEnabled}, 13085#endif // VK_USE_PLATFORM_WAYLAND_KHR 13086#ifdef VK_USE_PLATFORM_WIN32_KHR 13087 {"vkCreateWin32SurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWin32SurfaceKHR), 13088 &instance_layer_data::win32SurfaceExtensionEnabled}, 13089#endif // VK_USE_PLATFORM_WIN32_KHR 13090#ifdef VK_USE_PLATFORM_XCB_KHR 13091 {"vkCreateXcbSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXcbSurfaceKHR), 13092 &instance_layer_data::xcbSurfaceExtensionEnabled}, 13093#endif // VK_USE_PLATFORM_XCB_KHR 13094#ifdef VK_USE_PLATFORM_XLIB_KHR 13095 {"vkCreateXlibSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXlibSurfaceKHR), 13096 &instance_layer_data::xlibSurfaceExtensionEnabled}, 13097#endif // VK_USE_PLATFORM_XLIB_KHR 13098 { "vkCreateDisplayPlaneSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateDisplayPlaneSurfaceKHR), 13099 &instance_layer_data::displayExtensionEnabled}, 13100 {"vkDestroySurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySurfaceKHR), 13101 &instance_layer_data::surfaceExtensionEnabled}, 13102 {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceCapabilitiesKHR), 13103 &instance_layer_data::surfaceExtensionEnabled}, 13104 {"vkGetPhysicalDeviceSurfaceSupportKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceSupportKHR), 13105 &instance_layer_data::surfaceExtensionEnabled}, 13106 {"vkGetPhysicalDeviceSurfacePresentModesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfacePresentModesKHR), 13107 &instance_layer_data::surfaceExtensionEnabled}, 13108 {"vkGetPhysicalDeviceSurfaceFormatsKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceFormatsKHR), 13109 &instance_layer_data::surfaceExtensionEnabled}, 13110 }; 13111 13112 instance_layer_data *instance_data = nullptr; 13113 if (instance) { 13114 instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map); 13115 } 13116 13117 for (size_t i = 0; i < ARRAY_SIZE(khr_surface_commands); i++) { 13118 if (!strcmp(khr_surface_commands[i].name, name)) { 13119 if (instance_data && !(instance_data->*(khr_surface_commands[i].enable))) 13120 return nullptr; 13121 return khr_surface_commands[i].proc; 13122 } 13123 } 13124 13125 return nullptr; 13126} 13127 13128} // namespace core_validation 13129 13130// vk_layer_logging.h expects these to be defined 13131 13132VKAPI_ATTR VkResult VKAPI_CALL 13133vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo, 13134 const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) { 13135 return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback); 13136} 13137 13138VKAPI_ATTR void VKAPI_CALL 13139vkDestroyDebugReportCallbackEXT(VkInstance instance, 13140 VkDebugReportCallbackEXT msgCallback, 13141 const VkAllocationCallbacks *pAllocator) { 13142 core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator); 13143} 13144 13145VKAPI_ATTR void VKAPI_CALL 13146vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object, 13147 size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) { 13148 
core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg); 13149} 13150 13151// loader-layer interface v0, just wrappers since there is only a layer 13152 13153VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 13154vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) { 13155 return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties); 13156} 13157 13158VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 13159vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) { 13160 return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties); 13161} 13162 13163VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 13164vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) { 13165 // the layer command handles VK_NULL_HANDLE just fine internally 13166 assert(physicalDevice == VK_NULL_HANDLE); 13167 return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties); 13168} 13169 13170VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, 13171 const char *pLayerName, uint32_t *pCount, 13172 VkExtensionProperties *pProperties) { 13173 // the layer command handles VK_NULL_HANDLE just fine internally 13174 assert(physicalDevice == VK_NULL_HANDLE); 13175 return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties); 13176} 13177 13178VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) { 13179 return core_validation::GetDeviceProcAddr(dev, funcName); 13180} 13181 13182VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) { 13183 return core_validation::GetInstanceProcAddr(instance, funcName); 13184} 13185
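
// Illustrative only: applications opt in to this layer at instance creation time. The name
// shown is the one this layer is conventionally registered under; surface/swapchain
// extensions needed for the WSI checks above are omitted for brevity:
//
//     const char *layers[] = {"VK_LAYER_LUNARG_core_validation"};
//     VkInstanceCreateInfo ci = {VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO};
//     ci.enabledLayerCount = 1;
//     ci.ppEnabledLayerNames = layers;
//     VkInstance instance;
//     vkCreateInstance(&ci, nullptr, &instance);
//
// The loader then routes calls through the vkGetInstanceProcAddr / vkGetDeviceProcAddr
// entry points defined above.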