1/* Copyright (c) 2015-2016 The Khronos Group Inc. 2 * Copyright (c) 2015-2016 Valve Corporation 3 * Copyright (c) 2015-2016 LunarG, Inc. 4 * Copyright (C) 2015-2016 Google Inc. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and/or associated documentation files (the "Materials"), to 8 * deal in the Materials without restriction, including without limitation the 9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 10 * sell copies of the Materials, and to permit persons to whom the Materials 11 * are furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice(s) and this permission notice shall be included 14 * in all copies or substantial portions of the Materials. 15 * 16 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS
 *
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Mike Stroyan <mike@LunarG.com>
 * Author: Tobin Ehlis <tobin@lunarg.com>
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unordered_map>
#include <memory>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
// NOTE(review): this ignore/re-enable pragma pair brackets nothing -- the
// include it presumably guarded appears to have been removed. Harmless, but a
// candidate for deletion.
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "device_limits.h"
#include "vulkan/vk_layer.h"
#include "vk_layer_config.h"
#include "vk_enum_validate_helper.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_logging.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"

// This struct will be stored in a map hashed by the dispatchable object
// (one entry per dispatch key: instance, physical device, or device).
struct layer_data {
    debug_report_data *report_data;                          // debug-report state for message routing
    std::vector<VkDebugReportCallbackEXT> logging_callback;  // callbacks created from layer settings
    VkLayerDispatchTable *device_dispatch_table;             // next-layer device entrypoints
    VkLayerInstanceDispatchTable *instance_dispatch_table;   // next-layer instance entrypoints
    // Track state of each instance
    unique_ptr<INSTANCE_STATE> instanceState;
    unique_ptr<PHYSICAL_DEVICE_STATE> physicalDeviceState;
    VkPhysicalDeviceFeatures actualPhysicalDeviceFeatures;    // features the ICD reported
    VkPhysicalDeviceFeatures requestedPhysicalDeviceFeatures; // features the app enabled at vkCreateDevice()
    unordered_map<VkDevice, VkPhysicalDeviceProperties> physDevPropertyMap; // per-logical-device limits cache

    // Track physical device per logical device
    VkPhysicalDevice physicalDevice;
    // Vector indices correspond to queueFamilyIndex
    vector<unique_ptr<VkQueueFamilyProperties>> queueFamilyProperties;

    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), instanceState(nullptr),
          physicalDeviceState(nullptr), actualPhysicalDeviceFeatures(), requestedPhysicalDeviceFeatures(), physicalDevice(){};
};

// Global map of dispatch key -> layer_data; entries created lazily by get_my_data_ptr().
static unordered_map<void *, layer_data *> layer_data_map;

// TODO : This can be much smarter, using separate locks for separate global data
static int globalLockInitialized = 0;
static loader_platform_thread_mutex globalLock;

// Explicit instantiation so the accessor is emitted in this translation unit.
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

// Read this layer's settings ("lunarg_device_limits" section), register any
// configured logging callbacks, and lazily create the global lock on first use.
static void init_device_limits(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {

    layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_device_limits");

    if (!globalLockInitialized) {
        // TODO/TBD: Need to delete this mutex sometime. How??? One
        // suggestion is to call this during vkCreateInstance(), and then we
        // can clean it up during vkDestroyInstance(). However, that requires
        // that the layer have per-instance locks. We need to come back and
        // address this soon.
        loader_platform_thread_create_mutex(&globalLock);
        globalLockInitialized = 1;
    }
}

// The only instance extension this layer itself exposes is VK_EXT_debug_report.
static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};

// Report this layer's instance extensions (the loader routes only queries for
// this layer here, so pLayerName is not inspected).
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
}

// NULL pLayerName: forward the query down the chain; otherwise this layer
// exposes no device extensions of its own.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                   const char *pLayerName, uint32_t *pCount,
                                                                                   VkExtensionProperties *pProperties) {
    if (pLayerName == NULL) {
        dispatch_key key = get_dispatch_key(physicalDevice);
        layer_data *my_data = get_my_data_ptr(key, layer_data_map);
        return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
    } else {
        return util_GetExtensionProperties(0, nullptr, pCount, pProperties);
    }
}

static const VkLayerProperties dl_global_layers[] = {{
    "VK_LAYER_LUNARG_device_limits", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(ARRAY_SIZE(dl_global_layers), dl_global_layers, pCount, pProperties);
}

// Device layers mirror the instance layer list.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(ARRAY_SIZE(dl_global_layers), dl_global_layers, pCount, pProperties);
}

// Create the instance down the chain, then build this layer's instance
// dispatch table, debug-report state, and per-instance tracking state.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
    VkLayerInstanceCreateInfo
    *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS)
        return result;

    // Per-instance layer state, keyed by the new instance's dispatch key.
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
    my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
    layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);

    my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance,
                                                        pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);

    init_device_limits(my_data, pAllocator);
    my_data->instanceState = unique_ptr<INSTANCE_STATE>(new INSTANCE_STATE());

    return VK_SUCCESS;
}

/* hook DestroyInstance to remove tableInstanceMap entry */
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    dispatch_key key = get_dispatch_key(instance);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyInstance(instance, pAllocator);

    // Clean up logging callback, if any
    while (my_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
        my_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(my_data->report_data);
    delete my_data->instance_dispatch_table;
    layer_data_map.erase(key);
    if (layer_data_map.empty()) {
        // Release mutex when destroying last instance.
        loader_platform_thread_delete_mutex(&globalLock);
        globalLockInitialized = 0;
    }
}

// Validate the count/detail two-call idiom for physical-device enumeration,
// then cache per-physical-device state (including the device's actual
// features) for later cross-checking in vkCreateDevice().
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, VkPhysicalDevice *pPhysicalDevices) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    if (my_data->instanceState) {
        // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
        if (NULL == pPhysicalDevices) {
            my_data->instanceState->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
        } else {
            if (UNCALLED == my_data->instanceState->vkEnumeratePhysicalDevicesState) {
                // Flag error here, shouldn't be calling this without having queried count
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0,
                            __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                            "Invalid call sequence to vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
                            "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
            } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
            else if (my_data->instanceState->physicalDevicesCount != *pPhysicalDeviceCount) {
                // TODO: Having actual count match count from app is not a requirement, so this can be a warning
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                                    "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
                                    "supported by this instance is %u.",
                                    *pPhysicalDeviceCount, my_data->instanceState->physicalDevicesCount);
            }
            my_data->instanceState->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
        }
        if (skipCall)
            return VK_ERROR_VALIDATION_FAILED_EXT;
        VkResult result =
            my_data->instance_dispatch_table->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
        if (NULL == pPhysicalDevices) {
            my_data->instanceState->physicalDevicesCount = *pPhysicalDeviceCount;
        } else { // Save physical devices
            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
                layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(pPhysicalDevices[i]), layer_data_map);
                phy_dev_data->physicalDeviceState = unique_ptr<PHYSICAL_DEVICE_STATE>(new PHYSICAL_DEVICE_STATE());
                // Init actual features for each physical device
                my_data->instance_dispatch_table->GetPhysicalDeviceFeatures(pPhysicalDevices[i],
                                                                            &(phy_dev_data->actualPhysicalDeviceFeatures));
            }
        }
        return result;
    } else {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__,
                DEVLIMITS_INVALID_INSTANCE, "DL", "Invalid instance (%#" PRIxLEAST64 ") passed into vkEnumeratePhysicalDevices().",
                (uint64_t)instance);
    }
    return
VK_ERROR_VALIDATION_FAILED_EXT;
}

// Record that features were explicitly queried, then pass the call down.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures *pFeatures) {
    layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
    // NOTE(review): physicalDeviceState is only allocated in vkEnumeratePhysicalDevices();
    // a physicalDevice reaching here without that call would deref null -- verify intended.
    phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceFeaturesState = QUERY_DETAILS;
    phy_dev_data->instance_dispatch_table->GetPhysicalDeviceFeatures(physicalDevice, pFeatures);
}

// Straight pass-through to the next layer.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties *pFormatProperties) {
    get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map)
        ->instance_dispatch_table->GetPhysicalDeviceFormatProperties(physicalDevice, format, pFormatProperties);
}

// Straight pass-through to the next layer.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkGetPhysicalDeviceImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling,
                                         VkImageUsageFlags usage, VkImageCreateFlags flags,
                                         VkImageFormatProperties *pImageFormatProperties) {
    return get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map)
        ->instance_dispatch_table->GetPhysicalDeviceImageFormatProperties(physicalDevice, format, type, tiling, usage, flags,
                                                                          pImageFormatProperties);
}

// Straight pass-through to the next layer.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties *pProperties) {
    layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
    phy_dev_data->instance_dispatch_table->GetPhysicalDeviceProperties(physicalDevice, pProperties);
}

// Validate the count/detail two-call idiom for queue-family queries, then
// cache the returned properties for later queue-creation validation.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                         VkQueueFamilyProperties *pQueueFamilyProperties) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
    if (phy_dev_data->physicalDeviceState) {
        if (NULL == pQueueFamilyProperties) {
            phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
        } else {
            // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to
            // get count
            if (UNCALLED == phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
                skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                                    "Invalid call sequence to vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL "
                                    "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ "
                                    "NULL pQueueFamilyProperties to query pCount.");
            }
            // Then verify that pCount that is passed in on second call matches what was returned
            if (phy_dev_data->physicalDeviceState->queueFamilyPropertiesCount != *pCount) {

                // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
                // provide as warning
                skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                                    "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count "
                                    "supported by this physicalDevice is %u.",
                                    *pCount, phy_dev_data->physicalDeviceState->queueFamilyPropertiesCount);
            }
            phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
        }
        if (skipCall)
            return;
        phy_dev_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount,
                                                                                      pQueueFamilyProperties);
        if (NULL == pQueueFamilyProperties) {
            phy_dev_data->physicalDeviceState->queueFamilyPropertiesCount = *pCount;
        } else { // Save queue family properties
            phy_dev_data->queueFamilyProperties.reserve(*pCount);
            for (uint32_t i = 0; i < *pCount; i++) {
                phy_dev_data->queueFamilyProperties.emplace_back(new VkQueueFamilyProperties(pQueueFamilyProperties[i]));
            }
        }
        return;
    } else {
        log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                __LINE__, DEVLIMITS_INVALID_PHYSICAL_DEVICE, "DL",
                "Invalid physicalDevice (%#" PRIxLEAST64 ") passed into vkGetPhysicalDeviceQueueFamilyProperties().",
                (uint64_t)physicalDevice);
    }
}

// Straight pass-through to the next layer.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties) {
    get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map)
        ->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
}

// Straight pass-through to the next layer.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type,
                                               VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling,
                                               uint32_t *pNumProperties, VkSparseImageFormatProperties *pProperties) {
    get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map)
        ->instance_dispatch_table->GetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage,
                                                                                tiling, pNumProperties, pProperties);
}

// Currently a pass-through; skipCall is a placeholder for future checks.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
    VkBool32 skipCall = VK_FALSE;
    /* TODO: Verify viewportCount < maxViewports from VkPhysicalDeviceLimits */
    if (VK_FALSE == skipCall) {
        layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
        my_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
    }
}

// Currently a pass-through; skipCall is a placeholder for future checks.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
    VkBool32 skipCall = VK_FALSE;
    /* TODO: Verify scissorCount < maxViewports from VkPhysicalDeviceLimits */
    /* TODO: viewportCount and scissorCount must match at draw time */
    if (VK_FALSE == skipCall) {
        layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
        my_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
    }
}

// Verify that features have been queried and verify that requested features are available
static VkBool32 validate_features_request(layer_data *phy_dev_data) {
    VkBool32 skipCall = VK_FALSE;
    // Verify that all of the requested features are available
    // Get ptrs into actual and requested structs and if requested is 1 but actual is 0, request is invalid
    VkBool32 *actual = (VkBool32 *)&(phy_dev_data->actualPhysicalDeviceFeatures);
    VkBool32 *requested = (VkBool32 *)&(phy_dev_data->requestedPhysicalDeviceFeatures);
    // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
    // Need to provide the struct member name with the issue. To do that seems like we'll
    // have to loop through each struct member which should be done w/ codegen to keep in synch.
    uint32_t errors = 0;
    // Treats VkPhysicalDeviceFeatures as a packed array of VkBool32s -- true
    // for the Vulkan 1.0 struct layout this code was written against.
    uint32_t totalBools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
    for (uint32_t i = 0; i < totalBools; i++) {
        if (requested[i] > actual[i]) {
            skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
                                "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
                                      "which is not available on this device.",
                                i);
            errors++;
        }
    }
    if (errors && (UNCALLED == phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceFeaturesState)) {
        // If user didn't request features, notify them that they should
        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
        skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
                            "You requested features that are unavailable on this device. You should first query feature "
                            "availability by calling vkGetPhysicalDeviceFeatures().");
    }
    return skipCall;
}

// Validate queue-family requests and requested features against what this
// physical device reported, then create the device down the chain.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                                              const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
    // First check is app has actually requested queueFamilyProperties
    if (!phy_dev_data->physicalDeviceState) {
        skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                            "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
    } else if (QUERY_DETAILS != phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
        // TODO: This is not called out as an invalid use in the spec so make more informative recommendation.
        skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
                            "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
    } else {
        // Check that the requested queue properties are valid
        for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
            uint32_t requestedIndex = pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex;
            if (phy_dev_data->queueFamilyProperties.size() <=
                requestedIndex) { // requested index is out of bounds for this physical device
                skipCall |= log_msg(
                    phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
                    "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
            } else if (pCreateInfo->pQueueCreateInfos[i].queueCount >
                       phy_dev_data->queueFamilyProperties[requestedIndex]->queueCount) {
                skipCall |=
                    log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
                            "DL", "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
                                  "requested queueCount is %u.",
                            requestedIndex, phy_dev_data->queueFamilyProperties[requestedIndex]->queueCount,
                            pCreateInfo->pQueueCreateInfos[i].queueCount);
            }
        }
    }
    // Check that any requested features are available
    if (pCreateInfo->pEnabledFeatures) {
        phy_dev_data->requestedPhysicalDeviceFeatures = *(pCreateInfo->pEnabledFeatures);
        skipCall |= validate_features_request(phy_dev_data);
    }
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    // NOTE(review): my_instance_data looks up the same key as phy_dev_data
    // above -- the two pointers are presumably identical; verify and simplify.
    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
    // Remember which GPU this logical device came from so device-level
    // entrypoints can reach the cached physical-device state.
    my_device_data->physicalDevice = gpu;

    // Get physical device properties for this device
    phy_dev_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(phy_dev_data->physDevPropertyMap[*pDevice]));
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    // Free device lifetime allocations
    dispatch_key key = get_dispatch_key(device);
    layer_data *my_device_data = get_my_data_ptr(key, layer_data_map);
    my_device_data->device_dispatch_table->DestroyDevice(device, pAllocator);
    delete my_device_data->device_dispatch_table;
    layer_data_map.erase(key);
}

// Pass-through wrapper.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                                   const VkAllocationCallbacks *pAllocator,
                                                                   VkCommandPool *pCommandPool) {
    // TODO : Verify that requested QueueFamilyIndex for this pool exists
    VkResult result = get_my_data_ptr(get_dispatch_key(device), layer_data_map)
                          ->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
    return result;
}

// Pass-through wrapper.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
}

// Pass-through wrapper.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    VkResult result = get_my_data_ptr(get_dispatch_key(device), layer_data_map)
                          ->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
    return result;
}

// Pass-through wrapper.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
    VkResult result = get_my_data_ptr(get_dispatch_key(device), layer_data_map)
                          ->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    return result;
}

// Pass-through wrapper.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t count, const VkCommandBuffer *pCommandBuffers) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->FreeCommandBuffers(device, commandPool, count, pCommandBuffers);
}

// Enforce inheritedQueries-related limits when recording begins.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
    // Inherited occlusion queries require the inheritedQueries device feature.
    if (dev_data->actualPhysicalDeviceFeatures.inheritedQueries == VK_FALSE && pInfo && pInfo->occlusionQueryEnable != VK_FALSE) {
        skipCall |= log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DEVLIMITS_INVALID_INHERITED_QUERY, "DL",
            "Cannot set inherited occlusionQueryEnable in vkBeginCommandBuffer() when device does not support inheritedQueries.");
    }
    // When inherited occlusion queries are used, queryFlags must be valid VkQueryControlFlagBits.
    if (dev_data->actualPhysicalDeviceFeatures.inheritedQueries != VK_FALSE && pInfo && pInfo->occlusionQueryEnable != VK_FALSE &&
        !validate_VkQueryControlFlagBits(VkQueryControlFlagBits(pInfo->queryFlags))) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DEVLIMITS_INVALID_INHERITED_QUERY, "DL",
                            "Cannot enable in occlusion queries in vkBeginCommandBuffer() and set queryFlags to %d which is not a "
                            "valid combination of VkQueryControlFlagBits.",
                            pInfo->queryFlags);
    }
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    if (!skipCall)
        result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
    return result;
}

// Validate queueFamilyIndex/queueIndex against the queue-family properties
// cached for the physical device this logical device was created from.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkPhysicalDevice gpu = dev_data->physicalDevice;
    layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
    if (queueFamilyIndex >=
        phy_dev_data->queueFamilyProperties.size()) { // requested index is out of bounds for this physical device
        skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
                            "DL", "Invalid queueFamilyIndex %u requested in vkGetDeviceQueue().", queueFamilyIndex);
    } else if (queueIndex >= phy_dev_data->queueFamilyProperties[queueFamilyIndex]->queueCount) {
        skipCall |= log_msg(
            phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
            DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
            "Invalid queue request in vkGetDeviceQueue(). QueueFamilyIndex %u only has %u queues, but requested queueIndex is %u.",
            queueFamilyIndex, phy_dev_data->queueFamilyProperties[queueFamilyIndex]->queueCount, queueIndex);
    }
    if (skipCall)
        return;
    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
}

// Check memoryOffset against minUniformBufferOffsetAlignment before binding.
// On a violation the driver call is skipped and VK_ERROR_VALIDATION_FAILED_EXT
// is returned.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skipCall = VK_FALSE;

    VkDeviceSize uniformAlignment = dev_data->physDevPropertyMap[device].limits.minUniformBufferOffsetAlignment;
    if (vk_safe_modulo(memoryOffset, uniformAlignment) != 0) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DEVLIMITS_INVALID_UNIFORM_BUFFER_OFFSET, "DL",
                            "vkBindBufferMemory(): memoryOffset %#" PRIxLEAST64
                            " must be a multiple of device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
                            memoryOffset, uniformAlignment);
    }

    if (VK_FALSE == skipCall) {
        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
    }
    return result;
}

// Check uniform/storage buffer offsets in descriptor writes against the
// device's minimum offset-alignment limits.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
                       uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skipCall = VK_FALSE;

    for (uint32_t i = 0; i < descriptorWriteCount; i++) {
        if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
            (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)) {
VkDeviceSize uniformAlignment = dev_data->physDevPropertyMap[device].limits.minUniformBufferOffsetAlignment; 586 for (uint32_t j = 0; j < pDescriptorWrites[i].descriptorCount; j++) { 587 if (vk_safe_modulo(pDescriptorWrites[i].pBufferInfo[j].offset, uniformAlignment) != 0) { 588 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 589 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, 590 DEVLIMITS_INVALID_UNIFORM_BUFFER_OFFSET, "DL", 591 "vkUpdateDescriptorSets(): pDescriptorWrites[%d].pBufferInfo[%d].offset (%#" PRIxLEAST64 592 ") must be a multiple of device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64, 593 i, j, pDescriptorWrites[i].pBufferInfo[j].offset, uniformAlignment); 594 } 595 } 596 } else if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) || 597 (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { 598 VkDeviceSize storageAlignment = dev_data->physDevPropertyMap[device].limits.minStorageBufferOffsetAlignment; 599 for (uint32_t j = 0; j < pDescriptorWrites[i].descriptorCount; j++) { 600 if (vk_safe_modulo(pDescriptorWrites[i].pBufferInfo[j].offset, storageAlignment) != 0) { 601 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 602 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, 603 DEVLIMITS_INVALID_STORAGE_BUFFER_OFFSET, "DL", 604 "vkUpdateDescriptorSets(): pDescriptorWrites[%d].pBufferInfo[%d].offset (%#" PRIxLEAST64 605 ") must be a multiple of device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64, 606 i, j, pDescriptorWrites[i].pBufferInfo[j].offset, storageAlignment); 607 } 608 } 609 } 610 } 611 if (skipCall == VK_FALSE) { 612 dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, 613 pDescriptorCopies); 614 } 615} 616 617VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, 618 
VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) { 619 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 620 621 // dstOffset is the byte offset into the buffer to start updating and must be a multiple of 4. 622 if (dstOffset & 3) { 623 layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 624 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "DL", 625 "vkCmdUpdateBuffer parameter, VkDeviceSize dstOffset, is not a multiple of 4")) { 626 return; 627 } 628 } 629 630 // dataSize is the number of bytes to update, which must be a multiple of 4. 631 if (dataSize & 3) { 632 layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 633 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "DL", 634 "vkCmdUpdateBuffer parameter, VkDeviceSize dataSize, is not a multiple of 4")) { 635 return; 636 } 637 } 638 639 dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData); 640} 641 642VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 643vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) { 644 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 645 646 // dstOffset is the byte offset into the buffer to start filling and must be a multiple of 4. 647 if (dstOffset & 3) { 648 layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 649 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "DL", 650 "vkCmdFillBuffer parameter, VkDeviceSize dstOffset, is not a multiple of 4")) { 651 return; 652 } 653 } 654 655 // size is the number of bytes to fill, which must be a multiple of 4. 
656 if (size & 3) { 657 layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 658 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "DL", 659 "vkCmdFillBuffer parameter, VkDeviceSize size, is not a multiple of 4")) { 660 return; 661 } 662 } 663 664 dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data); 665} 666 667VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 668vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo, 669 const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) { 670 layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); 671 VkResult res = my_data->instance_dispatch_table->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback); 672 if (VK_SUCCESS == res) { 673 res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback); 674 } 675 return res; 676} 677 678VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance, 679 VkDebugReportCallbackEXT msgCallback, 680 const VkAllocationCallbacks *pAllocator) { 681 layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); 682 my_data->instance_dispatch_table->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator); 683 layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator); 684} 685 686VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 687vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object, 688 size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) { 689 layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); 690 my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, 
msgCode, pLayerPrefix, 691 pMsg); 692} 693 694VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) { 695 if (!strcmp(funcName, "vkGetDeviceProcAddr")) 696 return (PFN_vkVoidFunction)vkGetDeviceProcAddr; 697 if (!strcmp(funcName, "vkDestroyDevice")) 698 return (PFN_vkVoidFunction)vkDestroyDevice; 699 if (!strcmp(funcName, "vkGetDeviceQueue")) 700 return (PFN_vkVoidFunction)vkGetDeviceQueue; 701 if (!strcmp(funcName, "CreateCommandPool")) 702 return (PFN_vkVoidFunction)vkCreateCommandPool; 703 if (!strcmp(funcName, "DestroyCommandPool")) 704 return (PFN_vkVoidFunction)vkDestroyCommandPool; 705 if (!strcmp(funcName, "ResetCommandPool")) 706 return (PFN_vkVoidFunction)vkResetCommandPool; 707 if (!strcmp(funcName, "vkAllocateCommandBuffers")) 708 return (PFN_vkVoidFunction)vkAllocateCommandBuffers; 709 if (!strcmp(funcName, "vkFreeCommandBuffers")) 710 return (PFN_vkVoidFunction)vkFreeCommandBuffers; 711 if (!strcmp(funcName, "vkBeginCommandBuffer")) 712 return (PFN_vkVoidFunction)vkBeginCommandBuffer; 713 if (!strcmp(funcName, "vkCmdUpdateBuffer")) 714 return (PFN_vkVoidFunction)vkCmdUpdateBuffer; 715 if (!strcmp(funcName, "vkBindBufferMemory")) 716 return (PFN_vkVoidFunction)vkBindBufferMemory; 717 if (!strcmp(funcName, "vkUpdateDescriptorSets")) 718 return (PFN_vkVoidFunction)vkUpdateDescriptorSets; 719 if (!strcmp(funcName, "vkCmdFillBuffer")) 720 return (PFN_vkVoidFunction)vkCmdFillBuffer; 721 722 if (dev == NULL) 723 return NULL; 724 725 layer_data *my_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map); 726 VkLayerDispatchTable *pTable = my_data->device_dispatch_table; 727 { 728 if (pTable->GetDeviceProcAddr == NULL) 729 return NULL; 730 return pTable->GetDeviceProcAddr(dev, funcName); 731 } 732} 733 734VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) { 735 PFN_vkVoidFunction fptr; 736 737 layer_data *my_data; 738 if 
(!strcmp(funcName, "vkGetInstanceProcAddr")) 739 return (PFN_vkVoidFunction)vkGetInstanceProcAddr; 740 if (!strcmp(funcName, "vkGetDeviceProcAddr")) 741 return (PFN_vkVoidFunction)vkGetDeviceProcAddr; 742 if (!strcmp(funcName, "vkCreateInstance")) 743 return (PFN_vkVoidFunction)vkCreateInstance; 744 if (!strcmp(funcName, "vkDestroyInstance")) 745 return (PFN_vkVoidFunction)vkDestroyInstance; 746 if (!strcmp(funcName, "vkCreateDevice")) 747 return (PFN_vkVoidFunction)vkCreateDevice; 748 if (!strcmp(funcName, "vkEnumeratePhysicalDevices")) 749 return (PFN_vkVoidFunction)vkEnumeratePhysicalDevices; 750 if (!strcmp(funcName, "vkGetPhysicalDeviceFeatures")) 751 return (PFN_vkVoidFunction)vkGetPhysicalDeviceFeatures; 752 if (!strcmp(funcName, "vkGetPhysicalDeviceFormatProperties")) 753 return (PFN_vkVoidFunction)vkGetPhysicalDeviceFormatProperties; 754 if (!strcmp(funcName, "vkGetPhysicalDeviceImageFormatProperties")) 755 return (PFN_vkVoidFunction)vkGetPhysicalDeviceImageFormatProperties; 756 if (!strcmp(funcName, "vkGetPhysicalDeviceProperties")) 757 return (PFN_vkVoidFunction)vkGetPhysicalDeviceProperties; 758 if (!strcmp(funcName, "vkGetPhysicalDeviceQueueFamilyProperties")) 759 return (PFN_vkVoidFunction)vkGetPhysicalDeviceQueueFamilyProperties; 760 if (!strcmp(funcName, "vkGetPhysicalDeviceMemoryProperties")) 761 return (PFN_vkVoidFunction)vkGetPhysicalDeviceMemoryProperties; 762 if (!strcmp(funcName, "vkGetPhysicalDeviceSparseImageFormatProperties")) 763 return (PFN_vkVoidFunction)vkGetPhysicalDeviceSparseImageFormatProperties; 764 if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties")) 765 return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties; 766 if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties")) 767 return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties; 768 if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties")) 769 return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties; 770 if (!strcmp(funcName, 
"vkEnumerateInstanceDeviceProperties")) 771 return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties; 772 773 if (!instance) 774 return NULL; 775 776 my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); 777 778 fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName); 779 if (fptr) 780 return fptr; 781 782 { 783 VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table; 784 if (pTable->GetInstanceProcAddr == NULL) 785 return NULL; 786 return pTable->GetInstanceProcAddr(instance, funcName); 787 } 788} 789