/* loader.c, revision 2db08044d7d9b09898395f53f053ba07c84cbe67 */
/*
 * Vulkan
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *   Chia-I Wu <olv@lunarg.com>
 *   Jon Ashburn <jon@lunarg.com>
 *   Courtney Goeltzenleuchter <courtney@lunarg.com>
 *   Ian Elliott <ian@lunarg.com>
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <string.h>

#include <sys/types.h>
#if defined(WIN32)
#include "dirent_on_windows.h"
#else // WIN32
#include <dirent.h>
#endif // WIN32
#include "vk_loader_platform.h"
#include "loader.h"
#include "gpa_helper.h"
#include "table_ops.h"
#include "debug_report.h"
#include "vk_icd.h"
#include "cJSON.h"

static loader_platform_dl_handle loader_add_layer_lib(
        const struct loader_instance *inst,
        const char *chain_type,
        struct loader_layer_properties *layer_prop);

static void loader_remove_layer_lib(
        struct loader_instance *inst,
        struct loader_layer_properties *layer_prop);

struct loader_struct loader = {0};
// TLS for instance for alloc/free callbacks
THREAD_LOCAL_DECL struct loader_instance *tls_instance;

static PFN_vkVoidFunction VKAPI loader_GetInstanceProcAddr(
        VkInstance instance,
        const char * pName);
static bool loader_init_ext_list(
        const struct loader_instance *inst,
        struct loader_extension_list *ext_info);

enum loader_debug {
    LOADER_INFO_BIT = 0x01,
    LOADER_WARN_BIT = 0x02,
    LOADER_PERF_BIT = 0x04,
    LOADER_ERROR_BIT = 0x08,
    LOADER_DEBUG_BIT = 0x10,
};

uint32_t g_loader_debug = 0;
uint32_t g_loader_log_msgs = 0;

// Thread-safety lock for accessing global data structures such as "loader".
// All entrypoints on the instance chain need to be locked except GPA;
// additionally CreateDevice and DestroyDevice need to be locked.
loader_platform_thread_mutex loader_lock;

// This table contains the loader's instance dispatch table, which contains
// default functions if no instance layers are activated. This contains
// pointers to "terminator functions".
const VkLayerInstanceDispatchTable instance_disp = {
    .GetInstanceProcAddr = loader_GetInstanceProcAddr,
    .CreateInstance = loader_CreateInstance,
    .DestroyInstance = loader_DestroyInstance,
    .EnumeratePhysicalDevices = loader_EnumeratePhysicalDevices,
    .GetPhysicalDeviceFeatures = loader_GetPhysicalDeviceFeatures,
    .GetPhysicalDeviceFormatProperties = loader_GetPhysicalDeviceFormatProperties,
    .GetPhysicalDeviceImageFormatProperties = loader_GetPhysicalDeviceImageFormatProperties,
    .GetPhysicalDeviceProperties = loader_GetPhysicalDeviceProperties,
    .GetPhysicalDeviceQueueFamilyProperties = loader_GetPhysicalDeviceQueueFamilyProperties,
    .GetPhysicalDeviceMemoryProperties = loader_GetPhysicalDeviceMemoryProperties,
    .GetPhysicalDeviceExtensionProperties = loader_GetPhysicalDeviceExtensionProperties,
    .GetPhysicalDeviceLayerProperties = loader_GetPhysicalDeviceLayerProperties,
    .GetPhysicalDeviceSparseImageFormatProperties = loader_GetPhysicalDeviceSparseImageFormatProperties,
    .GetPhysicalDeviceSurfaceSupportKHR = loader_GetPhysicalDeviceSurfaceSupportKHR,
    .DbgCreateMsgCallback = loader_DbgCreateMsgCallback,
    .DbgDestroyMsgCallback = loader_DbgDestroyMsgCallback,
};

LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init);

void* loader_heap_alloc(
    const struct loader_instance *instance,
    size_t size,
    VkSystemAllocType alloc_type)
{
    if (instance && instance->alloc_callbacks.pfnAlloc) {
        /* TODO: What should default alignment be? 1, 4, 8, other? */
        return instance->alloc_callbacks.pfnAlloc(instance->alloc_callbacks.pUserData, size, 4, alloc_type);
    }
    return malloc(size);
}

void* loader_aligned_heap_alloc(
    const struct loader_instance *instance,
    size_t size,
    size_t alignment,
    VkSystemAllocType alloc_type)
{
    if (instance && instance->alloc_callbacks.pfnAlloc) {
        return instance->alloc_callbacks.pfnAlloc(instance->alloc_callbacks.pUserData, size, alignment, alloc_type);
    }
#if defined(_WIN32)
    return _aligned_malloc(size, alignment);
#else
    return aligned_alloc(alignment, size);
#endif
}

void loader_heap_free(
    const struct loader_instance *instance,
    void *pMem)
{
    if (instance && instance->alloc_callbacks.pfnFree) {
        instance->alloc_callbacks.pfnFree(instance->alloc_callbacks.pUserData, pMem);
        return;
    }
    free(pMem);
}

void* loader_heap_realloc(
    const struct loader_instance *instance,
    void *pMem,
    size_t orig_size,
    size_t size,
    VkSystemAllocType alloc_type)
{
    if (pMem == NULL || orig_size == 0)
        return loader_heap_alloc(instance, size, alloc_type);
    if (size == 0) {
        loader_heap_free(instance, pMem);
        return NULL;
    }
    if (instance && instance->alloc_callbacks.pfnAlloc) {
        if (size <= orig_size) {
            // Shrinking in place: zero out the now-unused tail.
            memset(((uint8_t *)pMem) + size, 0, orig_size - size);
            return pMem;
        }
        void *new_ptr = instance->alloc_callbacks.pfnAlloc(instance->alloc_callbacks.pUserData, size, 4, alloc_type);
        if (!new_ptr)
            return NULL;
        memcpy(new_ptr, pMem, orig_size);
        instance->alloc_callbacks.pfnFree(instance->alloc_callbacks.pUserData, pMem);
        return new_ptr;
    }
    return realloc(pMem, size);
}

void *loader_tls_heap_alloc(size_t size)
{
    return loader_heap_alloc(tls_instance, size, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
}

void loader_tls_heap_free(void *pMem)
{
    return loader_heap_free(tls_instance, pMem);
}
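
/*
 * Usage note (illustrative, not part of the original source): the helpers
 * above funnel all loader allocations through the application's
 * VkAllocCallbacks when an instance is available and fall back to the C
 * runtime otherwise. Scratch memory tied to an instance would typically be
 * obtained and released as:
 *
 *     void *buf = loader_heap_alloc(inst, 256, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
 *     ...
 *     loader_heap_free(inst, buf);
 *
 * loader_heap_realloc() preserves the first orig_size bytes when growing and
 * zeroes the trailing bytes when shrinking in place.
 */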
static void loader_log(VkFlags msg_type, int32_t msg_code,
    const char *format, ...)
{
    char msg[512];
    va_list ap;
    int ret;

    if (!(msg_type & g_loader_log_msgs)) {
        return;
    }

    va_start(ap, format);
    ret = vsnprintf(msg, sizeof(msg), format, ap);
    if ((ret >= (int) sizeof(msg)) || ret < 0) {
        msg[sizeof(msg)-1] = '\0';
    }
    va_end(ap);

#if defined(WIN32)
    OutputDebugString(msg);
    OutputDebugString("\n");
#endif
    fputs(msg, stderr);
    fputc('\n', stderr);
}

#if defined(WIN32)
static char *loader_get_next_path(char *path);
/**
 * Find the list of registry files (names within a key) in key "location".
 *
 * This function looks in the registry (hive = DEFAULT_VK_REGISTRY_HIVE) key given in "location"
 * for a list of name/value pairs, which are added to the returned list (function return value).
 * The DWORD values within the key must be 0 or they are skipped.
 * The return value is a string with a ';'-separated list of filenames.
 * The return value is NULL if no valid name/value pairs are found in the key,
 * or the key is not found.
 *
 * \returns
 * A pointer to a string list of filenames.
 * When done using the returned string list, the pointer should be freed.
 */
static char *loader_get_registry_files(const struct loader_instance *inst, char *location)
{
    LONG rtn_value;
    HKEY hive, key;
    DWORD access_flags = KEY_QUERY_VALUE;
    char name[2048];
    char *out = NULL;
    char *loc = location;
    char *next;
    DWORD idx = 0;
    DWORD name_size = sizeof(name);
    DWORD value;
    DWORD total_size = 4096;
    DWORD value_size = sizeof(value);

    while (*loc)
    {
        next = loader_get_next_path(loc);
        hive = DEFAULT_VK_REGISTRY_HIVE;
        rtn_value = RegOpenKeyEx(hive, loc, 0, access_flags, &key);
        if (rtn_value != ERROR_SUCCESS) {
            // We didn't find the key. Try the 32-bit hive (where we've seen the
            // key end up on some people's systems):
            access_flags |= KEY_WOW64_32KEY;
            rtn_value = RegOpenKeyEx(hive, loc, 0, access_flags, &key);
            if (rtn_value != ERROR_SUCCESS) {
                // We still couldn't find the key, so give up:
                loc = next;
                continue;
            }
        }

        while ((rtn_value = RegEnumValue(key, idx++, name, &name_size, NULL, NULL, (LPBYTE) &value, &value_size)) == ERROR_SUCCESS) {
            if (value_size == sizeof(value) && value == 0) {
                if (out == NULL) {
                    out = loader_heap_alloc(inst, total_size, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
                    out[0] = '\0';
                }
                else if (strlen(out) + name_size + 1 > total_size) {
                    out = loader_heap_realloc(inst, out, total_size, total_size * 2, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
                    total_size *= 2;
                }
                if (out == NULL) {
                    loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory, failed loader_get_registry_files");
                    return NULL;
                }
                if (strlen(out) == 0)
                    snprintf(out, name_size + 1, "%s", name);
                else
                    snprintf(out + strlen(out), name_size + 2, "%c%s", PATH_SEPERATOR, name);
            }
            name_size = 2048;
        }
        loc = next;
    }

    return out;
}

#endif // WIN32
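
/*
 * Illustrative example (not from the original source): on Windows, manifest
 * files are typically advertised as registry values whose names are the
 * manifest paths and whose DWORD data is 0, e.g. under a key such as
 * HKEY_LOCAL_MACHINE\SOFTWARE\Khronos\Vulkan\Drivers:
 *
 *     C:\Vendor\vk_vendor_icd.json    REG_DWORD    0
 *
 * loader_get_registry_files() would then return the string
 * "C:\Vendor\vk_vendor_icd.json" (multiple entries joined with ';').
 */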
/**
 * Given a string of the three-part form "maj.min.patch", convert it to a
 * Vulkan version number.
 */
static uint32_t loader_make_version(const char *vers_str)
{
    uint32_t vers = 0, major, minor, patch;
    char *minor_str = NULL;
    char *patch_str = NULL;
    char *cstr;
    char *str;

    if (!vers_str)
        return vers;
    cstr = loader_stack_alloc(strlen(vers_str) + 1);
    strcpy(cstr, vers_str);
    while ((str = strchr(cstr, '.')) != NULL) {
        if (minor_str == NULL) {
            minor_str = str + 1;
            *str = '\0';
            major = atoi(cstr);
        }
        else if (patch_str == NULL) {
            patch_str = str + 1;
            *str = '\0';
            minor = atoi(minor_str);
        }
        else {
            return vers;
        }
        cstr = str + 1;
    }
    // A well-formed string has exactly two '.' separators; bail out otherwise.
    if (minor_str == NULL || patch_str == NULL)
        return vers;
    patch = atoi(patch_str);

    return VK_MAKE_VERSION(major, minor, patch);
}
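
/*
 * Example (illustrative): loader_make_version("0.9.1") yields
 * VK_MAKE_VERSION(0, 9, 1); a string without both '.' separators
 * (e.g. "1.0" or "1") yields 0.
 */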

bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2)
{
    return strcmp(op1->extName, op2->extName) == 0 ? true : false;
}

/**
 * Search the given ext_array for an extension
 * matching the given vk_ext_prop
 */
bool has_vk_extension_property_array(
        const VkExtensionProperties *vk_ext_prop,
        const uint32_t count,
        const VkExtensionProperties *ext_array)
{
    for (uint32_t i = 0; i < count; i++) {
        if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i]))
            return true;
    }
    return false;
}

/**
 * Search the given ext_list for an extension
 * matching the given vk_ext_prop
 */
bool has_vk_extension_property(
        const VkExtensionProperties *vk_ext_prop,
        const struct loader_extension_list *ext_list)
{
    for (uint32_t i = 0; i < ext_list->count; i++) {
        if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop))
            return true;
    }
    return false;
}

static inline bool loader_is_layer_type_device(const enum layer_type type) {
    if ((type & VK_LAYER_TYPE_DEVICE_EXPLICIT) ||
        (type & VK_LAYER_TYPE_DEVICE_IMPLICIT))
        return true;
    return false;
}

/*
 * Search the given layer list for a layer matching the given layer name
 */
static struct loader_layer_properties *loader_get_layer_property(
        const char *name,
        const struct loader_layer_list *layer_list)
{
    for (uint32_t i = 0; i < layer_list->count; i++) {
        const VkLayerProperties *item = &layer_list->list[i].info;
        if (strcmp(name, item->layerName) == 0)
            return &layer_list->list[i];
    }
    return NULL;
}

/**
 * Get the next unused layer property in the list. Init the property to zero.
 */
static struct loader_layer_properties *loader_get_next_layer_property(
        const struct loader_instance *inst,
        struct loader_layer_list *layer_list)
{
    if (layer_list->capacity == 0) {
        layer_list->list = loader_heap_alloc(inst,
                                  sizeof(struct loader_layer_properties) * 64,
                                  VK_SYSTEM_ALLOC_TYPE_INTERNAL);
        if (layer_list->list == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't add any layer properties to list");
            return NULL;
        }
        memset(layer_list->list, 0, sizeof(struct loader_layer_properties) * 64);
        layer_list->capacity = sizeof(struct loader_layer_properties) * 64;
    }

    // ensure enough room to add an entry
    if ((layer_list->count + 1) * sizeof(struct loader_layer_properties)
            > layer_list->capacity) {
        layer_list->list = loader_heap_realloc(inst, layer_list->list,
                                  layer_list->capacity,
                                  layer_list->capacity * 2,
                                  VK_SYSTEM_ALLOC_TYPE_INTERNAL);
        if (layer_list->list == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0,
                       "realloc failed for layer list");
        }
        layer_list->capacity *= 2;
    }

    layer_list->count++;
    return &(layer_list->list[layer_list->count - 1]);
}

/**
 * Remove all layer property entries from the list
 */
void loader_delete_layer_properties(
        const struct loader_instance *inst,
        struct loader_layer_list *layer_list)
{
    uint32_t i;

    if (!layer_list)
        return;

    for (i = 0; i < layer_list->count; i++) {
        loader_destroy_ext_list(inst, &layer_list->list[i].instance_extension_list);
        loader_destroy_ext_list(inst, &layer_list->list[i].device_extension_list);
    }
    layer_list->count = 0;

    if (layer_list->capacity > 0) {
        layer_list->capacity = 0;
        loader_heap_free(inst, layer_list->list);
    }
}

static void loader_add_global_extensions(
        const struct loader_instance *inst,
        const PFN_vkGetGlobalExtensionProperties fp_get_props,
        const char *lib_name,
        struct loader_extension_list *ext_list)
{
    uint32_t i, count;
    VkExtensionProperties *ext_props;
    VkResult res;

    if (!fp_get_props) {
        /* No GetGlobalExtensionProperties defined */
        return;
    }

    res = fp_get_props(NULL, &count, NULL);
    if (res != VK_SUCCESS) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Error getting global extension count from %s", lib_name);
        return;
    }

    if (count == 0) {
        /* No ExtensionProperties to report */
        return;
    }

    ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));

    res = fp_get_props(NULL, &count, ext_props);
    if (res != VK_SUCCESS) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Error getting global extensions from %s", lib_name);
        return;
    }

    for (i = 0; i < count; i++) {
        char spec_version[64];

        snprintf(spec_version, sizeof(spec_version), "%d.%d.%d",
                 VK_MAJOR(ext_props[i].specVersion),
                 VK_MINOR(ext_props[i].specVersion),
                 VK_PATCH(ext_props[i].specVersion));
        loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
                   "Global Extension: %s (%s) version %s",
                   ext_props[i].extName, lib_name, spec_version);
        loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
    }

    return;
}

static void loader_add_physical_device_extensions(
        const struct loader_instance *inst,
        PFN_vkGetPhysicalDeviceExtensionProperties get_phys_dev_ext_props,
        VkPhysicalDevice physical_device,
        const char *lib_name,
        struct loader_extension_list *ext_list)
{
    uint32_t i, count;
    VkResult res;
    VkExtensionProperties *ext_props;

    if (!get_phys_dev_ext_props) {
        /* No GetPhysicalDeviceExtensionProperties defined */
        return;
    }

    res = get_phys_dev_ext_props(physical_device, NULL, &count, NULL);
    if (res == VK_SUCCESS && count > 0) {
        ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));

        res = get_phys_dev_ext_props(physical_device, NULL, &count, ext_props);
        for (i = 0; i < count; i++) {
            char spec_version[64];

            snprintf(spec_version, sizeof(spec_version), "%d.%d.%d",
                     VK_MAJOR(ext_props[i].specVersion),
                     VK_MINOR(ext_props[i].specVersion),
                     VK_PATCH(ext_props[i].specVersion));
            loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
                       "PhysicalDevice Extension: %s (%s) version %s",
                       ext_props[i].extName, lib_name, spec_version);
            loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
        }
    } else {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Error getting physical device extension info count from library %s", lib_name);
    }

    return;
}

static bool loader_init_ext_list(const struct loader_instance *inst,
                                 struct loader_extension_list *ext_info)
{
    ext_info->capacity = 32 * sizeof(VkExtensionProperties);
    ext_info->list = loader_heap_alloc(inst, ext_info->capacity, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    if (ext_info->list == NULL) {
        return false;
    }
    memset(ext_info->list, 0, ext_info->capacity);
    ext_info->count = 0;
    return true;
}

void loader_destroy_ext_list(const struct loader_instance *inst,
                             struct loader_extension_list *ext_info)
{
    loader_heap_free(inst, ext_info->list);
    ext_info->count = 0;
    ext_info->capacity = 0;
}

/*
 * Append non-duplicate extension properties defined in props
 * to the given ext_list.
 */
void loader_add_to_ext_list(
        const struct loader_instance *inst,
        struct loader_extension_list *ext_list,
        uint32_t prop_list_count,
        const VkExtensionProperties *props)
{
    uint32_t i;
    const VkExtensionProperties *cur_ext;

    if (ext_list->list == NULL || ext_list->capacity == 0) {
        loader_init_ext_list(inst, ext_list);
    }

    if (ext_list->list == NULL)
        return;

    for (i = 0; i < prop_list_count; i++) {
        cur_ext = &props[i];

        // look for duplicates
        if (has_vk_extension_property(cur_ext, ext_list)) {
            continue;
        }

        // add to list at end
        // check for enough capacity
        if (ext_list->count * sizeof(VkExtensionProperties)
                >= ext_list->capacity) {
            ext_list->list = loader_heap_realloc(inst,
                                      ext_list->list,
                                      ext_list->capacity,
                                      ext_list->capacity * 2,
                                      VK_SYSTEM_ALLOC_TYPE_INTERNAL);
            // double capacity
            ext_list->capacity *= 2;
        }

        memcpy(&ext_list->list[ext_list->count], cur_ext, sizeof(VkExtensionProperties));
        ext_list->count++;
    }
}
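
/*
 * Illustrative usage of loader_add_to_ext_list() (hypothetical extension
 * name, not from the original source):
 *
 *     VkExtensionProperties prop;
 *     memset(&prop, 0, sizeof(prop));
 *     strncpy(prop.extName, "VK_hypothetical_extension", sizeof(prop.extName) - 1);
 *     prop.specVersion = VK_MAKE_VERSION(0, 9, 0);
 *     loader_add_to_ext_list(inst, &my_ext_list, 1, &prop);
 *
 * Duplicates (by extName) are skipped and the backing array doubles in
 * capacity as needed.
 */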

/**
 * Search the given search_list for any layers in the props list.
 * Add these to the output layer_list. Don't add duplicates to the output layer_list.
 */
static VkResult loader_add_layer_names_to_list(
        const struct loader_instance *inst,
        struct loader_layer_list *output_list,
        uint32_t name_count,
        const char * const *names,
        const struct loader_layer_list *search_list)
{
    struct loader_layer_properties *layer_prop;
    VkResult err = VK_SUCCESS;

    for (uint32_t i = 0; i < name_count; i++) {
        const char *search_target = names[i];
        layer_prop = loader_get_layer_property(search_target, search_list);
        if (!layer_prop) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Unable to find layer %s", search_target);
            err = VK_ERROR_INVALID_LAYER;
            continue;
        }

        loader_add_to_layer_list(inst, output_list, 1, layer_prop);
    }

    return err;
}

/*
 * Manage lists of VkLayerProperties
 */
static bool loader_init_layer_list(const struct loader_instance *inst,
                                   struct loader_layer_list *list)
{
    list->capacity = 32 * sizeof(struct loader_layer_properties);
    list->list = loader_heap_alloc(inst, list->capacity, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    if (list->list == NULL) {
        return false;
    }
    memset(list->list, 0, list->capacity);
    list->count = 0;
    return true;
}

void loader_destroy_layer_list(const struct loader_instance *inst,
                               struct loader_layer_list *layer_list)
{
    loader_heap_free(inst, layer_list->list);
    layer_list->count = 0;
    layer_list->capacity = 0;
}

/*
 * Manage list of layer libraries (loader_lib_info)
 */
static bool loader_init_layer_library_list(const struct loader_instance *inst,
                                           struct loader_layer_library_list *list)
{
    list->capacity = 32 * sizeof(struct loader_lib_info);
    list->list = loader_heap_alloc(inst, list->capacity, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    if (list->list == NULL) {
        return false;
    }
    memset(list->list, 0, list->capacity);
    list->count = 0;
    return true;
}

void loader_destroy_layer_library_list(const struct loader_instance *inst,
                                       struct loader_layer_library_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        loader_heap_free(inst, list->list[i].lib_name);
    }
    loader_heap_free(inst, list->list);
    list->count = 0;
    list->capacity = 0;
}

void loader_add_to_layer_library_list(
        const struct loader_instance *inst,
        struct loader_layer_library_list *list,
        uint32_t item_count,
        const struct loader_lib_info *new_items)
{
    uint32_t i;
    bool duplicate;
    struct loader_lib_info *item;

    if (list->list == NULL || list->capacity == 0) {
        loader_init_layer_library_list(inst, list);
    }

    if (list->list == NULL)
        return;

    for (i = 0; i < item_count; i++) {
        item = (struct loader_lib_info *) &new_items[i];

        // look for duplicates; skip items already in the list
        duplicate = false;
        for (uint32_t j = 0; j < list->count; j++) {
            if (strcmp(list->list[j].lib_name, item->lib_name) == 0) {
                duplicate = true;
                break;
            }
        }
        if (duplicate)
            continue;

        // add to list at end
        // check for enough capacity
        if (list->count * sizeof(struct loader_lib_info)
                >= list->capacity) {
            list->list = loader_heap_realloc(inst,
                                  list->list,
                                  list->capacity,
                                  list->capacity * 2,
                                  VK_SYSTEM_ALLOC_TYPE_INTERNAL);
            // double capacity
            list->capacity *= 2;
        }

        memcpy(&list->list[list->count], item, sizeof(struct loader_lib_info));
        list->count++;
    }
}

/*
 * Search the given layer list for a layer
 * matching the given VkLayerProperties
 */
bool has_vk_layer_property(
        const VkLayerProperties *vk_layer_prop,
        const struct loader_layer_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        if (strcmp(vk_layer_prop->layerName, list->list[i].info.layerName) == 0)
            return true;
    }
    return false;
}

/*
 * Search the given layer list for a layer
 * matching the given name
 */
bool has_layer_name(
        const char *name,
        const struct loader_layer_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        if (strcmp(name, list->list[i].info.layerName) == 0)
            return true;
    }
    return false;
}

/*
 * Append non-duplicate layer properties defined in prop_list
 * to the given layer_info list
 */
void loader_add_to_layer_list(
        const struct loader_instance *inst,
        struct loader_layer_list *list,
        uint32_t prop_list_count,
        const struct loader_layer_properties *props)
{
    uint32_t i;
    struct loader_layer_properties *layer;

    if (list->list == NULL || list->capacity == 0) {
        loader_init_layer_list(inst, list);
    }

    if (list->list == NULL)
        return;

    for (i = 0; i < prop_list_count; i++) {
        layer = (struct loader_layer_properties *) &props[i];

        // look for duplicates
        if (has_vk_layer_property(&layer->info, list)) {
            continue;
        }

        // add to list at end
        // check for enough capacity
        if (list->count * sizeof(struct loader_layer_properties)
                >= list->capacity) {
            list->list = loader_heap_realloc(inst,
                                  list->list,
                                  list->capacity,
                                  list->capacity * 2,
                                  VK_SYSTEM_ALLOC_TYPE_INTERNAL);
            // double capacity
            list->capacity *= 2;
        }

        memcpy(&list->list[list->count], layer, sizeof(struct loader_layer_properties));
        list->count++;
    }
}

/**
 * Search the search_list for any layer with a name
 * that matches the given name and a type that matches the given type.
 * Add all matching layers to the found_list.
 * Do not add if the found loader_layer_properties is already
 * on the found_list.
 */
static void loader_find_layer_name_add_list(
        const struct loader_instance *inst,
        const char *name,
        const enum layer_type type,
        const struct loader_layer_list *search_list,
        struct loader_layer_list *found_list)
{
    for (uint32_t i = 0; i < search_list->count; i++) {
        struct loader_layer_properties *layer_prop = &search_list->list[i];
        if (0 == strcmp(layer_prop->info.layerName, name) &&
            (layer_prop->type & type)) {
            /* Found a layer with the same name, add to found_list */
            loader_add_to_layer_list(inst, found_list, 1, layer_prop);
        }
    }
}

static VkExtensionProperties *get_extension_property(
        const char *name,
        const struct loader_extension_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        if (strcmp(name, list->list[i].extName) == 0)
            return &list->list[i];
    }
    return NULL;
}

/*
 * For global extensions implemented within the loader (i.e. DEBUG_REPORT)
 * the extension must provide two entry points for the loader to use:
 * - "trampoline" entry point - this is the address returned by GetProcAddr
 *   and will always do what's necessary to support a global call.
 * - "terminator" function - this function will be put at the end of the
 *   instance chain and will contain the necessary logic to call / process
 *   the extension for the appropriate ICDs that are available.
 * There is no generic mechanism for including these functions; the references
 * must be placed into the appropriate loader entry points.
 * GetInstanceProcAddr: call extension GetInstanceProcAddr to check for GetProcAddr requests.
 * loader_coalesce_extensions(void) - add extension records to the list of global
 *   extensions available to the app.
 * instance_disp - add the function pointer for the terminator function to this array.
 * The extension itself should be in a separate file that will be
 * linked directly with the loader.
 */
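
/*
 * Illustrative sketch of the pattern described above (hypothetical names
 * throughout; "vkFooEXT" is not a real entry point):
 *
 *     // Trampoline: the address handed out by GetInstanceProcAddr("vkFooEXT").
 *     // It re-dispatches through the instance chain, so enabled layers see
 *     // the call before the loader does.
 *     VkResult VKAPI fooEXT_trampoline(VkInstance instance)
 *     {
 *         return dispatch_table_of(instance)->FooEXT(instance);
 *     }
 *
 *     // Terminator: its pointer lives in instance_disp, so it runs after all
 *     // layers and fans the call out to each ICD that supports the extension.
 *     VkResult VKAPI fooEXT_terminator(VkInstance instance);
 */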

void loader_get_icd_loader_instance_extensions(
        const struct loader_instance *inst,
        struct loader_icd_libs *icd_libs,
        struct loader_extension_list *inst_exts)
{
    struct loader_extension_list icd_exts;
    loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, "Build ICD instance extension list");
    // traverse scanned icd list adding non-duplicate extensions to the list
    for (uint32_t i = 0; i < icd_libs->count; i++) {
        loader_init_ext_list(inst, &icd_exts);
        loader_add_global_extensions(inst, icd_libs->list[i].GetGlobalExtensionProperties,
                                     icd_libs->list[i].lib_name,
                                     &icd_exts);
        loader_add_to_ext_list(inst, inst_exts,
                               icd_exts.count,
                               icd_exts.list);
        loader_destroy_ext_list(inst, &icd_exts);
    }

    // Traverse loader's extensions, adding non-duplicate extensions to the list
    wsi_swapchain_add_instance_extensions(inst, inst_exts);
    debug_report_add_instance_extensions(inst, inst_exts);
}

struct loader_icd *loader_get_icd_and_device(const VkDevice device,
                                             struct loader_device **found_dev)
{
    *found_dev = NULL;
    for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
        for (struct loader_icd *icd = inst->icds; icd; icd = icd->next) {
            for (struct loader_device *dev = icd->logical_device_list; dev; dev = dev->next)
                if (dev->device == device) {
                    *found_dev = dev;
                    return icd;
                }
        }
    }
    return NULL;
}

static void loader_destroy_logical_device(const struct loader_instance *inst,
                                          struct loader_device *dev)
{
    loader_heap_free(inst, dev->app_extension_props);
    if (dev->activated_layer_list.count)
        loader_destroy_layer_list(inst, &dev->activated_layer_list);
    loader_heap_free(inst, dev);
}

static struct loader_device *loader_add_logical_device(
        const struct loader_instance *inst,
        const VkDevice dev,
        struct loader_device **device_list)
{
    struct loader_device *new_dev;

    new_dev = loader_heap_alloc(inst, sizeof(struct loader_device), VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    if (!new_dev) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc struct loader_device");
        return NULL;
    }

    memset(new_dev, 0, sizeof(struct loader_device));

    new_dev->next = *device_list;
    new_dev->device = dev;
    *device_list = new_dev;
    return new_dev;
}

void loader_remove_logical_device(
        const struct loader_instance *inst,
        VkDevice device)
{
    struct loader_device *found_dev, *dev, *prev_dev;
    struct loader_icd *icd;
    icd = loader_get_icd_and_device(device, &found_dev);

    if (!icd || !found_dev)
        return;

    prev_dev = NULL;
    dev = icd->logical_device_list;
    while (dev && dev != found_dev) {
        prev_dev = dev;
        dev = dev->next;
    }

    if (prev_dev)
        prev_dev->next = found_dev->next;
    else
        icd->logical_device_list = found_dev->next;
    loader_destroy_logical_device(inst, found_dev);
}

static void loader_icd_destroy(
        struct loader_instance *ptr_inst,
        struct loader_icd *icd)
{
    ptr_inst->total_icd_count--;
    loader_heap_free(ptr_inst, icd->gpus);
    for (struct loader_device *dev = icd->logical_device_list; dev; ) {
        struct loader_device *next_dev = dev->next;
        loader_destroy_logical_device(ptr_inst, dev);
        dev = next_dev;
    }

    loader_heap_free(ptr_inst, icd);
}

static struct loader_icd * loader_icd_create(const struct loader_instance *inst)
{
    struct loader_icd *icd;

    icd = loader_heap_alloc(inst, sizeof(*icd), VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    if (!icd)
        return NULL;

    memset(icd, 0, sizeof(*icd));

    return icd;
}

static struct loader_icd *loader_icd_add(
        struct loader_instance *ptr_inst,
        const struct loader_scanned_icds *icd_lib)
{
    struct loader_icd *icd;

    icd = loader_icd_create(ptr_inst);
    if (!icd)
        return NULL;

    icd->this_icd_lib = icd_lib;
    icd->this_instance = ptr_inst;

    /* prepend to the list */
    icd->next = ptr_inst->icds;
    ptr_inst->icds = icd;
    ptr_inst->total_icd_count++;

    return icd;
}

void loader_scanned_icd_clear(
        const struct loader_instance *inst,
        struct loader_icd_libs *icd_libs)
{
    if (icd_libs->capacity == 0)
        return;
    for (uint32_t i = 0; i < icd_libs->count; i++) {
        loader_platform_close_library(icd_libs->list[i].handle);
        loader_heap_free(inst, icd_libs->list[i].lib_name);
    }
    loader_heap_free(inst, icd_libs->list);
    icd_libs->capacity = 0;
    icd_libs->count = 0;
    icd_libs->list = NULL;
}

static void loader_scanned_icd_init(const struct loader_instance *inst,
                                    struct loader_icd_libs *icd_libs)
{
    loader_scanned_icd_clear(inst, icd_libs);
    icd_libs->capacity = 8 * sizeof(struct loader_scanned_icds);
    icd_libs->list = loader_heap_alloc(inst, icd_libs->capacity, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
}

static void loader_scanned_icd_add(
        const struct loader_instance *inst,
        struct loader_icd_libs *icd_libs,
        const char *filename)
{
    loader_platform_dl_handle handle;
    PFN_vkCreateInstance fp_create_inst;
    PFN_vkGetGlobalExtensionProperties fp_get_global_ext_props;
    PFN_vkGetInstanceProcAddr fp_get_proc_addr;
    struct loader_scanned_icds *new_node;

    /* TODO implement ref counting of libraries, for now this function leaves
       libraries open and the scanned_icd_clear closes them */
    // Used to call: dlopen(filename, RTLD_LAZY);
    handle = loader_platform_open_library(filename);
    if (!handle) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, loader_platform_open_library_error(filename));
        return;
    }

#define LOOKUP_LD(func_ptr, func) do {                                        \
    func_ptr = (PFN_vk ##func) loader_platform_get_proc_address(handle, "vk" #func); \
    if (!func_ptr) {                                                          \
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, loader_platform_get_proc_address_error("vk" #func)); \
        return;                                                               \
    }                                                                         \
} while (0)

    LOOKUP_LD(fp_get_proc_addr, GetInstanceProcAddr);
    LOOKUP_LD(fp_create_inst, CreateInstance);
    LOOKUP_LD(fp_get_global_ext_props, GetGlobalExtensionProperties);

#undef LOOKUP_LD

    // check for enough capacity
    if ((icd_libs->count * sizeof(struct loader_scanned_icds)) >= icd_libs->capacity) {
        icd_libs->list = loader_heap_realloc(inst,
                                  icd_libs->list,
                                  icd_libs->capacity,
                                  icd_libs->capacity * 2,
                                  VK_SYSTEM_ALLOC_TYPE_INTERNAL);
        // double capacity
        icd_libs->capacity *= 2;
    }
    new_node = &(icd_libs->list[icd_libs->count]);

    new_node->handle = handle;
    new_node->GetInstanceProcAddr = fp_get_proc_addr;
    new_node->CreateInstance = fp_create_inst;
    new_node->GetGlobalExtensionProperties = fp_get_global_ext_props;

    new_node->lib_name = (char *) loader_heap_alloc(inst,
                                  strlen(filename) + 1,
                                  VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    if (!new_node->lib_name) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Out of memory can't add icd");
        return;
    }
    strcpy(new_node->lib_name, filename);
    icd_libs->count++;
}

static bool loader_icd_init_entrys(struct loader_icd *icd,
                                   VkInstance inst,
                                   const PFN_vkGetInstanceProcAddr fp_gipa)
{
    /* initialize entrypoint function pointers */

    #define LOOKUP_GIPA(func, required) do {                       \
        icd->func = (PFN_vk ##func) fp_gipa(inst, "vk" #func);     \
        if (!icd->func && required) {                              \
            loader_log(VK_DBG_REPORT_WARN_BIT, 0,                  \
                       loader_platform_get_proc_address_error("vk" #func)); \
            return false;                                          \
        }                                                          \
    } while (0)

    LOOKUP_GIPA(GetDeviceProcAddr, true);
    LOOKUP_GIPA(DestroyInstance, true);
    LOOKUP_GIPA(EnumeratePhysicalDevices, true);
    LOOKUP_GIPA(GetPhysicalDeviceFeatures, true);
    LOOKUP_GIPA(GetPhysicalDeviceFormatProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceImageFormatProperties, true);
    LOOKUP_GIPA(CreateDevice, true);
    LOOKUP_GIPA(GetPhysicalDeviceProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceMemoryProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceQueueFamilyProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceExtensionProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceSparseImageFormatProperties, true);
    LOOKUP_GIPA(DbgCreateMsgCallback, false);
    LOOKUP_GIPA(DbgDestroyMsgCallback, false);
    LOOKUP_GIPA(GetPhysicalDeviceSurfaceSupportKHR, false);

#undef LOOKUP_GIPA

    return true;
}

static void loader_debug_init(void)
{
    const char *env;

    if (g_loader_debug > 0)
        return;

    g_loader_debug = 0;

    /* parse comma-separated debug options */
    env = getenv("VK_LOADER_DEBUG");
    while (env) {
        const char *p = strchr(env, ',');
        size_t len;

        if (p)
            len = p - env;
        else
            len = strlen(env);

        if (len > 0) {
            if (strncmp(env, "warn", len) == 0) {
                g_loader_debug |= LOADER_WARN_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_WARN_BIT;
            } else if (strncmp(env, "info", len) == 0) {
                g_loader_debug |= LOADER_INFO_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_INFO_BIT;
            } else if (strncmp(env, "perf", len) == 0) {
                g_loader_debug |= LOADER_PERF_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_PERF_WARN_BIT;
            } else if (strncmp(env, "error", len) == 0) {
                g_loader_debug |= LOADER_ERROR_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_ERROR_BIT;
            } else if (strncmp(env, "debug", len) == 0) {
                g_loader_debug |= LOADER_DEBUG_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_DEBUG_BIT;
            }
        }

        if (!p)
            break;

        env = p + 1;
    }
}
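
/*
 * Example (illustrative): running an application with
 *
 *     VK_LOADER_DEBUG=error,warn,info
 *
 * enables error, warning and info messages from loader_log(); unrecognized
 * tokens are silently ignored.
 */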
void loader_initialize(void)
{
    // initialize a mutex
    loader_platform_thread_create_mutex(&loader_lock);

    // initialize logging
    loader_debug_init();

    // initialize cJSON to use the loader's alloc callbacks
    cJSON_Hooks alloc_fns = {
        .malloc_fn = loader_tls_heap_alloc,
        .free_fn = loader_tls_heap_free,
    };
    cJSON_InitHooks(&alloc_fns);
}

struct loader_manifest_files {
    uint32_t count;
    char **filename_list;
};

/**
 * Get the next file or dirname given a string list or registry key path.
 *
 * \returns
 * A pointer to the first char of the next path in the list; the current
 * path element is NUL-terminated in place. If there is no next path, the
 * returned pointer points at the terminating NUL of the input.
 * Note: the input string is modified in some cases. PASS IN A COPY!
 */
static char *loader_get_next_path(char *path)
{
    uint32_t len;
    char *next;

    if (path == NULL)
        return NULL;
    next = strchr(path, PATH_SEPERATOR);
    if (next == NULL) {
        len = (uint32_t) strlen(path);
        next = path + len;
    }
    else {
        *next = '\0';
        next++;
    }

    return next;
}

/**
 * Given a path which is absolute or relative, expand the path if it is
 * relative or leave it unmodified if it is absolute. The base path for
 * relative paths is given in rel_base and should include a trailing
 * directory separator '/'.
 *
 * \returns
 * A string in out_fullpath of the full absolute path.
 * Side effect is that the dir string may be modified.
 */
static void loader_expand_path(const char *path,
                               const char *rel_base,
                               size_t out_size,
                               char *out_fullpath)
{
    if (loader_platform_is_path_absolute(path)) {
        strncpy(out_fullpath, path, out_size);
        out_fullpath[out_size - 1] = '\0';
    }
    else {
        // convert relative to absolute path based on rel_base
        size_t len = strlen(path);
        strncpy(out_fullpath, rel_base, out_size);
        out_fullpath[out_size - 1] = '\0';
        assert(out_size >= strlen(out_fullpath) + len + 1);
        strncat(out_fullpath, path, len);
    }
}

/**
 * Given a filename (file) and a list of paths (dir), try to find an existing
 * file in the paths. If the filename already is a path then there is no
 * searching in the given paths.
 *
 * \returns
 * A string in out_fullpath of either the full path or the file.
 * Side effect is that the dir string may be modified.
 */
static void loader_get_fullpath(const char *file,
                                char *dir,
                                size_t out_size,
                                char *out_fullpath)
{
    char *next_dir;
    if (strchr(file, DIRECTORY_SYMBOL) == NULL) {
        // look for an existing file after prepending each given path
        while (*dir) {
            next_dir = loader_get_next_path(dir);
            snprintf(out_fullpath, out_size, "%s%c%s",
                     dir, DIRECTORY_SYMBOL, file);
            if (loader_platform_file_exists(out_fullpath)) {
                return;
            }
            dir = next_dir;
        }
    }
    snprintf(out_fullpath, out_size, "%s", file);
}
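
/*
 * Example (illustrative, generic names): with PATH_SEPERATOR ':' a mutable
 * copy of "dir_a:dir_b" is split by repeated calls to loader_get_next_path(),
 * each call NUL-terminating the current element and returning the start of
 * the next one. loader_get_fullpath("foo.json", dirs, ...) would then probe
 * "dir_a/foo.json", "dir_b/foo.json", and so on until an existing file is
 * found.
 */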

/**
 * Read a JSON file into a buffer.
 *
 * \returns
 * A pointer to a cJSON object representing the JSON parse tree.
 * This returned buffer should be freed by the caller.
 */
static cJSON *loader_get_json(const char *filename)
{
    FILE *file;
    char *json_buf;
    cJSON *json;
    uint64_t len;
    file = fopen(filename, "rb");
    if (!file) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Couldn't open JSON file %s", filename);
        return NULL;
    }
    fseek(file, 0, SEEK_END);
    len = ftell(file);
    fseek(file, 0, SEEK_SET);
    json_buf = (char*) loader_stack_alloc(len + 1);
    if (json_buf == NULL) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get JSON file");
        fclose(file);
        return NULL;
    }
    if (fread(json_buf, sizeof(char), len, file) != len) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "fread failed can't get JSON file");
        fclose(file);
        return NULL;
    }
    fclose(file);
    json_buf[len] = '\0';

    // parse text from file
    json = cJSON_Parse(json_buf);
    if (json == NULL)
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Can't parse JSON file %s", filename);
    return json;
}

/**
 * Do a deep copy of the loader_layer_properties structure.
 */
static void loader_copy_layer_properties(
        const struct loader_instance *inst,
        struct loader_layer_properties *dst,
        struct loader_layer_properties *src)
{
    memcpy(dst, src, sizeof(*src));
    dst->instance_extension_list.list = loader_heap_alloc(
                                inst,
                                sizeof(VkExtensionProperties) *
                                src->instance_extension_list.count,
                                VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    dst->instance_extension_list.capacity = sizeof(VkExtensionProperties) *
                                src->instance_extension_list.count;
    memcpy(dst->instance_extension_list.list, src->instance_extension_list.list,
           dst->instance_extension_list.capacity);
    dst->device_extension_list.list = loader_heap_alloc(
                                inst,
                                sizeof(VkExtensionProperties) *
                                src->device_extension_list.count,
                                VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    dst->device_extension_list.capacity = sizeof(VkExtensionProperties) *
                                src->device_extension_list.count;
    memcpy(dst->device_extension_list.list, src->device_extension_list.list,
           dst->device_extension_list.capacity);
}

/**
 * Given a cJSON struct (json) of the top level JSON object from a layer
 * manifest file, add an entry to the layer_list.
 * Fill out the layer_properties in this list entry from the input cJSON object.
 *
 * \returns
 * void
 * layer_list has a new entry and is initialized accordingly.
 * If the json input object does not have all the required fields no entry
 * is added to the list.
 */
static void loader_add_layer_properties(const struct loader_instance *inst,
                                        struct loader_layer_list *layer_instance_list,
                                        struct loader_layer_list *layer_device_list,
                                        cJSON *json,
                                        bool is_implicit,
                                        char *filename)
{
    /* Fields in layer manifest file that are required:
     * (required) "file_format_version"
     * following are required in the "layer" object:
     * (required) "name"
     * (required) "type"
     * (required) "library_path"
     * (required) "abi_versions"
     * (required) "implementation_version"
     * (required) "description"
     * (required for implicit layers) "disable_environment"
     *
     * First get all required items; if any are missing, abort.
     */

    cJSON *item, *layer_node, *ext_item;
    char *temp;
    char *name, *type, *library_path, *abi_versions;
    char *implementation_version, *description;
    cJSON *disable_environment;
    int i;
    VkExtensionProperties ext_prop;
    item = cJSON_GetObjectItem(json, "file_format_version");
    if (item == NULL) {
        return;
    }
    char *file_vers = cJSON_PrintUnformatted(item);
    loader_log(VK_DBG_REPORT_INFO_BIT, 0, "Found manifest file %s, version %s",
               filename, file_vers);
    if (strcmp(file_vers, "\"0.9.0\"") != 0)
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Unexpected manifest file version (expected 0.9.0), may cause errors");
    loader_tls_heap_free(file_vers);

    layer_node = cJSON_GetObjectItem(json, "layer");
    if (layer_node == NULL) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"layer\" object in manifest JSON file, skipping");
        return;
    }

    // loop through all "layer" objects in the file
    do {
#define GET_JSON_OBJECT(node, var) {                  \
        var = cJSON_GetObjectItem(node, #var);        \
        if (var == NULL) {                            \
            layer_node = layer_node->next;            \
            continue;                                 \
        }                                             \
        }
#define GET_JSON_ITEM(node, var) {                    \
        item = cJSON_GetObjectItem(node, #var);       \
        if (item == NULL) {                           \
            layer_node = layer_node->next;            \
            continue;                                 \
        }                                             \
        temp = cJSON_Print(item);                     \
        temp[strlen(temp) - 1] = '\0';                \
        var = loader_stack_alloc(strlen(temp) + 1);   \
        strcpy(var, &temp[1]);                        \
        loader_tls_heap_free(temp);                   \
        }
        GET_JSON_ITEM(layer_node, name)
        GET_JSON_ITEM(layer_node, type)
        GET_JSON_ITEM(layer_node, library_path)
        GET_JSON_ITEM(layer_node, abi_versions)
        GET_JSON_ITEM(layer_node, implementation_version)
        GET_JSON_ITEM(layer_node, description)
        if (is_implicit) {
            GET_JSON_OBJECT(layer_node, disable_environment)
        }
#undef GET_JSON_ITEM
#undef GET_JSON_OBJECT

        // add list entry
        struct loader_layer_properties *props = NULL;
        if (!strcmp(type, "DEVICE")) {
            if (layer_device_list == NULL) {
                layer_node = layer_node->next;
                continue;
            }
            props = loader_get_next_layer_property(inst, layer_device_list);
            props->type = (is_implicit) ? VK_LAYER_TYPE_DEVICE_IMPLICIT : VK_LAYER_TYPE_DEVICE_EXPLICIT;
        }
        if (!strcmp(type, "INSTANCE")) {
            if (layer_instance_list == NULL) {
                layer_node = layer_node->next;
                continue;
            }
            props = loader_get_next_layer_property(inst, layer_instance_list);
            props->type = (is_implicit) ? VK_LAYER_TYPE_INSTANCE_IMPLICIT : VK_LAYER_TYPE_INSTANCE_EXPLICIT;
        }
        if (!strcmp(type, "GLOBAL")) {
            if (layer_instance_list != NULL)
                props = loader_get_next_layer_property(inst, layer_instance_list);
            else if (layer_device_list != NULL)
                props = loader_get_next_layer_property(inst, layer_device_list);
            else {
                layer_node = layer_node->next;
                continue;
            }
            props->type = (is_implicit) ? VK_LAYER_TYPE_GLOBAL_IMPLICIT : VK_LAYER_TYPE_GLOBAL_EXPLICIT;
        }
        if (props == NULL) {
            // unrecognized "type" value; skip this layer object
            layer_node = layer_node->next;
            continue;
        }

        strncpy(props->info.layerName, name, sizeof(props->info.layerName));
        props->info.layerName[sizeof(props->info.layerName) - 1] = '\0';

        char *fullpath = props->lib_name;
        char *rel_base;
        if (strchr(library_path, DIRECTORY_SYMBOL) == NULL) {
            // a filename which is assumed to be in the system directory
            char *def_path = loader_stack_alloc(strlen(DEFAULT_VK_LAYERS_PATH) + 1);
            strcpy(def_path, DEFAULT_VK_LAYERS_PATH);
            loader_get_fullpath(library_path, def_path, MAX_STRING_SIZE, fullpath);
        } else {
            // a relative or absolute path
            char *name_copy = loader_stack_alloc(strlen(filename) + 2);
            size_t len;
            strcpy(name_copy, filename);
            rel_base = loader_platform_dirname(name_copy);
            len = strlen(rel_base);
            rel_base[len] = DIRECTORY_SYMBOL;
            rel_base[len + 1] = '\0';
            loader_expand_path(library_path, rel_base, MAX_STRING_SIZE, fullpath);
        }
        props->info.specVersion = loader_make_version(abi_versions);
        props->info.implVersion = loader_make_version(implementation_version);
        strncpy((char *) props->info.description, description, sizeof(props->info.description));
        props->info.description[sizeof(props->info.description) - 1] = '\0';
        if (is_implicit) {
            strncpy(props->disable_env_var.name, disable_environment->child->string, sizeof(props->disable_env_var.name));
            props->disable_env_var.name[sizeof(props->disable_env_var.name) - 1] = '\0';
            strncpy(props->disable_env_var.value, disable_environment->child->valuestring, sizeof(props->disable_env_var.value));
            props->disable_env_var.value[sizeof(props->disable_env_var.value) - 1] = '\0';
        }

        /**
         * Now get all optional items and objects and put in list:
         * functions
         * instance_extensions
         * device_extensions
         * enable_environment (implicit layers only)
         */
#define GET_JSON_OBJECT(node, var) {                  \
        var = cJSON_GetObjectItem(node, #var);        \
        }
#define GET_JSON_ITEM(node, var) {                    \
        item = cJSON_GetObjectItem(node, #var);       \
        if (item != NULL) {                           \
            temp = cJSON_Print(item);                 \
            temp[strlen(temp) - 1] = '\0';            \
            var = loader_stack_alloc(strlen(temp) + 1); \
            strcpy(var, &temp[1]);                    \
            loader_tls_heap_free(temp);               \
        }                                             \
        }

        cJSON *instance_extensions, *device_extensions, *functions, *enable_environment;
        char *vkGetInstanceProcAddr = NULL, *vkGetDeviceProcAddr = NULL, *version;
        GET_JSON_OBJECT(layer_node, functions)
        if (functions != NULL) {
            GET_JSON_ITEM(functions, vkGetInstanceProcAddr)
            GET_JSON_ITEM(functions, vkGetDeviceProcAddr)
            if (vkGetInstanceProcAddr != NULL)
                strncpy(props->functions.str_gipa, vkGetInstanceProcAddr, sizeof(props->functions.str_gipa));
            props->functions.str_gipa[sizeof(props->functions.str_gipa) - 1] = '\0';
            if (vkGetDeviceProcAddr != NULL)
                strncpy(props->functions.str_gdpa, vkGetDeviceProcAddr, sizeof(props->functions.str_gdpa));
            props->functions.str_gdpa[sizeof(props->functions.str_gdpa) - 1] = '\0';
        }
        GET_JSON_OBJECT(layer_node, instance_extensions)
        if (instance_extensions != NULL) {
            int count = cJSON_GetArraySize(instance_extensions);
            for (i = 0; i < count; i++) {
                ext_item = cJSON_GetArrayItem(instance_extensions, i);
                GET_JSON_ITEM(ext_item, name)
                GET_JSON_ITEM(ext_item, version)
                strncpy(ext_prop.extName, name, sizeof(ext_prop.extName));
                ext_prop.extName[sizeof(ext_prop.extName) - 1] = '\0';
                ext_prop.specVersion = loader_make_version(version);
                loader_add_to_ext_list(inst, &props->instance_extension_list, 1, &ext_prop);
            }
        }
        GET_JSON_OBJECT(layer_node, device_extensions)
        if (device_extensions != NULL) {
            int count = cJSON_GetArraySize(device_extensions);
            for (i = 0; i < count; i++) {
                ext_item = cJSON_GetArrayItem(device_extensions, i);
                GET_JSON_ITEM(ext_item, name);
                GET_JSON_ITEM(ext_item, version);
                strncpy(ext_prop.extName, name, sizeof(ext_prop.extName));
                ext_prop.extName[sizeof(ext_prop.extName) - 1] = '\0';
                ext_prop.specVersion = loader_make_version(version);
                loader_add_to_ext_list(inst, &props->device_extension_list, 1, &ext_prop);
            }
        }
        if (is_implicit) {
            GET_JSON_OBJECT(layer_node, enable_environment)
            strncpy(props->enable_env_var.name, enable_environment->child->string, sizeof(props->enable_env_var.name));
            props->enable_env_var.name[sizeof(props->enable_env_var.name) - 1] = '\0';
            strncpy(props->enable_env_var.value, enable_environment->child->valuestring, sizeof(props->enable_env_var.value));
            props->enable_env_var.value[sizeof(props->enable_env_var.value) - 1] = '\0';
        }
#undef GET_JSON_ITEM
#undef GET_JSON_OBJECT
        // global layers need to be added to both the device and instance lists
        if (!strcmp(type, "GLOBAL")) {
            struct loader_layer_properties *dev_props;
            if (layer_instance_list == NULL || layer_device_list == NULL) {
                layer_node = layer_node->next;
                continue;
            }
            dev_props = loader_get_next_layer_property(inst, layer_device_list);
            // copy into device layer list
            loader_copy_layer_properties(inst, dev_props, props);
        }
        layer_node = layer_node->next;
    } while (layer_node != NULL);
    return;
}
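
/*
 * Illustrative layer manifest of the shape parsed by
 * loader_add_layer_properties() above (all values are made up):
 *
 * {
 *     "file_format_version": "0.9.0",
 *     "layer": {
 *         "name": "VK_LAYER_VENDOR_example",
 *         "type": "GLOBAL",
 *         "library_path": "libVkLayer_example.so",
 *         "abi_versions": "0.9.0",
 *         "implementation_version": "1.0.0",
 *         "description": "Example layer",
 *         "disable_environment": { "DISABLE_VK_LAYER_EXAMPLE": "1" }
 *     }
 * }
 *
 * "disable_environment"/"enable_environment" apply only to implicit layers;
 * the optional "functions", "instance_extensions" and "device_extensions"
 * objects are picked up when present.
 */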

/**
 * Find the Vulkan library manifest files.
 *
 * This function scans the location or env_override directories/files
 * for a list of JSON manifest files. If env_override is non-NULL
 * and has a valid value, then the location is ignored. Otherwise
 * location is used to look for manifest files. The location
 * is interpreted as a registry path on Windows and as directory path(s)
 * on Linux.
 *
 * \returns
 * A string list of manifest files to be opened in the out_files param.
 * The list has a pointer to a string for each manifest filename.
 * When done using the list in out_files, the pointers should be freed.
 * Location or override string lists can be either files or directories as follows:
 *             | location | override
 * ------------+----------+---------
 * Win ICD     | files    | files
 * Win Layer   | files    | dirs
 * Linux ICD   | dirs     | files
 * Linux Layer | dirs     | dirs
 */
static void loader_get_manifest_files(const struct loader_instance *inst,
                                      const char *env_override,
                                      bool is_layer,
                                      const char *location,
                                      struct loader_manifest_files *out_files)
{
    char *override = NULL;
    char *loc;
    char *file, *next_file, *name;
    size_t alloced_count = 64;
    char full_path[2048];
    DIR *sysdir = NULL;
    bool list_is_dirs = false;
    struct dirent *dent;

    out_files->count = 0;
    out_files->filename_list = NULL;

    if (env_override != NULL && (override = getenv(env_override))) {
#if defined(__linux__)
        if (geteuid() != getuid()) {
            /* Don't allow setuid apps to use the env var: */
            override = NULL;
        }
#endif
    }

    if (location == NULL) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0,
                   "Can't get manifest files with NULL location, env_override=%s",
                   env_override);
        return;
    }

#if defined(__linux__)
    list_is_dirs = (override == NULL || is_layer) ? true : false;
#else //WIN32
    list_is_dirs = (is_layer && override != NULL) ? true : false;
#endif
    // Make a copy of the input we are using so it is not modified
    // Also handle getting the location(s) from registry on Windows
    if (override == NULL) {
        loc = loader_stack_alloc(strlen(location) + 1);
        if (loc == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files");
            return;
        }
        strcpy(loc, location);
#if defined(_WIN32)
        loc = loader_get_registry_files(inst, loc);
        if (loc == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Registry lookup failed can't get manifest files");
            return;
        }
#endif
    }
    else {
        loc = loader_stack_alloc(strlen(override) + 1);
        if (loc == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files");
            return;
        }
        strcpy(loc, override);
    }

    // Print out the paths being searched if debugging is enabled
    loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, "Searching the following paths for manifest files: %s\n", loc);

    file = loc;
    while (*file) {
        next_file = loader_get_next_path(file);
        if (list_is_dirs) {
            sysdir = opendir(file);
            name = NULL;
            if (sysdir) {
                dent = readdir(sysdir);
                if (dent == NULL)
                    break;
                name = &(dent->d_name[0]);
                loader_get_fullpath(name, file, sizeof(full_path), full_path);
                name = full_path;
            }
        }
        else {
#if defined(__linux__)
            // only Linux has relative paths
            char *dir;
            // make a copy of location so it isn't modified
            dir = loader_stack_alloc(strlen(location) + 1);
            if (dir == NULL) {
                loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files");
                return;
            }
            strcpy(dir, location);

            loader_get_fullpath(file, dir, sizeof(full_path), full_path);

            name = full_path;
#else // WIN32
            name = file;
#endif
        }
        while (name) {
            /* Look for files ending with the ".json" suffix */
            uint32_t nlen = (uint32_t) strlen(name);
            const char *suf = name + nlen - 5;
            if ((nlen > 5) && !strncmp(suf, ".json", 5)) {
                if (out_files->count == 0) {
                    out_files->filename_list = loader_heap_alloc(inst,
                                                    alloced_count * sizeof(char *),
                                                    VK_SYSTEM_ALLOC_TYPE_INTERNAL);
                }
                else if (out_files->count == alloced_count) {
                    out_files->filename_list = loader_heap_realloc(inst,
                                                    out_files->filename_list,
                                                    alloced_count * sizeof(char *),
                                                    alloced_count * sizeof(char *) * 2,
                                                    VK_SYSTEM_ALLOC_TYPE_INTERNAL);
                    alloced_count *= 2;
                }
                if (out_files->filename_list == NULL) {
                    loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't alloc manifest file list");
                    return;
                }
                out_files->filename_list[out_files->count] = loader_heap_alloc(
                                                    inst,
                                                    strlen(name) + 1,
                                                    VK_SYSTEM_ALLOC_TYPE_INTERNAL);
                if (out_files->filename_list[out_files->count] == NULL) {
                    loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files");
                    return;
                }
                strcpy(out_files->filename_list[out_files->count], name);
                out_files->count++;
            } else if (!list_is_dirs) {
                loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Skipping manifest file %s, file name must end in .json", name);
            }
            if (list_is_dirs) {
                dent = readdir(sysdir);
                if (dent == NULL)
                    break;
                name = &(dent->d_name[0]);
                loader_get_fullpath(name, file, sizeof(full_path), full_path);
                name = full_path;
            }
            else {
                break;
            }
        }
        if (sysdir)
            closedir(sysdir);
        file = next_file;
    }
    return;
}

void loader_init_icd_lib_list()
{

}

void loader_destroy_icd_lib_list()
{

}
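/*
 * Illustrative only -- the shape of ICD manifest loader_icd_scan() below
 * expects; "file_format_version" and "ICD"."library_path" are the fields it
 * actually reads, while the library name is a made-up placeholder:
 *
 *   {
 *       "file_format_version": "1.0.0",
 *       "ICD": { "library_path": "libVK_example_icd.so" }
 *   }
 *
 * Likewise, "VK_ICD_FILENAMES" is handed to loader_get_manifest_files() as
 * env_override, so on Linux it would hold a path-separated list of manifest
 * files, e.g. something like
 *     VK_ICD_FILENAMES=/path/to/example_icd.json
 * (example value only, not a path shipped with the loader).
 */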
/**
 * Try to find the Vulkan ICD driver(s).
 *
 * This function scans the default system loader path(s) or the path
 * specified by the \c VK_ICD_FILENAMES environment variable in
 * order to find loadable VK ICD manifest files. From these
 * manifest files it finds the ICD libraries.
 *
 * \returns
 * a list of icds that were discovered
 */
void loader_icd_scan(
        const struct loader_instance *inst,
        struct loader_icd_libs *icds)
{
    char *file_str;
    struct loader_manifest_files manifest_files;

    loader_scanned_icd_init(inst, icds);
    // Get a list of manifest files for ICDs
    loader_get_manifest_files(inst, "VK_ICD_FILENAMES", false,
                              DEFAULT_VK_DRIVERS_INFO, &manifest_files);
    if (manifest_files.count == 0)
        return;
    for (uint32_t i = 0; i < manifest_files.count; i++) {
        file_str = manifest_files.filename_list[i];
        if (file_str == NULL)
            continue;

        cJSON *json;
        json = loader_get_json(file_str);
        if (!json)
            continue;
        cJSON *item;
        item = cJSON_GetObjectItem(json, "file_format_version");
        if (item == NULL)
            return;
        char *file_vers = cJSON_Print(item);
        loader_log(VK_DBG_REPORT_INFO_BIT, 0, "Found manifest file %s, version %s",
                   file_str, file_vers);
        if (strcmp(file_vers, "\"1.0.0\"") != 0)
            loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Unexpected manifest file version (expected 1.0.0), may cause errors");
        loader_tls_heap_free(file_vers);
        item = cJSON_GetObjectItem(json, "ICD");
        if (item != NULL) {
            item = cJSON_GetObjectItem(item, "library_path");
            if (item != NULL) {
                char *temp = cJSON_Print(item);
                if (!temp || strlen(temp) == 0) {
                    loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"library_path\" in ICD JSON file %s, skipping", file_str);
                    loader_tls_heap_free(temp);
                    loader_heap_free(inst, file_str);
                    cJSON_Delete(json);
                    continue;
                }
                // strip out extra quotes
                temp[strlen(temp) - 1] = '\0';
                char *library_path = loader_stack_alloc(strlen(temp) + 1);
                strcpy(library_path, &temp[1]);
                loader_tls_heap_free(temp);
                if (!library_path || strlen(library_path) == 0) {
                    loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"library_path\" in ICD JSON file %s, skipping", file_str);
                    loader_heap_free(inst, file_str);
                    cJSON_Delete(json);
                    continue;
                }
                char *fullpath;
                uint32_t path_len;
                char *rel_base;
                // Print out the paths being searched if debugging is enabled
                loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, "Searching for ICD drivers named %s default dir %s\n", library_path, DEFAULT_VK_DRIVERS_PATH);
                if (strchr(library_path, DIRECTORY_SYMBOL) == NULL) {
                    // a filename which is assumed in the system directory
                    char *def_path = loader_stack_alloc(strlen(DEFAULT_VK_DRIVERS_PATH) + 1);
                    strcpy(def_path, DEFAULT_VK_DRIVERS_PATH);
                    path_len = strlen(DEFAULT_VK_DRIVERS_PATH) + strlen(library_path) + 2;
                    fullpath = loader_stack_alloc(path_len);
#if defined(__linux__)
                    loader_get_fullpath(library_path, def_path, path_len, fullpath);
#else // WIN32
                    // fullpath is a pointer, so sizeof(fullpath) was only the
                    // pointer size; copy at most path_len bytes instead
                    strncpy(fullpath, library_path, path_len);
                    fullpath[path_len - 1] = '\0';
#endif
                } else {
                    // a relative or absolute path
                    char *name_copy = loader_stack_alloc(strlen(file_str) + 2);
                    size_t len;
                    strcpy(name_copy, file_str);
                    rel_base = loader_platform_dirname(name_copy);
                    len = strlen(rel_base);
                    rel_base[len] = DIRECTORY_SYMBOL;
                    rel_base[len + 1] = '\0';
                    path_len = strlen(rel_base) + strlen(library_path) + 2;
                    fullpath = loader_stack_alloc(path_len);
                    loader_expand_path(library_path, rel_base, path_len, fullpath);
                }
                loader_scanned_icd_add(inst, icds, fullpath);
            }

        }
        else
            loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"ICD\" object in ICD JSON file %s, skipping", file_str);

        loader_heap_free(inst, file_str);
        cJSON_Delete(json);
    }
    loader_heap_free(inst, manifest_files.filename_list);

}


void loader_layer_scan(
        const struct loader_instance *inst,
        struct loader_layer_list *instance_layers,
        struct loader_layer_list *device_layers)
{
    char *file_str;
    struct loader_manifest_files manifest_files;
    cJSON *json;
    uint32_t i;

    // Get a list of manifest files for layers
    loader_get_manifest_files(inst, LAYERS_PATH_ENV, true, DEFAULT_VK_LAYERS_INFO,
                              &manifest_files);
    if (manifest_files.count == 0)
        return;

#if 0 //TODO
    /**
     * We need a list of the layer libraries, not just a list of
     * the layer properties (a layer library could expose more than
     * one layer property). This list of scanned layers would be
     * used to check for global and physicaldevice layer properties.
     */
    if (!loader_init_layer_library_list(&loader.scanned_layer_libraries)) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0,
                   "Alloc for layer list failed: %s line: %d", __FILE__, __LINE__);
        return;
    }
#endif

    /* cleanup any previously scanned libraries */
    loader_delete_layer_properties(inst, instance_layers);
    loader_delete_layer_properties(inst, device_layers);


    for (i = 0; i < manifest_files.count; i++) {
        file_str = manifest_files.filename_list[i];
        if (file_str == NULL)
            continue;

        // parse file into JSON struct
        json = loader_get_json(file_str);
        if (!json) {
            continue;
        }

        //TODO pass in implicit versus explicit bool
        //TODO error if device layers expose instance_extensions
        //TODO error if instance layers expose device extensions
        loader_add_layer_properties(inst,
                                    instance_layers,
                                    device_layers,
                                    json,
                                    false,
                                    file_str);

        loader_heap_free(inst, file_str);
        cJSON_Delete(json);
    }
    loader_heap_free(inst, manifest_files.filename_list);

}

static PFN_vkVoidFunction VKAPI loader_gpa_instance_internal(VkInstance inst, const char * pName)
{
    // inst is not wrapped
    if (inst == VK_NULL_HANDLE) {
        return NULL;
    }
    VkLayerInstanceDispatchTable* disp_table = * (VkLayerInstanceDispatchTable **) inst;
    void *addr;

    if (!strcmp(pName, "vkGetInstanceProcAddr"))
        return (void *) loader_gpa_instance_internal;

    if (disp_table == NULL)
        return NULL;

    addr = loader_lookup_instance_dispatch_table(disp_table, pName);
    if (addr) {
        return addr;
    }

    if (disp_table->GetInstanceProcAddr == NULL) {
        return NULL;
    }
    return disp_table->GetInstanceProcAddr(inst, pName);
}

struct loader_icd * loader_get_icd(const VkPhysicalDevice gpu, uint32_t *gpu_index)
{

    *gpu_index = 0;
    for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
        for (struct loader_icd *icd = inst->icds; icd; icd = icd->next) {
            for (uint32_t i = 0; i < icd->gpu_count; i++)
                if (icd->gpus[i] == gpu) {
                    *gpu_index = i;
                    return icd;
                }
        }
    }
    return NULL;
}
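/*
 * Note on the bookkeeping below (descriptive, not additional behavior):
 * loader_add_layer_lib() keeps one loader_lib_info entry per distinct layer
 * library.  If the same library -- say a hypothetical "libVkLayer_example.so"
 * -- backs both an instance layer and a device layer, the second activation
 * only bumps ref_count; the library is opened via
 * loader_platform_open_library() once, and loader_remove_layer_lib() closes
 * it again only when ref_count drops back to zero.
 */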
static loader_platform_dl_handle loader_add_layer_lib(
        const struct loader_instance *inst,
        const char *chain_type,
        struct loader_layer_properties *layer_prop)
{
    struct loader_lib_info *new_layer_lib_list, *my_lib;
    size_t new_alloc_size;
    /*
     * TODO: We can now track this information in the
     * scanned_layer_libraries list.
     */
    for (uint32_t i = 0; i < loader.loaded_layer_lib_count; i++) {
        if (strcmp(loader.loaded_layer_lib_list[i].lib_name, layer_prop->lib_name) == 0) {
            /* Have already loaded this library, just increment ref count */
            loader.loaded_layer_lib_list[i].ref_count++;
            loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
                       "%s Chain: Increment layer reference count for layer library %s",
                       chain_type, layer_prop->lib_name);
            return loader.loaded_layer_lib_list[i].lib_handle;
        }
    }

    /* Haven't seen this library so load it */
    new_alloc_size = 0;
    if (loader.loaded_layer_lib_capacity == 0)
        new_alloc_size = 8 * sizeof(struct loader_lib_info);
    else if (loader.loaded_layer_lib_capacity <= loader.loaded_layer_lib_count *
                                                 sizeof(struct loader_lib_info))
        new_alloc_size = loader.loaded_layer_lib_capacity * 2;

    if (new_alloc_size) {
        new_layer_lib_list = loader_heap_realloc(
                                    inst, loader.loaded_layer_lib_list,
                                    loader.loaded_layer_lib_capacity,
                                    new_alloc_size,
                                    VK_SYSTEM_ALLOC_TYPE_INTERNAL);
        if (!new_layer_lib_list) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "loader: realloc failed in loader_add_layer_lib");
            return NULL;
        }
        loader.loaded_layer_lib_capacity = new_alloc_size;
    } else
        new_layer_lib_list = loader.loaded_layer_lib_list;
    my_lib = &new_layer_lib_list[loader.loaded_layer_lib_count];

    strncpy(my_lib->lib_name, layer_prop->lib_name, sizeof(my_lib->lib_name));
    my_lib->lib_name[sizeof(my_lib->lib_name) - 1] = '\0';
    my_lib->ref_count = 0;
    my_lib->lib_handle = NULL;

    if ((my_lib->lib_handle = loader_platform_open_library(my_lib->lib_name)) == NULL) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0,
                   loader_platform_open_library_error(my_lib->lib_name));
        return NULL;
    } else {
        loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
                   "Chain: %s: Loading layer library %s",
                   chain_type, layer_prop->lib_name);
    }
    loader.loaded_layer_lib_count++;
    loader.loaded_layer_lib_list = new_layer_lib_list;
    my_lib->ref_count++;

    return my_lib->lib_handle;
}

static void loader_remove_layer_lib(
        struct loader_instance *inst,
        struct loader_layer_properties *layer_prop)
{
    uint32_t idx;
    struct loader_lib_info *new_layer_lib_list, *my_lib = NULL;

    for (uint32_t i = 0; i < loader.loaded_layer_lib_count; i++) {
        if (strcmp(loader.loaded_layer_lib_list[i].lib_name, layer_prop->lib_name) == 0) {
            /* found matching library */
            idx = i;
            my_lib = &loader.loaded_layer_lib_list[i];
            break;
        }
    }

    if (my_lib == NULL) {
        /* Library was never loaded; nothing to unload or remove. */
        return;
    }
    my_lib->ref_count--;
    if (my_lib->ref_count > 0) {
        loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
                   "Decrement reference count for layer library %s", layer_prop->lib_name);
        return;
    }
    loader_platform_close_library(my_lib->lib_handle);
    loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
               "Unloading layer library %s", layer_prop->lib_name);

    /* Need to remove unused library from list */
    new_layer_lib_list = loader_heap_alloc(inst,
                                           loader.loaded_layer_lib_capacity,
                                           VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    if (!new_layer_lib_list) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "loader: heap alloc failed loader_remove_layer_library");
        return;
    }

    if (idx > 0) {
        /* Copy records before idx */
        memcpy(new_layer_lib_list, &loader.loaded_layer_lib_list[0],
               sizeof(struct loader_lib_info) * idx);
    }
    if (idx < (loader.loaded_layer_lib_count - 1)) {
        /* Copy records after idx */
        memcpy(&new_layer_lib_list[idx], &loader.loaded_layer_lib_list[idx+1],
               sizeof(struct loader_lib_info) * (loader.loaded_layer_lib_count - idx - 1));
    }

    loader_heap_free(inst, loader.loaded_layer_lib_list);
    loader.loaded_layer_lib_count--;
    loader.loaded_layer_lib_list = new_layer_lib_list;
}


/**
 * Go through the search_list and find any layers which match type. If a layer
 * of matching type is found, add it to list.
 */
//TODO need to handle implicit layer enable env var and disable env var
static void loader_add_layer_implicit(
        const struct loader_instance *inst,
        const enum layer_type type,
        struct loader_layer_list *list,
        const struct loader_layer_list *search_list)
{
    uint32_t i;
    for (i = 0; i < search_list->count; i++) {
        const struct loader_layer_properties *prop = &search_list->list[i];
        if (prop->type & type) {
            /* Found a layer with the same type, add to layer_list */
            loader_add_to_layer_list(inst, list, 1, prop);
        }
    }

}

/**
 * Get the layer name(s) from the env_name environment variable. If a layer
 * is found in search_list then add it to layer_list, but only if its type
 * matches.
 */
static void loader_add_layer_env(
        const struct loader_instance *inst,
        const enum layer_type type,
        const char *env_name,
        struct loader_layer_list *layer_list,
        const struct loader_layer_list *search_list)
{
    char *layerEnv;
    char *next, *name;

    layerEnv = getenv(env_name);
    if (layerEnv == NULL) {
        return;
    }
    name = loader_stack_alloc(strlen(layerEnv) + 1);
    if (name == NULL) {
        return;
    }
    strcpy(name, layerEnv);

    while (name && *name) {
        next = loader_get_next_path(name);
        loader_find_layer_name_add_list(inst, name, type, search_list, layer_list);
        name = next;
    }

    return;
}

void loader_deactivate_instance_layers(struct loader_instance *instance)
{
    if (!instance->activated_layer_list.count) {
        return;
    }

    /* Release each activated layer's library */
    for (uint32_t i = 0; i < instance->activated_layer_list.count; i++) {
        struct loader_layer_properties *layer_prop = &instance->activated_layer_list.list[i];

        loader_remove_layer_lib(instance, layer_prop);
    }
    loader_destroy_layer_list(instance, &instance->activated_layer_list);
}

VkResult loader_enable_instance_layers(
        struct loader_instance *inst,
        const VkInstanceCreateInfo *pCreateInfo,
        const struct loader_layer_list *instance_layers)
{
    VkResult err;

    if (inst == NULL)
        return VK_ERROR_UNKNOWN;

    if (!loader_init_layer_list(inst, &inst->activated_layer_list)) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc Instance activated layer list");
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    /* Add any implicit layers first */
    loader_add_layer_implicit(
                inst,
                VK_LAYER_TYPE_INSTANCE_IMPLICIT,
                &inst->activated_layer_list,
                instance_layers);

    /* Add any layers specified via environment variable next */
    loader_add_layer_env(
                inst,
                VK_LAYER_TYPE_INSTANCE_EXPLICIT,
                "VK_INSTANCE_LAYERS",
                &inst->activated_layer_list,
                instance_layers);

    /* Add layers specified by the application */
    err = loader_add_layer_names_to_list(
                inst,
                &inst->activated_layer_list,
                pCreateInfo->layerCount,
                pCreateInfo->ppEnabledLayerNames,
                instance_layers);

    return err;
}
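/*
 * Illustrative only: with the ordering above, implicit layers come first,
 * then anything named in VK_INSTANCE_LAYERS, then the layers the app passed
 * in pCreateInfo.  So an environment setting such as
 *     VK_INSTANCE_LAYERS=VK_LAYER_example_a:VK_LAYER_example_b
 * (made-up layer names; the string is split by loader_get_next_path, so the
 * platform's path separator delimits entries) is activated ahead of the
 * app-requested layers when the instance chain is built below.
 */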
uint32_t loader_activate_instance_layers(struct loader_instance *inst)
{
    uint32_t layer_idx;
    VkBaseLayerObject *wrappedInstance;

    if (inst == NULL) {
        return 0;
    }

    // NOTE inst is unwrapped at this point in time
    void* baseObj = (void*) inst;
    void* nextObj = (void*) inst;
    VkBaseLayerObject *nextInstObj;
    PFN_vkGetInstanceProcAddr nextGPA = loader_gpa_instance_internal;

    if (!inst->activated_layer_list.count) {
        return 0;
    }

    wrappedInstance = loader_stack_alloc(sizeof(VkBaseLayerObject)
                                         * inst->activated_layer_list.count);
    if (!wrappedInstance) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc Instance objects for layer");
        return 0;
    }

    /* Create instance chain of enabled layers */
    layer_idx = inst->activated_layer_list.count - 1;
    for (int32_t i = inst->activated_layer_list.count - 1; i >= 0; i--) {
        struct loader_layer_properties *layer_prop = &inst->activated_layer_list.list[i];
        loader_platform_dl_handle lib_handle;

        /*
         * Note: An extension's Get*ProcAddr should not return a function pointer for
         * any extension entry points until the extension has been enabled.
         * To do this requires a different behavior from Get*ProcAddr functions implemented
         * in layers.
         * The very first call to a layer will be its Get*ProcAddr function requesting
         * the layer's vkGet*ProcAddr. The layer should initialize its internal dispatch table
         * with the wrapped object given (either Instance or Device) and return the layer's
         * Get*ProcAddr function. The layer should also use this opportunity to record the
         * baseObject so that it can find the correct local dispatch table on future calls.
         * Subsequent calls to Get*ProcAddr, CreateInstance, CreateDevice
         * will not use a wrapped object and must look up their local dispatch table from
         * the given baseObject.
         */
        nextInstObj = (wrappedInstance + layer_idx);
        nextInstObj->pGPA = (PFN_vkGPA) nextGPA;
        nextInstObj->baseObject = baseObj;
        nextInstObj->nextObject = nextObj;
        nextObj = (void*) nextInstObj;

        lib_handle = loader_add_layer_lib(inst, "instance", layer_prop);
        if ((nextGPA = layer_prop->functions.get_instance_proc_addr) == NULL) {
            if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) {
                nextGPA = (PFN_vkGetInstanceProcAddr) loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
                layer_prop->functions.get_instance_proc_addr = nextGPA;
            } else
                nextGPA = (PFN_vkGetInstanceProcAddr) loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gipa);
            if (!nextGPA) {
                loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to find vkGetInstanceProcAddr in layer %s", layer_prop->lib_name);

                /* TODO: Should we return nextObj, nextGPA to previous? or decrement layer_list count? */
                continue;
            }
        }

        loader_log(VK_DBG_REPORT_INFO_BIT, 0,
                   "Insert instance layer %s (%s)",
                   layer_prop->info.layerName,
                   layer_prop->lib_name);

        layer_idx--;
    }

    loader_init_instance_core_dispatch_table(inst->disp, nextGPA, (VkInstance) nextObj, (VkInstance) baseObj);

    return inst->activated_layer_list.count;
}

void loader_activate_instance_layer_extensions(struct loader_instance *inst)
{

    loader_init_instance_extension_dispatch_table(inst->disp,
                                                  inst->disp->GetInstanceProcAddr,
                                                  (VkInstance) inst);
}

static VkResult loader_enable_device_layers(
        const struct loader_instance *inst,
        struct loader_icd *icd,
        struct loader_device *dev,
        const VkDeviceCreateInfo *pCreateInfo,
        const struct loader_layer_list *device_layers)

{
    VkResult err;

    if (dev == NULL)
        return VK_ERROR_UNKNOWN;

    if (dev->activated_layer_list.list == NULL || dev->activated_layer_list.capacity == 0) {
        loader_init_layer_list(inst, &dev->activated_layer_list);
    }

    if (dev->activated_layer_list.list == NULL) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc device activated layer list");
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    /* Add any implicit layers first */
    loader_add_layer_implicit(
                inst,
                VK_LAYER_TYPE_DEVICE_IMPLICIT,
                &dev->activated_layer_list,
                device_layers);

    /* Add any layers specified via environment variable next */
    loader_add_layer_env(
                inst,
                VK_LAYER_TYPE_DEVICE_EXPLICIT,
                "VK_DEVICE_LAYERS",
                &dev->activated_layer_list,
                device_layers);

    /* Add layers specified by the application */
    err = loader_add_layer_names_to_list(
                inst,
                &dev->activated_layer_list,
                pCreateInfo->layerCount,
                pCreateInfo->ppEnabledLayerNames,
                device_layers);

    return err;
}
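/*
 * Rough sketch of the chains built by loader_activate_instance_layers() above
 * and loader_activate_device_layers() below (descriptive only):
 *
 *   app -> [layer N-1] -> ... -> [layer 0] -> loader terminator
 *
 * Each activated layer gets a VkBaseLayerObject whose pGPA points at the next
 * element's Get*ProcAddr.  The terminator is loader_gpa_instance_internal for
 * the instance chain and loader_GetDeviceChainProcAddr for the device chain,
 * which is why vkCreateDevice needs the scratch terminator defined next.
 */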
/*
 * This function terminates the device chain for CreateDevice.
 * CreateDevice is a special case and so the loader calls
 * the ICD's CreateDevice before creating the chain. Since
 * we can't call CreateDevice twice we must terminate the
 * device chain with something else.
 */
static VkResult VKAPI scratch_vkCreateDevice(
    VkPhysicalDevice gpu,
    const VkDeviceCreateInfo *pCreateInfo,
    VkDevice *pDevice)
{
    return VK_SUCCESS;
}

static PFN_vkVoidFunction VKAPI loader_GetDeviceChainProcAddr(VkDevice device, const char * name)
{
    if (!strcmp(name, "vkGetDeviceProcAddr"))
        return (PFN_vkVoidFunction) loader_GetDeviceChainProcAddr;
    if (!strcmp(name, "vkCreateDevice"))
        return (PFN_vkVoidFunction) scratch_vkCreateDevice;

    struct loader_device *found_dev;
    struct loader_icd *icd = loader_get_icd_and_device(device, &found_dev);
    return icd->GetDeviceProcAddr(device, name);
}

static uint32_t loader_activate_device_layers(
        const struct loader_instance *inst,
        struct loader_device *dev,
        VkDevice device)
{
    if (!dev) {
        return 0;
    }

    /* activate any layer libraries */
    void* nextObj = (void*) device;
    void* baseObj = nextObj;
    VkBaseLayerObject *nextGpuObj;
    PFN_vkGetDeviceProcAddr nextGPA = loader_GetDeviceChainProcAddr;
    VkBaseLayerObject *wrappedGpus;

    if (!dev->activated_layer_list.count)
        return 0;

    wrappedGpus = loader_heap_alloc(inst,
                                    sizeof(VkBaseLayerObject) * dev->activated_layer_list.count,
                                    VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    if (!wrappedGpus) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc Gpu objects for layer");
        return 0;
    }

    for (int32_t i = dev->activated_layer_list.count - 1; i >= 0; i--) {

        struct loader_layer_properties *layer_prop = &dev->activated_layer_list.list[i];
        loader_platform_dl_handle lib_handle;

        nextGpuObj = (wrappedGpus + i);
        nextGpuObj->pGPA = (PFN_vkGPA) nextGPA;
        nextGpuObj->baseObject = baseObj;
        nextGpuObj->nextObject = nextObj;
        nextObj = (void*) nextGpuObj;

        lib_handle = loader_add_layer_lib(inst, "device", layer_prop);
        if ((nextGPA = layer_prop->functions.get_device_proc_addr) == NULL) {
            if (layer_prop->functions.str_gdpa == NULL || strlen(layer_prop->functions.str_gdpa) == 0) {
                nextGPA = (PFN_vkGetDeviceProcAddr) loader_platform_get_proc_address(lib_handle, "vkGetDeviceProcAddr");
                layer_prop->functions.get_device_proc_addr = nextGPA;
            } else
                nextGPA = (PFN_vkGetDeviceProcAddr) loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gdpa);
            if (!nextGPA) {
                loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to find vkGetDeviceProcAddr in layer %s", layer_prop->lib_name);
                continue;
            }
        }

        loader_log(VK_DBG_REPORT_INFO_BIT, 0,
                   "Insert device layer library %s (%s)",
                   layer_prop->info.layerName,
                   layer_prop->lib_name);

    }

    loader_init_device_dispatch_table(&dev->loader_dispatch, nextGPA,
                                      (VkDevice) nextObj, (VkDevice) baseObj);
    loader_heap_free(inst, wrappedGpus);

    return dev->activated_layer_list.count;
}

VkResult loader_validate_layers(
        const uint32_t layer_count,
        const char * const *ppEnabledLayerNames,
        const struct loader_layer_list *list)
{
    struct loader_layer_properties *prop;

    for (uint32_t i = 0; i < layer_count; i++) {
        prop = loader_get_layer_property(ppEnabledLayerNames[i],
                                         list);
        if (!prop) {
            return VK_ERROR_INVALID_LAYER;
        }
    }

    return VK_SUCCESS;
}
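/*
 * Descriptive note for the two validators below: an extension name the app
 * enables is accepted if it appears either in the ICD/global extension list
 * or in the extension list of one of the layers the app also enabled;
 * otherwise VK_ERROR_INVALID_EXTENSION is returned.  For example (made-up
 * names), enabling extension "VK_EXT_example" together with a layer
 * "VK_LAYER_example" that advertises it in its manifest passes validation
 * even if no ICD exposes that extension.
 */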
VkResult loader_validate_instance_extensions(
        const struct loader_extension_list *icd_exts,
        const struct loader_layer_list *instance_layer,
        const VkInstanceCreateInfo *pCreateInfo)
{
    VkExtensionProperties *extension_prop;
    struct loader_layer_properties *layer_prop;

    for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
        extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i],
                                                icd_exts);

        if (extension_prop) {
            continue;
        }

        extension_prop = NULL;

        /* Not in global list, search layer extension lists */
        for (uint32_t j = 0; j < pCreateInfo->layerCount; j++) {
            layer_prop = loader_get_layer_property(pCreateInfo->ppEnabledLayerNames[j],
                                                   instance_layer);
            if (!layer_prop) {
                /* Should NOT get here, loader_validate_layers
                 * should have already filtered this case out.
                 */
                continue;
            }

            extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i],
                                                    &layer_prop->instance_extension_list);
            if (extension_prop) {
                /* Found the extension in one of the layers enabled by the app. */
                break;
            }
        }

        if (!extension_prop) {
            /* Didn't find extension name in any of the global layers, error out */
            return VK_ERROR_INVALID_EXTENSION;
        }
    }
    return VK_SUCCESS;
}

VkResult loader_validate_device_extensions(
        struct loader_icd *icd,
        uint32_t gpu_index,
        const struct loader_layer_list *device_layer,
        const VkDeviceCreateInfo *pCreateInfo)
{
    VkExtensionProperties *extension_prop;
    struct loader_layer_properties *layer_prop;

    for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
        const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
        extension_prop = get_extension_property(extension_name,
                                                &icd->device_extension_cache[gpu_index]);

        if (extension_prop) {
            continue;
        }

        /* Not in global list, search layer extension lists */
        for (uint32_t j = 0; j < pCreateInfo->layerCount; j++) {
            const char *layer_name = pCreateInfo->ppEnabledLayerNames[j];
            layer_prop = loader_get_layer_property(layer_name,
                                                   device_layer);

            if (!layer_prop) {
                /* Should NOT get here, loader_validate_instance_layers
                 * should have already filtered this case out.
                 */
                continue;
            }

            extension_prop = get_extension_property(extension_name,
                                                    &layer_prop->device_extension_list);
            if (extension_prop) {
                /* Found the extension in one of the layers enabled by the app. */
                break;
            }
        }

        if (!extension_prop) {
            /* Didn't find extension name in any of the device layers, error out */
            return VK_ERROR_INVALID_EXTENSION;
        }
    }
    return VK_SUCCESS;
}

VkResult VKAPI loader_CreateInstance(
        const VkInstanceCreateInfo*     pCreateInfo,
        VkInstance*                     pInstance)
{
    struct loader_instance *ptr_instance = *(struct loader_instance **) pInstance;
    struct loader_icd *icd;
    VkExtensionProperties *prop;
    char **filtered_extension_names = NULL;
    VkInstanceCreateInfo icd_create_info;
    VkResult res = VK_SUCCESS;
    bool success;

    icd_create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
    icd_create_info.layerCount = 0;
    icd_create_info.ppEnabledLayerNames = NULL;
    icd_create_info.pAllocCb = pCreateInfo->pAllocCb;
    icd_create_info.pAppInfo = pCreateInfo->pAppInfo;
    icd_create_info.pNext = pCreateInfo->pNext;

    /*
     * NOTE: Need to filter the extensions to only those
     * supported by the ICD.
     * No ICD will advertise support for layers. An ICD
     * library could support a layer, but it would be
     * independent of the actual ICD, just in the same library.
     */
    filtered_extension_names = loader_stack_alloc(pCreateInfo->extensionCount * sizeof(char *));
    if (!filtered_extension_names) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    icd_create_info.ppEnabledExtensionNames = (const char * const *) filtered_extension_names;

    for (uint32_t i = 0; i < ptr_instance->icd_libs.count; i++) {
        icd = loader_icd_add(ptr_instance, &ptr_instance->icd_libs.list[i]);
        if (icd) {
            icd_create_info.extensionCount = 0;
            for (uint32_t j = 0; j < pCreateInfo->extensionCount; j++) {
                prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[j],
                                              &ptr_instance->ext_list);
                if (prop) {
                    filtered_extension_names[icd_create_info.extensionCount] = (char *) pCreateInfo->ppEnabledExtensionNames[j];
                    icd_create_info.extensionCount++;
                }
            }

            res = ptr_instance->icd_libs.list[i].CreateInstance(&icd_create_info,
                                                                &(icd->instance));
            success = loader_icd_init_entrys(
                            icd,
                            icd->instance,
                            ptr_instance->icd_libs.list[i].GetInstanceProcAddr);

            if (res != VK_SUCCESS || !success)
            {
                ptr_instance->icds = ptr_instance->icds->next;
                icd->instance = VK_NULL_HANDLE;
                loader_icd_destroy(ptr_instance, icd);
                loader_log(VK_DBG_REPORT_ERROR_BIT, 0,
                           "ICD ignored: failed to CreateInstance and find entrypoints with ICD");
            }
        }
    }

    /*
     * If no ICDs were added to the instance list and res is unchanged
     * from its initial value, the loader was unable to find
     * a suitable ICD.
     */
    if (ptr_instance->icds == NULL) {
        if (res == VK_SUCCESS) {
            return VK_ERROR_INCOMPATIBLE_DRIVER;
        } else {
            return res;
        }
    }

    return VK_SUCCESS;
}

VkResult VKAPI loader_DestroyInstance(
        VkInstance instance)
{
    struct loader_instance *ptr_instance = loader_instance(instance);
    struct loader_icd *icds = ptr_instance->icds;
    struct loader_icd *next_icd;
    VkResult res;

    // Remove this instance from the list of instances:
    struct loader_instance *prev = NULL;
    struct loader_instance *next = loader.instances;
    while (next != NULL) {
        if (next == ptr_instance) {
            // Remove this instance from the list:
            if (prev)
                prev->next = next->next;
            else
                loader.instances = next->next;
            break;
        }
        prev = next;
        next = next->next;
    }
    if (next == NULL) {
        // This must be an invalid instance handle or empty list
        return VK_ERROR_INVALID_HANDLE;
    }

    while (icds) {
        if (icds->instance) {
            res = icds->DestroyInstance(icds->instance);
            if (res != VK_SUCCESS)
                loader_log(VK_DBG_REPORT_WARN_BIT, 0,
                           "ICD ignored: failed to DestroyInstance on device");
        }
        next_icd = icds->next;
        icds->instance = VK_NULL_HANDLE;
        loader_icd_destroy(ptr_instance, icds);

        icds = next_icd;
    }
    loader_delete_layer_properties(ptr_instance, &ptr_instance->device_layer_list);
    loader_delete_layer_properties(ptr_instance, &ptr_instance->instance_layer_list);
    loader_scanned_icd_clear(ptr_instance, &ptr_instance->icd_libs);
    loader_destroy_ext_list(ptr_instance, &ptr_instance->ext_list);
    return VK_SUCCESS;
}

VkResult loader_init_physical_device_info(
        struct loader_instance *ptr_instance)
{
    struct loader_icd *icd;
    uint32_t n, count = 0;
    VkResult res = VK_ERROR_UNKNOWN;

    icd = ptr_instance->icds;
    while (icd) {
        res = icd->EnumeratePhysicalDevices(icd->instance, &n, NULL);
        if (res != VK_SUCCESS)
            return res;
        icd->gpu_count = n;
        count += n;
        icd = icd->next;
    }

    ptr_instance->total_gpu_count = count;

    icd = ptr_instance->icds;
    while (icd) {

        n = icd->gpu_count;
        icd->gpus = (VkPhysicalDevice *) loader_heap_alloc(
                                                ptr_instance,
                                                n * sizeof(VkPhysicalDevice),
                                                VK_SYSTEM_ALLOC_TYPE_INTERNAL);
        if (!icd->gpus) {
            /* TODO: Add cleanup code here */
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
        res = icd->EnumeratePhysicalDevices(
                                        icd->instance,
                                        &n,
                                        icd->gpus);
        if ((res == VK_SUCCESS) && (n == icd->gpu_count)) {

            for (unsigned int i = 0; i < n; i++) {

                loader_init_dispatch(icd->gpus[i], ptr_instance->disp);

                if (!loader_init_ext_list(ptr_instance, &icd->device_extension_cache[i])) {
                    /* TODO: Add cleanup code here */
                    res = VK_ERROR_OUT_OF_HOST_MEMORY;
                }
                if (res == VK_SUCCESS) {

                    loader_add_physical_device_extensions(
                                ptr_instance,
                                icd->GetPhysicalDeviceExtensionProperties,
                                icd->gpus[i],
                                icd->this_icd_lib->lib_name,
                                &icd->device_extension_cache[i]);

                }

                if (res != VK_SUCCESS) {
                    /* clean up any extension lists previously created before this request failed */
                    for (uint32_t j = 0; j < i; j++) {
                        loader_destroy_ext_list(
                                ptr_instance,
                                &icd->device_extension_cache[j]);
                    }

                    return res;
                }
            }

            count += n;
        }

        icd = icd->next;
    }

    return VK_SUCCESS;
}
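/*
 * Usage sketch (application side, not loader code): the enumeration entry
 * point below follows the usual two-call pattern -- query the count with a
 * NULL array, then fetch the handles.  Minimal example with error handling
 * omitted:
 *
 *     uint32_t count = 0;
 *     vkEnumeratePhysicalDevices(instance, &count, NULL);
 *     VkPhysicalDevice *gpus = malloc(count * sizeof(VkPhysicalDevice));
 *     vkEnumeratePhysicalDevices(instance, &count, gpus);
 */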
VkResult VKAPI loader_EnumeratePhysicalDevices(
        VkInstance instance,
        uint32_t* pPhysicalDeviceCount,
        VkPhysicalDevice* pPhysicalDevices)
{
    uint32_t index = 0;
    struct loader_instance *ptr_instance = (struct loader_instance *) instance;
    struct loader_icd *icd = ptr_instance->icds;

    if (ptr_instance->total_gpu_count == 0) {
        loader_init_physical_device_info(ptr_instance);
    }

    *pPhysicalDeviceCount = ptr_instance->total_gpu_count;
    if (!pPhysicalDevices) {
        return VK_SUCCESS;
    }

    while (icd) {
        assert((index + icd->gpu_count) <= *pPhysicalDeviceCount);
        memcpy(&pPhysicalDevices[index], icd->gpus, icd->gpu_count * sizeof(VkPhysicalDevice));
        index += icd->gpu_count;
        icd = icd->next;
    }

    return VK_SUCCESS;
}

VkResult VKAPI loader_GetPhysicalDeviceProperties(
        VkPhysicalDevice gpu,
        VkPhysicalDeviceProperties* pProperties)
{
    uint32_t gpu_index;
    struct loader_icd *icd = loader_get_icd(gpu, &gpu_index);
    VkResult res = VK_ERROR_INITIALIZATION_FAILED;

    if (icd->GetPhysicalDeviceProperties)
        res = icd->GetPhysicalDeviceProperties(gpu, pProperties);

    return res;
}

VkResult VKAPI loader_GetPhysicalDeviceQueueFamilyProperties(
        VkPhysicalDevice gpu,
        uint32_t* pCount,
        VkQueueFamilyProperties* pProperties)
{
    uint32_t gpu_index;
    struct loader_icd *icd = loader_get_icd(gpu, &gpu_index);
    VkResult res = VK_ERROR_INITIALIZATION_FAILED;

    if (icd->GetPhysicalDeviceQueueFamilyProperties)
        res = icd->GetPhysicalDeviceQueueFamilyProperties(gpu, pCount, pProperties);

    return res;
}

VkResult VKAPI loader_GetPhysicalDeviceMemoryProperties(
        VkPhysicalDevice gpu,
        VkPhysicalDeviceMemoryProperties* pProperties)
{
    uint32_t gpu_index;
    struct loader_icd *icd = loader_get_icd(gpu, &gpu_index);
    VkResult res = VK_ERROR_INITIALIZATION_FAILED;

    if (icd->GetPhysicalDeviceMemoryProperties)
        res = icd->GetPhysicalDeviceMemoryProperties(gpu, pProperties);

    return res;
}

VkResult VKAPI loader_GetPhysicalDeviceFeatures(
        VkPhysicalDevice physicalDevice,
        VkPhysicalDeviceFeatures* pFeatures)
{
    uint32_t gpu_index;
    struct loader_icd *icd = loader_get_icd(physicalDevice, &gpu_index);
    VkResult res = VK_ERROR_INITIALIZATION_FAILED;

    if (icd->GetPhysicalDeviceFeatures)
        res = icd->GetPhysicalDeviceFeatures(physicalDevice, pFeatures);

    return res;
}

VkResult VKAPI loader_GetPhysicalDeviceFormatProperties(
        VkPhysicalDevice physicalDevice,
        VkFormat format,
        VkFormatProperties* pFormatInfo)
{
    uint32_t gpu_index;
    struct loader_icd *icd = loader_get_icd(physicalDevice, &gpu_index);
    VkResult res = VK_ERROR_INITIALIZATION_FAILED;

    if (icd->GetPhysicalDeviceFormatProperties)
        res = icd->GetPhysicalDeviceFormatProperties(physicalDevice, format, pFormatInfo);

    return res;
}

VkResult VKAPI loader_GetPhysicalDeviceImageFormatProperties(
        VkPhysicalDevice physicalDevice,
        VkFormat format,
        VkImageType type,
        VkImageTiling tiling,
        VkImageUsageFlags usage,
        VkImageFormatProperties* pImageFormatProperties)
{
    uint32_t gpu_index;
    struct loader_icd *icd = loader_get_icd(physicalDevice, &gpu_index);
    VkResult res = VK_ERROR_INITIALIZATION_FAILED;

    if (icd->GetPhysicalDeviceImageFormatProperties)
        res = icd->GetPhysicalDeviceImageFormatProperties(physicalDevice, format,
                                type, tiling, usage, pImageFormatProperties);

    return res;
}

VkResult VKAPI loader_GetPhysicalDeviceSparseImageFormatProperties(
        VkPhysicalDevice physicalDevice,
        VkFormat format,
        VkImageType type,
        uint32_t samples,
        VkImageUsageFlags usage,
        VkImageTiling tiling,
        uint32_t* pNumProperties,
        VkSparseImageFormatProperties* pProperties)
{
    uint32_t gpu_index;
    struct loader_icd *icd = loader_get_icd(physicalDevice, &gpu_index);
    VkResult res = VK_ERROR_INITIALIZATION_FAILED;

    if (icd->GetPhysicalDeviceSparseImageFormatProperties)
        res = icd->GetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling, pNumProperties, pProperties);

    return res;
}

VkResult VKAPI loader_CreateDevice(
        VkPhysicalDevice gpu,
        const VkDeviceCreateInfo* pCreateInfo,
        VkDevice* pDevice)
{
    uint32_t gpu_index;
    struct loader_icd *icd = loader_get_icd(gpu, &gpu_index);
    struct loader_device *dev;
    const struct loader_instance *inst = icd->this_instance;
    VkDeviceCreateInfo device_create_info;
    char **filtered_extension_names = NULL;
    VkResult res;

    if (!icd->CreateDevice) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    /* validate any app enabled layers are available */
    if (pCreateInfo->layerCount > 0) {
        res = loader_validate_layers(pCreateInfo->layerCount,
                                     pCreateInfo->ppEnabledLayerNames,
                                     &inst->device_layer_list);
        if (res != VK_SUCCESS) {
            return res;
        }
    }

    res = loader_validate_device_extensions(icd, gpu_index, &inst->device_layer_list, pCreateInfo);
    if (res != VK_SUCCESS) {
        return res;
    }

    /*
     * NOTE: Need to filter the extensions to only those
     * supported by the ICD.
     * No ICD will advertise support for layers. An ICD
     * library could support a layer, but it would be
     * independent of the actual ICD, just in the same library.
2946 */ 2947 filtered_extension_names = loader_stack_alloc(pCreateInfo->extensionCount * sizeof(char *)); 2948 if (!filtered_extension_names) { 2949 return VK_ERROR_OUT_OF_HOST_MEMORY; 2950 } 2951 2952 /* Copy user's data */ 2953 memcpy(&device_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo)); 2954 2955 /* ICD's do not use layers */ 2956 device_create_info.layerCount = 0; 2957 device_create_info.ppEnabledLayerNames = NULL; 2958 2959 device_create_info.extensionCount = 0; 2960 device_create_info.ppEnabledExtensionNames = (const char * const *) filtered_extension_names; 2961 2962 for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) { 2963 const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i]; 2964 VkExtensionProperties *prop = get_extension_property(extension_name, 2965 &icd->device_extension_cache[gpu_index]); 2966 if (prop) { 2967 filtered_extension_names[device_create_info.extensionCount] = (char *) extension_name; 2968 device_create_info.extensionCount++; 2969 } 2970 } 2971 2972 res = icd->CreateDevice(gpu, pCreateInfo, pDevice); 2973 if (res != VK_SUCCESS) { 2974 return res; 2975 } 2976 2977 dev = loader_add_logical_device(inst, *pDevice, &icd->logical_device_list); 2978 if (dev == NULL) { 2979 return VK_ERROR_OUT_OF_HOST_MEMORY; 2980 } 2981 PFN_vkGetDeviceProcAddr get_proc_addr = icd->GetDeviceProcAddr; 2982 loader_init_device_dispatch_table(&dev->loader_dispatch, get_proc_addr, 2983 *pDevice, *pDevice); 2984 2985 dev->loader_dispatch.CreateDevice = scratch_vkCreateDevice; 2986 loader_init_dispatch(*pDevice, &dev->loader_dispatch); 2987 2988 /* activate any layers on device chain which terminates with device*/ 2989 res = loader_enable_device_layers(inst, icd, dev, pCreateInfo, &inst->device_layer_list); 2990 if (res != VK_SUCCESS) { 2991 loader_destroy_logical_device(inst, dev); 2992 return res; 2993 } 2994 loader_activate_device_layers(inst, dev, *pDevice); 2995 2996 res = dev->loader_dispatch.CreateDevice(gpu, pCreateInfo, pDevice); 2997 2998 dev->loader_dispatch.CreateDevice = icd->CreateDevice; 2999 3000 return res; 3001} 3002 3003static PFN_vkVoidFunction VKAPI loader_GetInstanceProcAddr(VkInstance instance, const char * pName) 3004{ 3005 if (instance == VK_NULL_HANDLE) 3006 return NULL; 3007 3008 void *addr; 3009 /* get entrypoint addresses that are global (in the loader)*/ 3010 addr = globalGetProcAddr(pName); 3011 if (addr) 3012 return addr; 3013 3014 struct loader_instance *ptr_instance = (struct loader_instance *) instance; 3015 3016 /* return any extension global entrypoints */ 3017 addr = debug_report_instance_gpa(ptr_instance, pName); 3018 if (addr) { 3019 return addr; 3020 } 3021 3022 addr = wsi_swapchain_GetInstanceProcAddr(ptr_instance, pName); 3023 if (addr) { 3024 return addr; 3025 } 3026 3027 /* return the instance dispatch table entrypoint for extensions */ 3028 const VkLayerInstanceDispatchTable *disp_table = * (VkLayerInstanceDispatchTable **) instance; 3029 if (disp_table == NULL) 3030 return NULL; 3031 3032 addr = loader_lookup_instance_dispatch_table(disp_table, pName); 3033 if (addr) 3034 return addr; 3035 3036 return NULL; 3037} 3038 3039LOADER_EXPORT PFN_vkVoidFunction VKAPI vkGetInstanceProcAddr(VkInstance instance, const char * pName) 3040{ 3041 return loader_GetInstanceProcAddr(instance, pName); 3042} 3043 3044static PFN_vkVoidFunction VKAPI loader_GetDeviceProcAddr(VkDevice device, const char * pName) 3045{ 3046 if (device == VK_NULL_HANDLE) { 3047 return NULL; 3048 } 3049 3050 void *addr; 3051 3052 /* for entrypoints that 
static PFN_vkVoidFunction VKAPI loader_GetDeviceProcAddr(VkDevice device, const char * pName)
{
    if (device == VK_NULL_HANDLE) {
        return NULL;
    }

    void *addr;

    /* for entrypoints that the loader must handle (ie non-dispatchable or create object)
       make sure the loader entrypoint is returned */
    addr = loader_non_passthrough_gpa(pName);
    if (addr) {
        return addr;
    }

    /* return the dispatch table entrypoint for the fastest case */
    const VkLayerDispatchTable *disp_table = * (VkLayerDispatchTable **) device;
    if (disp_table == NULL)
        return NULL;

    addr = loader_lookup_device_dispatch_table(disp_table, pName);
    if (addr)
        return addr;
    else {
        if (disp_table->GetDeviceProcAddr == NULL)
            return NULL;
        return disp_table->GetDeviceProcAddr(device, pName);
    }
}

LOADER_EXPORT PFN_vkVoidFunction VKAPI vkGetDeviceProcAddr(VkDevice device, const char * pName)
{
    return loader_GetDeviceProcAddr(device, pName);
}

LOADER_EXPORT VkResult VKAPI vkGetGlobalExtensionProperties(
        const char*              pLayerName,
        uint32_t*                pCount,
        VkExtensionProperties*   pProperties)
{
    struct loader_extension_list *global_ext_list = NULL;
    struct loader_layer_list instance_layers;
    struct loader_extension_list icd_extensions;
    struct loader_icd_libs icd_libs;
    uint32_t copy_size;

    tls_instance = NULL;
    if (pCount == NULL) {
        return VK_ERROR_INVALID_POINTER;
    }

    memset(&icd_extensions, 0, sizeof(icd_extensions));
    loader_platform_thread_once(&once_init, loader_initialize);

    //TODO do we still need to lock? for loader.global_extensions
    loader_platform_thread_lock_mutex(&loader_lock);
    /* get layer libraries if needed */
    if (pLayerName && strlen(pLayerName) != 0) {
        memset(&instance_layers, 0, sizeof(instance_layers));
        loader_layer_scan(NULL, &instance_layers, NULL);
        for (uint32_t i = 0; i < instance_layers.count; i++) {
            struct loader_layer_properties *props = &instance_layers.list[i];
            if (strcmp(props->info.layerName, pLayerName) == 0) {
                global_ext_list = &props->instance_extension_list;
            }
        }
        loader_destroy_layer_list(NULL, &instance_layers);
    }
    else {
        /* Scan/discover all ICD libraries */
        memset(&icd_libs, 0, sizeof(struct loader_icd_libs));
        loader_icd_scan(NULL, &icd_libs);
        /* get extensions from all ICD's, merge so no duplicates */
        loader_get_icd_loader_instance_extensions(NULL, &icd_libs, &icd_extensions);
        loader_scanned_icd_clear(NULL, &icd_libs);
        global_ext_list = &icd_extensions;
    }

    if (global_ext_list == NULL) {
        loader_platform_thread_unlock_mutex(&loader_lock);
        return VK_ERROR_INVALID_LAYER;
    }

    if (pProperties == NULL) {
        *pCount = global_ext_list->count;
        loader_destroy_ext_list(NULL, &icd_extensions);
        loader_platform_thread_unlock_mutex(&loader_lock);
        return VK_SUCCESS;
    }

    copy_size = *pCount < global_ext_list->count ? *pCount : global_ext_list->count;
    for (uint32_t i = 0; i < copy_size; i++) {
        memcpy(&pProperties[i],
               &global_ext_list->list[i],
               sizeof(VkExtensionProperties));
    }
    *pCount = copy_size;
    loader_destroy_ext_list(NULL, &icd_extensions);
    loader_platform_thread_unlock_mutex(&loader_lock);

    if (copy_size < global_ext_list->count) {
        return VK_INCOMPLETE;
    }

    return VK_SUCCESS;
}

LOADER_EXPORT VkResult VKAPI vkGetGlobalLayerProperties(
        uint32_t*           pCount,
        VkLayerProperties*  pProperties)
{

    struct loader_layer_list instance_layer_list;
    tls_instance = NULL;

    loader_platform_thread_once(&once_init, loader_initialize);

    uint32_t copy_size;

    if (pCount == NULL) {
        return VK_ERROR_INVALID_POINTER;
    }

    /* TODO: do we still need to lock */
    loader_platform_thread_lock_mutex(&loader_lock);

    /* get layer libraries */
    memset(&instance_layer_list, 0, sizeof(instance_layer_list));
    loader_layer_scan(NULL, &instance_layer_list, NULL);

    if (pProperties == NULL) {
        *pCount = instance_layer_list.count;
        loader_destroy_layer_list(NULL, &instance_layer_list);
        loader_platform_thread_unlock_mutex(&loader_lock);
        return VK_SUCCESS;
    }

    copy_size = (*pCount < instance_layer_list.count) ? *pCount : instance_layer_list.count;
    for (uint32_t i = 0; i < copy_size; i++) {
        memcpy(&pProperties[i], &instance_layer_list.list[i].info, sizeof(VkLayerProperties));
    }
    *pCount = copy_size;
    loader_destroy_layer_list(NULL, &instance_layer_list);
    loader_platform_thread_unlock_mutex(&loader_lock);

    if (copy_size < instance_layer_list.count) {
        return VK_INCOMPLETE;
    }

    return VK_SUCCESS;
}

VkResult VKAPI loader_GetPhysicalDeviceExtensionProperties(
        VkPhysicalDevice gpu,
        const char* pLayerName,
        uint32_t* pCount,
        VkExtensionProperties* pProperties)
{
    uint32_t gpu_index;
    struct loader_icd *icd = loader_get_icd(gpu, &gpu_index);
    uint32_t copy_size;

    if (pCount == NULL) {
        return VK_ERROR_INVALID_POINTER;
    }

    uint32_t count;
    struct loader_extension_list *dev_ext_list = NULL;

    /* get layer libraries if needed */
    if (pLayerName && strlen(pLayerName) != 0) {
        for (uint32_t i = 0; i < icd->this_instance->device_layer_list.count; i++) {
            struct loader_layer_properties *props = &icd->this_instance->device_layer_list.list[i];
            if (strcmp(props->info.layerName, pLayerName) == 0) {
                dev_ext_list = &props->device_extension_list;
            }
        }
        if (dev_ext_list == NULL) {
            /* the named layer was not found in the device layer list */
            return VK_ERROR_INVALID_LAYER;
        }
    }
    else {
        dev_ext_list = &icd->device_extension_cache[gpu_index];
    }

    count = dev_ext_list->count;
    if (pProperties == NULL) {
        *pCount = count;
        return VK_SUCCESS;
    }

    copy_size = *pCount < count ? *pCount : count;
    for (uint32_t i = 0; i < copy_size; i++) {
        memcpy(&pProperties[i],
               &dev_ext_list->list[i],
               sizeof(VkExtensionProperties));
    }
    *pCount = copy_size;

    if (copy_size < count) {
        return VK_INCOMPLETE;
    }

    return VK_SUCCESS;
}

VkResult VKAPI loader_GetPhysicalDeviceLayerProperties(
        VkPhysicalDevice gpu,
        uint32_t* pCount,
        VkLayerProperties* pProperties)
{
    uint32_t copy_size;
    uint32_t gpu_index;
    struct loader_icd *icd = loader_get_icd(gpu, &gpu_index);

    if (pCount == NULL) {
        return VK_ERROR_INVALID_POINTER;
    }

    uint32_t count = icd->this_instance->device_layer_list.count;

    if (pProperties == NULL) {
        *pCount = count;
        return VK_SUCCESS;
    }

    copy_size = (*pCount < count) ? *pCount : count;
    for (uint32_t i = 0; i < copy_size; i++) {
        memcpy(&pProperties[i], &(icd->this_instance->device_layer_list.list[i].info), sizeof(VkLayerProperties));
    }
    *pCount = copy_size;

    if (copy_size < count) {
        return VK_INCOMPLETE;
    }

    return VK_SUCCESS;
}