loader.c revision 809d5d3fb2d5138ed2c445ac9b0e894872968d33
/*
 *
 * Copyright (C) 2015 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Chia-I Wu <olvaffe@gmail.com>
 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
 * Author: Ian Elliott <ian@LunarG.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 *
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <string.h>

#include <sys/types.h>
#if defined(_WIN32)
#include "dirent_on_windows.h"
#else // _WIN32
#include <dirent.h>
#endif // _WIN32
#include "vk_loader_platform.h"
#include "loader.h"
#include "gpa_helper.h"
#include "table_ops.h"
#include "debug_report.h"
#include "wsi.h"
#include "vulkan/vk_icd.h"
#include "cJSON.h"
#include "murmurhash.h"

static loader_platform_dl_handle loader_add_layer_lib(
        const struct loader_instance *inst,
        const char *chain_type,
        struct loader_layer_properties *layer_prop);

static void loader_remove_layer_lib(
        struct loader_instance *inst,
        struct loader_layer_properties *layer_prop);

struct loader_struct loader = {0};
// TLS for instance for alloc/free callbacks
THREAD_LOCAL_DECL struct loader_instance *tls_instance;

static bool loader_init_ext_list(
        const struct loader_instance *inst,
        struct loader_extension_list *ext_info);

static int loader_platform_combine_path(char *dest, int len, ...);

struct loader_phys_dev_per_icd {
    uint32_t count;
    VkPhysicalDevice *phys_devs;
};

enum loader_debug {
    LOADER_INFO_BIT = 0x01,
    LOADER_WARN_BIT = 0x02,
    LOADER_PERF_BIT = 0x04,
    LOADER_ERROR_BIT = 0x08,
    LOADER_DEBUG_BIT = 0x10,
};

uint32_t g_loader_debug = 0;
uint32_t g_loader_log_msgs = 0;

// Thread safety lock for accessing global data structures such as "loader".
// All entrypoints on the instance chain need to be locked except GPA;
// additionally, CreateDevice and DestroyDevice need to be locked.
loader_platform_thread_mutex loader_lock;
loader_platform_thread_mutex loader_json_lock;

// This table contains the loader's instance dispatch table, which contains
// default functions if no instance layers are activated. These are
// pointers to "terminator functions".
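// Note: the platform-specific WSI entries below are present only when the
// corresponding VK_USE_PLATFORM_*_KHR macro is defined at build time.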
const VkLayerInstanceDispatchTable instance_disp = {
    .GetInstanceProcAddr = vkGetInstanceProcAddr,
    .CreateInstance = loader_CreateInstance,
    .DestroyInstance = loader_DestroyInstance,
    .EnumeratePhysicalDevices = loader_EnumeratePhysicalDevices,
    .GetPhysicalDeviceFeatures = loader_GetPhysicalDeviceFeatures,
    .GetPhysicalDeviceFormatProperties = loader_GetPhysicalDeviceFormatProperties,
    .GetPhysicalDeviceImageFormatProperties = loader_GetPhysicalDeviceImageFormatProperties,
    .GetPhysicalDeviceProperties = loader_GetPhysicalDeviceProperties,
    .GetPhysicalDeviceQueueFamilyProperties = loader_GetPhysicalDeviceQueueFamilyProperties,
    .GetPhysicalDeviceMemoryProperties = loader_GetPhysicalDeviceMemoryProperties,
    .EnumerateDeviceExtensionProperties = loader_EnumerateDeviceExtensionProperties,
    .EnumerateDeviceLayerProperties = loader_EnumerateDeviceLayerProperties,
    .GetPhysicalDeviceSparseImageFormatProperties = loader_GetPhysicalDeviceSparseImageFormatProperties,
    .DestroySurfaceKHR = loader_DestroySurfaceKHR,
    .GetPhysicalDeviceSurfaceSupportKHR = loader_GetPhysicalDeviceSurfaceSupportKHR,
    .GetPhysicalDeviceSurfaceCapabilitiesKHR = loader_GetPhysicalDeviceSurfaceCapabilitiesKHR,
    .GetPhysicalDeviceSurfaceFormatsKHR = loader_GetPhysicalDeviceSurfaceFormatsKHR,
    .GetPhysicalDeviceSurfacePresentModesKHR = loader_GetPhysicalDeviceSurfacePresentModesKHR,
    .DbgCreateMsgCallback = loader_DbgCreateMsgCallback,
    .DbgDestroyMsgCallback = loader_DbgDestroyMsgCallback,
#ifdef VK_USE_PLATFORM_MIR_KHR
    .CreateMirSurfaceKHR = loader_CreateMirSurfaceKHR,
    .GetPhysicalDeviceMirPresentationSupportKHR = loader_GetPhysicalDeviceMirPresentationSupportKHR,
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
    .CreateWaylandSurfaceKHR = loader_CreateWaylandSurfaceKHR,
    .GetPhysicalDeviceWaylandPresentationSupportKHR = loader_GetPhysicalDeviceWaylandPresentationSupportKHR,
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
    .CreateWin32SurfaceKHR = loader_CreateWin32SurfaceKHR,
    .GetPhysicalDeviceWin32PresentationSupportKHR = loader_GetPhysicalDeviceWin32PresentationSupportKHR,
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
    .CreateXcbSurfaceKHR = loader_CreateXcbSurfaceKHR,
    .GetPhysicalDeviceXcbPresentationSupportKHR = loader_GetPhysicalDeviceXcbPresentationSupportKHR,
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
    .CreateXlibSurfaceKHR = loader_CreateXlibSurfaceKHR,
    .GetPhysicalDeviceXlibPresentationSupportKHR = loader_GetPhysicalDeviceXlibPresentationSupportKHR,
#endif
};

LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init);

void* loader_heap_alloc(
    const struct loader_instance *instance,
    size_t size,
    VkSystemAllocationScope alloc_scope)
{
    if (instance && instance->alloc_callbacks.pfnAllocation) {
        /* TODO: What should default alignment be? 1, 4, 8, other?
         */
        return instance->alloc_callbacks.pfnAllocation(instance->alloc_callbacks.pUserData, size, 4, alloc_scope);
    }
    return malloc(size);
}

void loader_heap_free(
    const struct loader_instance *instance,
    void *pMemory)
{
    if (pMemory == NULL) return;
    if (instance && instance->alloc_callbacks.pfnFree) {
        instance->alloc_callbacks.pfnFree(instance->alloc_callbacks.pUserData, pMemory);
        return;
    }
    free(pMemory);
}

void* loader_heap_realloc(
    const struct loader_instance *instance,
    void *pMemory,
    size_t orig_size,
    size_t size,
    VkSystemAllocationScope alloc_scope)
{
    if (pMemory == NULL || orig_size == 0)
        return loader_heap_alloc(instance, size, alloc_scope);
    if (size == 0) {
        loader_heap_free(instance, pMemory);
        return NULL;
    }
    if (instance && instance->alloc_callbacks.pfnAllocation) {
        if (size <= orig_size) {
            memset(((uint8_t *)pMemory) + size, 0, orig_size - size);
            return pMemory;
        }
        void *new_ptr = instance->alloc_callbacks.pfnAllocation(instance->alloc_callbacks.pUserData, size, 4, alloc_scope);
        if (!new_ptr)
            return NULL;
        memcpy(new_ptr, pMemory, orig_size);
        instance->alloc_callbacks.pfnFree(instance->alloc_callbacks.pUserData, pMemory);
        return new_ptr;
    }
    return realloc(pMemory, size);
}

void *loader_tls_heap_alloc(size_t size)
{
    return loader_heap_alloc(tls_instance, size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
}

void loader_tls_heap_free(void *pMemory)
{
    loader_heap_free(tls_instance, pMemory);
}

static void loader_log(VkFlags msg_type, int32_t msg_code,
                       const char *format, ...)
{
    char msg[512];
    va_list ap;
    int ret;

    if (!(msg_type & g_loader_log_msgs)) {
        return;
    }

    va_start(ap, format);
    ret = vsnprintf(msg, sizeof(msg), format, ap);
    if ((ret >= (int) sizeof(msg)) || ret < 0) {
        msg[sizeof(msg) - 1] = '\0';
    }
    va_end(ap);

#if defined(WIN32)
    OutputDebugString(msg);
    OutputDebugString("\n");
#endif
    fputs(msg, stderr);
    fputc('\n', stderr);
}

#if defined(WIN32)
static char *loader_get_next_path(char *path);
/**
* Find the list of registry files (names within a key) in key "location".
*
* This function looks in the registry (hive = DEFAULT_VK_REGISTRY_HIVE) key as given in "location"
* for a list of name/value pairs which are added to a returned list (function return value).
* The DWORD values within the key must be 0 or they are skipped.
* Function return is a string with a ';' separated list of filenames.
* Function return is NULL if no valid name/value pairs are found in the key,
* or the key is not found.
*
* \returns
* A string list of filenames as pointer.
* When done using the returned string list, pointer should be freed.
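* Illustrative example (the key path and file name here are hypothetical): a value
* named C:\SomeDriver\some_icd.json with DWORD data 0 under the queried key would
* contribute "C:\SomeDriver\some_icd.json" to the ';' separated list that is returned.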
*/
static char *loader_get_registry_files(const struct loader_instance *inst, char *location)
{
    LONG rtn_value;
    HKEY hive, key;
    DWORD access_flags;
    char name[2048];
    char *out = NULL;
    char *loc = location;
    char *next;
    DWORD idx = 0;
    DWORD name_size = sizeof(name);
    DWORD value;
    DWORD total_size = 4096;
    DWORD value_size = sizeof(value);

    while (*loc)
    {
        next = loader_get_next_path(loc);
        hive = DEFAULT_VK_REGISTRY_HIVE;
        access_flags = KEY_QUERY_VALUE;
        rtn_value = RegOpenKeyEx(hive, loc, 0, access_flags, &key);
        if (rtn_value != ERROR_SUCCESS) {
            // We didn't find the key. Try the 32-bit hive (where we've seen the
            // key end up on some people's systems):
            access_flags |= KEY_WOW64_32KEY;
            rtn_value = RegOpenKeyEx(hive, loc, 0, access_flags, &key);
            if (rtn_value != ERROR_SUCCESS) {
                // We still couldn't find the key, so give up:
                loc = next;
                continue;
            }
        }

        while ((rtn_value = RegEnumValue(key, idx++, name, &name_size, NULL, NULL, (LPBYTE) &value, &value_size)) == ERROR_SUCCESS) {
            if (value_size == sizeof(value) && value == 0) {
                if (out == NULL) {
                    out = loader_heap_alloc(inst, total_size, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
                    out[0] = '\0';
                }
                else if (strlen(out) + name_size + 1 > total_size) {
                    out = loader_heap_realloc(inst, out, total_size, total_size * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
                    total_size *= 2;
                }
                if (out == NULL) {
                    loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory, failed loader_get_registry_files");
                    return NULL;
                }
                if (strlen(out) == 0)
                    snprintf(out, name_size + 1, "%s", name);
                else
                    snprintf(out + strlen(out), name_size + 2, "%c%s", PATH_SEPERATOR, name);
            }
            name_size = 2048;
        }
        loc = next;
    }

    return out;
}

#endif // WIN32

/**
 * Combine path elements, separating each element with the platform-specific
 * directory separator, and save the combined string to a destination buffer,
 * not exceeding the given length. Path elements are given as variadic args,
 * with a NULL element terminating the list.
 *
 * \returns the total length of the combined string, not including an ASCII
 * NUL termination character. This length may exceed the available storage:
 * in this case, the written string will be truncated to avoid a buffer
 * overrun, and the return value will be greater than or equal to the storage
 * size. A NULL argument may be provided as the destination buffer in order
 * to determine the required string length without actually writing a string.
 */

static int loader_platform_combine_path(char *dest, int len, ...)
{
    int required_len = 0;
    va_list ap;
    const char *component;

    va_start(ap, len);

    while ((component = va_arg(ap, const char *))) {
        if (required_len > 0) {
            // This path element is not the first non-empty element; prepend
            // a directory separator if space allows
            if (dest && required_len + 1 < len) {
                snprintf(dest + required_len, len - required_len, "%c",
                         DIRECTORY_SYMBOL);
            }
            required_len++;
        }

        if (dest && required_len < len) {
            strncpy(dest + required_len, component, len - required_len);
        }
        required_len += strlen(component);
    }

    va_end(ap);

    // strncpy(3) won't add a NUL terminating byte in the event of truncation.
    if (dest && required_len >= len) {
        dest[len - 1] = '\0';
    }

    return required_len;
}


/**
 * Given a string of the three-part form "maj.min.patch", convert it to a
 * Vulkan version number.
 */
static uint32_t loader_make_version(const char *vers_str)
{
    uint32_t vers = 0, major = 0, minor = 0, patch = 0;
    char *minor_str = NULL;
    char *patch_str = NULL;
    char *cstr;
    char *str;

    if (!vers_str)
        return vers;
    cstr = loader_stack_alloc(strlen(vers_str) + 1);
    strcpy(cstr, vers_str);
    while ((str = strchr(cstr, '.')) != NULL) {
        if (minor_str == NULL) {
            minor_str = str + 1;
            *str = '\0';
            major = atoi(cstr);
        }
        else if (patch_str == NULL) {
            patch_str = str + 1;
            *str = '\0';
            minor = atoi(minor_str);
        }
        else {
            return vers;
        }
        cstr = str + 1;
    }
    // Guard against malformed strings such as "maj.min" (no patch component).
    if (minor_str != NULL && patch_str == NULL)
        minor = atoi(minor_str);
    if (patch_str != NULL)
        patch = atoi(patch_str);

    return VK_MAKE_VERSION(major, minor, patch);

}

bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2)
{
    return strcmp(op1->extensionName, op2->extensionName) == 0 ? true : false;
}

/**
 * Search the given ext_array for an extension
 * matching the given vk_ext_prop
 */
bool has_vk_extension_property_array(
    const VkExtensionProperties *vk_ext_prop,
    const uint32_t count,
    const VkExtensionProperties *ext_array)
{
    for (uint32_t i = 0; i < count; i++) {
        if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i]))
            return true;
    }
    return false;
}

/**
 * Search the given ext_list for an extension
 * matching the given vk_ext_prop
 */
bool has_vk_extension_property(
    const VkExtensionProperties *vk_ext_prop,
    const struct loader_extension_list *ext_list)
{
    for (uint32_t i = 0; i < ext_list->count; i++) {
        if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop))
            return true;
    }
    return false;
}

static inline bool loader_is_layer_type_device(const enum layer_type type) {
    if ((type & VK_LAYER_TYPE_DEVICE_EXPLICIT) ||
        (type & VK_LAYER_TYPE_DEVICE_IMPLICIT))
        return true;
    return false;
}

/*
 * Search the given layer list for a layer matching the given layer name
 */
static struct loader_layer_properties *loader_get_layer_property(
    const char *name,
    const struct loader_layer_list *layer_list)
{
    for (uint32_t i = 0; i < layer_list->count; i++) {
        const VkLayerProperties *item = &layer_list->list[i].info;
        if (strcmp(name, item->layerName) == 0)
            return &layer_list->list[i];
    }
    return NULL;
}

/**
 * Get the next unused layer property in the list. Init the property to zero.
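 * The backing array is allocated with room for 64 entries on first use and is
 * doubled when it fills (see the implementation below).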
 */
static struct loader_layer_properties *loader_get_next_layer_property(
    const struct loader_instance *inst,
    struct loader_layer_list *layer_list)
{
    if (layer_list->capacity == 0) {
        layer_list->list = loader_heap_alloc(inst,
                                             sizeof(struct loader_layer_properties) * 64,
                                             VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (layer_list->list == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't add any layer properties to list");
            return NULL;
        }
        memset(layer_list->list, 0, sizeof(struct loader_layer_properties) * 64);
        layer_list->capacity = sizeof(struct loader_layer_properties) * 64;
    }

    // ensure enough room to add an entry
    if ((layer_list->count + 1) * sizeof(struct loader_layer_properties)
            > layer_list->capacity) {
        layer_list->list = loader_heap_realloc(inst, layer_list->list,
                                               layer_list->capacity,
                                               layer_list->capacity * 2,
                                               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (layer_list->list == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0,
                       "realloc failed for layer list");
            return NULL;
        }
        layer_list->capacity *= 2;
    }

    layer_list->count++;
    return &(layer_list->list[layer_list->count - 1]);
}

/**
 * Remove all layer property entries from the list
 */
void loader_delete_layer_properties(
    const struct loader_instance *inst,
    struct loader_layer_list *layer_list)
{
    uint32_t i;

    if (!layer_list)
        return;

    for (i = 0; i < layer_list->count; i++) {
        loader_destroy_ext_list(inst, &layer_list->list[i].instance_extension_list);
        loader_destroy_ext_list(inst, &layer_list->list[i].device_extension_list);
    }
    layer_list->count = 0;

    if (layer_list->capacity > 0) {
        layer_list->capacity = 0;
        loader_heap_free(inst, layer_list->list);
    }

}

static void loader_add_global_extensions(
    const struct loader_instance *inst,
    const PFN_vkEnumerateInstanceExtensionProperties fp_get_props,
    const char *lib_name,
    struct loader_extension_list *ext_list)
{
    uint32_t i, count;
    VkExtensionProperties *ext_props;
    VkResult res;

    if (!fp_get_props) {
        /* No EnumerateInstanceExtensionProperties defined */
        return;
    }

    res = fp_get_props(NULL, &count, NULL);
    if (res != VK_SUCCESS) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Error getting global extension count from %s", lib_name);
        return;
    }

    if (count == 0) {
        /* No ExtensionProperties to report */
        return;
    }

    ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));

    res = fp_get_props(NULL, &count, ext_props);
    if (res != VK_SUCCESS) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Error getting global extensions from %s", lib_name);
        return;
    }

    for (i = 0; i < count; i++) {
        char spec_version[64];

        snprintf(spec_version, sizeof(spec_version), "%d.%d.%d",
                 VK_MAJOR(ext_props[i].specVersion),
                 VK_MINOR(ext_props[i].specVersion),
                 VK_PATCH(ext_props[i].specVersion));
        loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
                   "Global Extension: %s (%s) version %s",
                   ext_props[i].extensionName, lib_name, spec_version);
        loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
    }

    return;
}

/*
 * Initialize ext_list with the physical device extensions.
 * The extension properties are passed as inputs in count and ext_props.
 */
static VkResult loader_init_physical_device_extensions(
    const struct loader_instance *inst,
    struct loader_physical_device *phys_dev,
    uint32_t count,
    VkExtensionProperties *ext_props,
    struct loader_extension_list *ext_list)
{
    VkResult res;
    uint32_t i;

    if (!loader_init_ext_list(inst, ext_list)) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    for (i = 0; i < count; i++) {
        char spec_version[64];

        snprintf(spec_version, sizeof(spec_version), "%d.%d.%d",
                 VK_MAJOR(ext_props[i].specVersion),
                 VK_MINOR(ext_props[i].specVersion),
                 VK_PATCH(ext_props[i].specVersion));
        loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
                   "PhysicalDevice Extension: %s (%s) version %s",
                   ext_props[i].extensionName, phys_dev->this_icd->this_icd_lib->lib_name, spec_version);
        res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
        if (res != VK_SUCCESS)
            return res;
    }

    return VK_SUCCESS;
}

static VkResult loader_add_physical_device_extensions(
    const struct loader_instance *inst,
    VkPhysicalDevice physical_device,
    const char *lib_name,
    struct loader_extension_list *ext_list)
{
    uint32_t i, count;
    VkResult res;
    VkExtensionProperties *ext_props;

    res = loader_EnumerateDeviceExtensionProperties(physical_device, NULL, &count, NULL);
    if (res == VK_SUCCESS && count > 0) {
        ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
        if (!ext_props)
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        res = loader_EnumerateDeviceExtensionProperties(physical_device, NULL, &count, ext_props);
        if (res != VK_SUCCESS)
            return res;
        for (i = 0; i < count; i++) {
            char spec_version[64];

            snprintf(spec_version, sizeof(spec_version), "%d.%d.%d",
                     VK_MAJOR(ext_props[i].specVersion),
                     VK_MINOR(ext_props[i].specVersion),
                     VK_PATCH(ext_props[i].specVersion));
            loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
                       "PhysicalDevice Extension: %s (%s) version %s",
                       ext_props[i].extensionName, lib_name, spec_version);
            res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
            if (res != VK_SUCCESS)
                return res;
        }
    } else {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Error getting physical device extension info count from library %s", lib_name);
        return res;
    }

    return VK_SUCCESS;
}

static bool loader_init_ext_list(const struct loader_instance *inst,
                                 struct loader_extension_list *ext_info)
{
    ext_info->capacity = 32 * sizeof(VkExtensionProperties);
    ext_info->list = loader_heap_alloc(inst, ext_info->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (ext_info->list == NULL) {
        return false;
    }
    memset(ext_info->list, 0, ext_info->capacity);
    ext_info->count = 0;
    return true;
}

void loader_destroy_ext_list(const struct loader_instance *inst,
                             struct loader_extension_list *ext_info)
{
    loader_heap_free(inst, ext_info->list);
    ext_info->count = 0;
    ext_info->capacity = 0;
}

/*
 * Append non-duplicate extension properties defined in props
 * to the given ext_list.
 * Return
 * VK_SUCCESS on success
 */
VkResult loader_add_to_ext_list(
    const struct loader_instance *inst,
    struct loader_extension_list *ext_list,
    uint32_t prop_list_count,
    const VkExtensionProperties *props)
{
    uint32_t i;
    const VkExtensionProperties *cur_ext;

    if (ext_list->list == NULL || ext_list->capacity == 0) {
        loader_init_ext_list(inst, ext_list);
    }

    if (ext_list->list == NULL)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    for (i = 0; i < prop_list_count; i++) {
        cur_ext = &props[i];

        // look for duplicates
        if (has_vk_extension_property(cur_ext, ext_list)) {
            continue;
        }

        // add to list at end
        // check for enough capacity
        if (ext_list->count * sizeof(VkExtensionProperties)
                >= ext_list->capacity) {

            ext_list->list = loader_heap_realloc(inst,
                                                 ext_list->list,
                                                 ext_list->capacity,
                                                 ext_list->capacity * 2,
                                                 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);

            if (ext_list->list == NULL)
                return VK_ERROR_OUT_OF_HOST_MEMORY;

            // double capacity
            ext_list->capacity *= 2;
        }

        memcpy(&ext_list->list[ext_list->count], cur_ext, sizeof(VkExtensionProperties));
        ext_list->count++;
    }
    return VK_SUCCESS;
}

/**
 * Search the given search_list for any layers in the props list.
 * Add these to the output layer_list. Don't add duplicates to the output layer_list.
 */
static VkResult loader_add_layer_names_to_list(
    const struct loader_instance *inst,
    struct loader_layer_list *output_list,
    uint32_t name_count,
    const char * const *names,
    const struct loader_layer_list *search_list)
{
    struct loader_layer_properties *layer_prop;
    VkResult err = VK_SUCCESS;

    for (uint32_t i = 0; i < name_count; i++) {
        const char *search_target = names[i];
        layer_prop = loader_get_layer_property(search_target, search_list);
        if (!layer_prop) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Unable to find layer %s", search_target);
            err = VK_ERROR_LAYER_NOT_PRESENT;
            continue;
        }

        loader_add_to_layer_list(inst, output_list, 1, layer_prop);
    }

    return err;
}


/*
 * Manage lists of VkLayerProperties
 */
static bool loader_init_layer_list(const struct loader_instance *inst,
                                   struct loader_layer_list *list)
{
    list->capacity = 32 * sizeof(struct loader_layer_properties);
    list->list = loader_heap_alloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (list->list == NULL) {
        return false;
    }
    memset(list->list, 0, list->capacity);
    list->count = 0;
    return true;
}

void loader_destroy_layer_list(const struct loader_instance *inst,
                               struct loader_layer_list *layer_list)
{
    loader_heap_free(inst, layer_list->list);
    layer_list->count = 0;
    layer_list->capacity = 0;
}

/*
 * Manage list of layer libraries (loader_lib_info)
 */
static bool loader_init_layer_library_list(const struct loader_instance *inst,
                                           struct loader_layer_library_list *list)
{
    list->capacity = 32 * sizeof(struct loader_lib_info);
    list->list = loader_heap_alloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (list->list == NULL) {
        return false;
    }
    memset(list->list, 0, list->capacity);
    list->count = 0;
    return true;
}

void loader_destroy_layer_library_list(const struct loader_instance *inst,
                                       struct loader_layer_library_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        loader_heap_free(inst, list->list[i].lib_name);
    }
    loader_heap_free(inst, list->list);
    list->count = 0;
    list->capacity = 0;
}

void loader_add_to_layer_library_list(
    const struct loader_instance *inst,
    struct loader_layer_library_list *list,
    uint32_t item_count,
    const struct loader_lib_info *new_items)
{
    uint32_t i;
    struct loader_lib_info *item;

    if (list->list == NULL || list->capacity == 0) {
        loader_init_layer_library_list(inst, list);
    }

    if (list->list == NULL)
        return;

    for (i = 0; i < item_count; i++) {
        item = (struct loader_lib_info *) &new_items[i];

        // look for duplicates; skip any library already in the list
        bool duplicate = false;
        for (uint32_t j = 0; j < list->count; j++) {
            if (strcmp(list->list[j].lib_name, item->lib_name) == 0) {
                duplicate = true;
                break;
            }
        }
        if (duplicate)
            continue;

        // add to list at end
        // check for enough capacity
        if (list->count * sizeof(struct loader_lib_info)
                >= list->capacity) {

            list->list = loader_heap_realloc(inst,
                                             list->list,
                                             list->capacity,
                                             list->capacity * 2,
                                             VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
            // double capacity
            list->capacity *= 2;
        }

        memcpy(&list->list[list->count], item, sizeof(struct loader_lib_info));
        list->count++;
    }
}


/*
 * Search the given layer list for a layer
 * matching the given VkLayerProperties
 */
bool has_vk_layer_property(
    const VkLayerProperties *vk_layer_prop,
    const struct loader_layer_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        if (strcmp(vk_layer_prop->layerName, list->list[i].info.layerName) == 0)
            return true;
    }
    return false;
}

/*
 * Search the given layer list for a layer
 * matching the given name
 */
bool has_layer_name(
    const char *name,
    const struct loader_layer_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        if (strcmp(name, list->list[i].info.layerName) == 0)
            return true;
    }
    return false;
}

/*
 * Append non-duplicate layer properties defined in prop_list
 * to the given layer_info list
 */
void loader_add_to_layer_list(
    const struct loader_instance *inst,
    struct loader_layer_list *list,
    uint32_t prop_list_count,
    const struct loader_layer_properties *props)
{
    uint32_t i;
    struct loader_layer_properties *layer;

    if (list->list == NULL || list->capacity == 0) {
        loader_init_layer_list(inst, list);
    }

    if (list->list == NULL)
        return;

    for (i = 0; i < prop_list_count; i++) {
        layer = (struct loader_layer_properties *) &props[i];

        // look for duplicates
        if (has_vk_layer_property(&layer->info, list)) {
            continue;
        }

        // add to list at end
        // check for enough capacity
        if (list->count * sizeof(struct loader_layer_properties)
                >= list->capacity) {

            list->list = loader_heap_realloc(inst,
                                             list->list,
                                             list->capacity,
                                             list->capacity * 2,
                                             VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
            // double capacity
            list->capacity *= 2;
        }

        memcpy(&list->list[list->count], layer, sizeof(struct loader_layer_properties));
        list->count++;
    }
}

/**
 * Search the search_list for any layer with a name
 * that matches the given name and a type that matches the given type.
 * Add all matching layers to the found_list.
 * Do not add if the found loader_layer_properties is already
 * on the found_list.
 */
static void loader_find_layer_name_add_list(
    const struct loader_instance *inst,
    const char *name,
    const enum layer_type type,
    const struct loader_layer_list *search_list,
    struct loader_layer_list *found_list)
{
    bool found = false;
    for (uint32_t i = 0; i < search_list->count; i++) {
        struct loader_layer_properties *layer_prop = &search_list->list[i];
        if (0 == strcmp(layer_prop->info.layerName, name) &&
            (layer_prop->type & type)) {
            /* Found a layer with the same name, add to found_list */
            loader_add_to_layer_list(inst, found_list, 1, layer_prop);
            found = true;
        }
    }
    if (!found) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Warning, couldn't find layer name %s to activate", name);
    }
}

static VkExtensionProperties *get_extension_property(
    const char *name,
    const struct loader_extension_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        if (strcmp(name, list->list[i].extensionName) == 0)
            return &list->list[i];
    }
    return NULL;
}

/*
 * For global extensions implemented within the loader (i.e. DEBUG_REPORT),
 * the extension must provide two entry points for the loader to use:
 * - "trampoline" entry point - this is the address returned by GetProcAddr
 *   and will always do what's necessary to support a global call.
 * - "terminator" function - this function will be put at the end of the
 *   instance chain and will contain the necessary logic to call / process
 *   the extension for the appropriate ICDs that are available.
 * There is no generic mechanism for including these functions; the references
 * must be placed into the appropriate loader entry points.
 * GetInstanceProcAddr: call extension GetInstanceProcAddr to check for GetProcAddr requests
 * loader_coalesce_extensions(void) - add extension records to the list of global
 * extensions available to the app.
 * instance_disp - add function pointer for terminator function to this array.
 * The extension itself should be in a separate file that will be
 * linked directly with the loader.
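 * For instance, the DEBUG_REPORT terminators loader_DbgCreateMsgCallback and
 * loader_DbgDestroyMsgCallback appear in instance_disp above, and
 * debug_report_add_instance_extensions() (used below) adds the corresponding
 * extension record that is reported to the app.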
 */

void loader_get_icd_loader_instance_extensions(
    const struct loader_instance *inst,
    struct loader_icd_libs *icd_libs,
    struct loader_extension_list *inst_exts)
{
    struct loader_extension_list icd_exts;
    loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, "Build ICD instance extension list");
    // traverse scanned icd list adding non-duplicate extensions to the list
    for (uint32_t i = 0; i < icd_libs->count; i++) {
        loader_init_ext_list(inst, &icd_exts);
        loader_add_global_extensions(inst, icd_libs->list[i].EnumerateInstanceExtensionProperties,
                                     icd_libs->list[i].lib_name,
                                     &icd_exts);
        loader_add_to_ext_list(inst, inst_exts,
                               icd_exts.count,
                               icd_exts.list);
        loader_destroy_ext_list(inst, &icd_exts);
    }

    // Traverse loader's extensions, adding non-duplicate extensions to the list
    wsi_add_instance_extensions(inst, inst_exts);
    debug_report_add_instance_extensions(inst, inst_exts);
}

struct loader_icd *loader_get_icd_and_device(const VkDevice device,
                                             struct loader_device **found_dev)
{
    *found_dev = NULL;
    for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
        for (struct loader_icd *icd = inst->icds; icd; icd = icd->next) {
            for (struct loader_device *dev = icd->logical_device_list; dev; dev = dev->next)
                /* Value comparison of device prevents object wrapping by layers */
                if (loader_get_dispatch(dev->device) == loader_get_dispatch(device)) {
                    *found_dev = dev;
                    return icd;
                }
        }
    }
    return NULL;
}

static void loader_destroy_logical_device(const struct loader_instance *inst,
                                          struct loader_device *dev)
{
    loader_heap_free(inst, dev->app_extension_props);
    if (dev->activated_layer_list.count)
        loader_destroy_layer_list(inst, &dev->activated_layer_list);
    loader_heap_free(inst, dev);
}

static struct loader_device *loader_add_logical_device(
    const struct loader_instance *inst,
    const VkDevice dev,
    struct loader_device **device_list)
{
    struct loader_device *new_dev;

    new_dev = loader_heap_alloc(inst, sizeof(struct loader_device), VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
    if (!new_dev) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc struct loader_device");
        return NULL;
    }

    memset(new_dev, 0, sizeof(struct loader_device));

    new_dev->next = *device_list;
    new_dev->device = dev;
    *device_list = new_dev;
    return new_dev;
}

void loader_remove_logical_device(
    const struct loader_instance *inst,
    struct loader_icd *icd,
    struct loader_device *found_dev)
{
    struct loader_device *dev, *prev_dev;

    if (!icd || !found_dev)
        return;

    prev_dev = NULL;
    dev = icd->logical_device_list;
    while (dev && dev != found_dev) {
        prev_dev = dev;
        dev = dev->next;
    }

    if (prev_dev)
        prev_dev->next = found_dev->next;
    else
        icd->logical_device_list = found_dev->next;
    loader_destroy_logical_device(inst, found_dev);
}


static void loader_icd_destroy(
    struct loader_instance *ptr_inst,
    struct loader_icd *icd)
{
    ptr_inst->total_icd_count--;
    for (struct loader_device *dev = icd->logical_device_list; dev; ) {
        struct loader_device *next_dev = dev->next;
        loader_destroy_logical_device(ptr_inst, dev);
        dev = next_dev;
    }

    loader_heap_free(ptr_inst, icd);
}

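/*
 * Ownership sketch (as walked by loader_get_icd_and_device above):
 * loader.instances -> loader_instance.icds -> loader_icd.logical_device_list.
 * loader_icd_destroy therefore frees each logical device before freeing the ICD.
 */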
static struct loader_icd *loader_icd_create(const struct loader_instance *inst)
{
    struct loader_icd *icd;

    icd = loader_heap_alloc(inst, sizeof(*icd), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (!icd)
        return NULL;

    memset(icd, 0, sizeof(*icd));

    return icd;
}

static struct loader_icd *loader_icd_add(
    struct loader_instance *ptr_inst,
    const struct loader_scanned_icds *icd_lib)
{
    struct loader_icd *icd;

    icd = loader_icd_create(ptr_inst);
    if (!icd)
        return NULL;

    icd->this_icd_lib = icd_lib;
    icd->this_instance = ptr_inst;

    /* prepend to the list */
    icd->next = ptr_inst->icds;
    ptr_inst->icds = icd;
    ptr_inst->total_icd_count++;

    return icd;
}

void loader_scanned_icd_clear(
    const struct loader_instance *inst,
    struct loader_icd_libs *icd_libs)
{
    if (icd_libs->capacity == 0)
        return;
    for (uint32_t i = 0; i < icd_libs->count; i++) {
        loader_platform_close_library(icd_libs->list[i].handle);
        loader_heap_free(inst, icd_libs->list[i].lib_name);
    }
    loader_heap_free(inst, icd_libs->list);
    icd_libs->capacity = 0;
    icd_libs->count = 0;
    icd_libs->list = NULL;
}

static void loader_scanned_icd_init(const struct loader_instance *inst,
                                    struct loader_icd_libs *icd_libs)
{
    loader_scanned_icd_clear(inst, icd_libs);
    icd_libs->capacity = 8 * sizeof(struct loader_scanned_icds);
    icd_libs->list = loader_heap_alloc(inst, icd_libs->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);

}

static void loader_scanned_icd_add(
    const struct loader_instance *inst,
    struct loader_icd_libs *icd_libs,
    const char *filename,
    uint32_t api_version)
{
    loader_platform_dl_handle handle;
    PFN_vkCreateInstance fp_create_inst;
    PFN_vkEnumerateInstanceExtensionProperties fp_get_global_ext_props;
    PFN_vkGetInstanceProcAddr fp_get_proc_addr;
    struct loader_scanned_icds *new_node;

    /* TODO implement ref counting of libraries, for now this function leaves
       libraries open and the scanned_icd_clear closes them */
    // Used to call: dlopen(filename, RTLD_LAZY);
    handle = loader_platform_open_library(filename);
    if (!handle) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, loader_platform_open_library_error(filename));
        return;
    }

#define LOOKUP_LD(func_ptr, func) do {                                                \
    func_ptr = (PFN_vk ##func) loader_platform_get_proc_address(handle, "vk" #func); \
    if (!func_ptr) {                                                                  \
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, loader_platform_get_proc_address_error("vk" #func)); \
        return;                                                                       \
    }                                                                                 \
} while (0)

    LOOKUP_LD(fp_get_proc_addr, GetInstanceProcAddr);
    LOOKUP_LD(fp_create_inst, CreateInstance);
    LOOKUP_LD(fp_get_global_ext_props, EnumerateInstanceExtensionProperties);

#undef LOOKUP_LD

    // check for enough capacity
    if ((icd_libs->count * sizeof(struct loader_scanned_icds)) >= icd_libs->capacity) {

        icd_libs->list = loader_heap_realloc(inst,
                                             icd_libs->list,
                                             icd_libs->capacity,
                                             icd_libs->capacity * 2,
                                             VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        // double capacity
        icd_libs->capacity *= 2;
    }
    new_node = &(icd_libs->list[icd_libs->count]);

    new_node->handle = handle;
    new_node->api_version = api_version;
    new_node->GetInstanceProcAddr = fp_get_proc_addr;
    new_node->CreateInstance = fp_create_inst;
    new_node->EnumerateInstanceExtensionProperties = fp_get_global_ext_props;

    new_node->lib_name = (char *) loader_heap_alloc(inst,
                                                    strlen(filename) + 1,
                                                    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (!new_node->lib_name) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Out of memory can't add icd");
        return;
    }
    strcpy(new_node->lib_name, filename);
    icd_libs->count++;
}

static bool loader_icd_init_entrys(struct loader_icd *icd,
                                   VkInstance inst,
                                   const PFN_vkGetInstanceProcAddr fp_gipa)
{
    /* initialize entrypoint function pointers */

    #define LOOKUP_GIPA(func, required) do {                            \
        icd->func = (PFN_vk ##func) fp_gipa(inst, "vk" #func);          \
        if (!icd->func && required) {                                   \
            loader_log(VK_DBG_REPORT_WARN_BIT, 0,                       \
                       loader_platform_get_proc_address_error("vk" #func)); \
            return false;                                               \
        }                                                               \
    } while (0)

    LOOKUP_GIPA(GetDeviceProcAddr, true);
    LOOKUP_GIPA(DestroyInstance, true);
    LOOKUP_GIPA(EnumeratePhysicalDevices, true);
    LOOKUP_GIPA(GetPhysicalDeviceFeatures, true);
    LOOKUP_GIPA(GetPhysicalDeviceFormatProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceImageFormatProperties, true);
    LOOKUP_GIPA(CreateDevice, true);
    LOOKUP_GIPA(GetPhysicalDeviceProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceMemoryProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceQueueFamilyProperties, true);
    LOOKUP_GIPA(EnumerateDeviceExtensionProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceSparseImageFormatProperties, true);
    LOOKUP_GIPA(DbgCreateMsgCallback, false);
    LOOKUP_GIPA(DbgDestroyMsgCallback, false);
    LOOKUP_GIPA(GetPhysicalDeviceSurfaceSupportKHR, false);
    LOOKUP_GIPA(GetPhysicalDeviceSurfaceCapabilitiesKHR, false);
    LOOKUP_GIPA(GetPhysicalDeviceSurfaceFormatsKHR, false);
    LOOKUP_GIPA(GetPhysicalDeviceSurfacePresentModesKHR, false);
#ifdef VK_USE_PLATFORM_WIN32_KHR
    LOOKUP_GIPA(GetPhysicalDeviceWin32PresentationSupportKHR, false);
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
    LOOKUP_GIPA(GetPhysicalDeviceXcbPresentationSupportKHR, false);
#endif

#undef LOOKUP_GIPA

    return true;
}

static void loader_debug_init(void)
{
    const char *env;

    if (g_loader_debug > 0)
        return;

    g_loader_debug = 0;

    /* parse comma-separated debug options, e.g. VK_LOADER_DEBUG=warn,error */
    env = getenv("VK_LOADER_DEBUG");
    while (env) {
        const char *p = strchr(env, ',');
        size_t len;

        if (p)
            len = p - env;
        else
            len = strlen(env);

        if (len > 0) {
            if (strncmp(env, "warn", len) == 0) {
                g_loader_debug |= LOADER_WARN_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_WARN_BIT;
            } else if (strncmp(env, "info", len) == 0) {
                g_loader_debug |= LOADER_INFO_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_INFO_BIT;
            } else if (strncmp(env, "perf", len) == 0) {
                g_loader_debug |= LOADER_PERF_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_PERF_WARN_BIT;
            } else if (strncmp(env, "error", len) == 0) {
                g_loader_debug |= LOADER_ERROR_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_ERROR_BIT;
            } else if (strncmp(env, "debug", len) == 0) {
                g_loader_debug |= LOADER_DEBUG_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_DEBUG_BIT;
            }
        }

        if (!p)
            break;

        env = p + 1;
    }
}

void loader_initialize(void)
{
    // initialize mutexes
    loader_platform_thread_create_mutex(&loader_lock);
    loader_platform_thread_create_mutex(&loader_json_lock);

    // initialize logging
    loader_debug_init();

    // initialize cJSON to use alloc callbacks
    cJSON_Hooks alloc_fns = {
        .malloc_fn = loader_tls_heap_alloc,
        .free_fn = loader_tls_heap_free,
    };
    cJSON_InitHooks(&alloc_fns);
}

struct loader_manifest_files {
    uint32_t count;
    char **filename_list;
};

/**
 * Get next file or dirname given a string list or registry key path
 *
 * \returns
 * A pointer to the first char of the next path in the list, or a pointer to
 * the terminating NUL if there is no next path.
 * Note: the input string is modified in some cases. PASS IN A COPY!
 */
static char *loader_get_next_path(char *path)
{
    uint32_t len;
    char *next;

    if (path == NULL)
        return NULL;
    next = strchr(path, PATH_SEPERATOR);
    if (next == NULL) {
        len = (uint32_t) strlen(path);
        next = path + len;
    }
    else {
        *next = '\0';
        next++;
    }

    return next;
}

/**
 * Given a path which is absolute or relative, expand the path if relative or
 * leave the path unmodified if absolute. The base path to prepend to relative
 * paths is given in rel_base.
 *
 * \returns
 * A string in out_fullpath of the full absolute path
 */
static void loader_expand_path(const char *path,
                               const char *rel_base,
                               size_t out_size,
                               char *out_fullpath)
{
    if (loader_platform_is_path_absolute(path)) {
        // do not prepend a base to an absolute path
        rel_base = "";
    }

    loader_platform_combine_path(out_fullpath, out_size, rel_base, path, NULL);
}

/**
 * Given a filename (file) and a list of paths (dirs), try to find an existing
 * file in the paths. If the filename is already a path, the given paths are
 * not searched.
 *
 * \returns
 * A string in out_fullpath of either the full path or file.
 */
static void loader_get_fullpath(const char *file,
                                const char *dirs,
                                size_t out_size,
                                char *out_fullpath)
{
    if (!loader_platform_is_path(file) && *dirs) {
        char *dirs_copy, *dir, *next_dir;

        dirs_copy = loader_stack_alloc(strlen(dirs) + 1);
        strcpy(dirs_copy, dirs);

        // find if file exists after prepending paths in given list
        for (dir = dirs_copy;
             *dir && (next_dir = loader_get_next_path(dir));
             dir = next_dir) {
            loader_platform_combine_path(out_fullpath, out_size, dir, file, NULL);
            if (loader_platform_file_exists(out_fullpath)) {
                return;
            }
        }
    }

    snprintf(out_fullpath, out_size, "%s", file);
}

/**
 * Read a JSON file into a buffer.
 *
 * \returns
 * A pointer to a cJSON object representing the JSON parse tree.
 * This returned object should be freed by the caller.
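 * A minimal usage sketch (the file name shown is illustrative; see
 * loader_add_layer_properties below for the real field parsing):
 *     cJSON *json = loader_get_json("some_manifest.json");
 *     if (json) {
 *         cJSON *item = cJSON_GetObjectItem(json, "file_format_version");
 *         ...
 *     }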
 */
static cJSON *loader_get_json(const char *filename)
{
    FILE *file;
    char *json_buf;
    cJSON *json;
    uint64_t len;
    file = fopen(filename, "rb");
    if (!file) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Couldn't open JSON file %s", filename);
        return NULL;
    }
    fseek(file, 0, SEEK_END);
    len = ftell(file);
    fseek(file, 0, SEEK_SET);
    json_buf = (char*) loader_stack_alloc(len + 1);
    if (json_buf == NULL) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get JSON file");
        fclose(file);
        return NULL;
    }
    if (fread(json_buf, sizeof(char), len, file) != len) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "fread failed can't get JSON file");
        fclose(file);
        return NULL;
    }
    fclose(file);
    json_buf[len] = '\0';

    // parse text from file
    json = cJSON_Parse(json_buf);
    if (json == NULL)
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Can't parse JSON file %s", filename);
    return json;
}

/**
 * Do a deep copy of the loader_layer_properties structure.
 */
static void loader_copy_layer_properties(
    const struct loader_instance *inst,
    struct loader_layer_properties *dst,
    struct loader_layer_properties *src)
{
    memcpy(dst, src, sizeof(*src));
    dst->instance_extension_list.list = loader_heap_alloc(
        inst,
        sizeof(VkExtensionProperties) * src->instance_extension_list.count,
        VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    dst->instance_extension_list.capacity = sizeof(VkExtensionProperties) *
        src->instance_extension_list.count;
    memcpy(dst->instance_extension_list.list, src->instance_extension_list.list,
           dst->instance_extension_list.capacity);
    dst->device_extension_list.list = loader_heap_alloc(
        inst,
        sizeof(VkExtensionProperties) * src->device_extension_list.count,
        VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    dst->device_extension_list.capacity = sizeof(VkExtensionProperties) *
        src->device_extension_list.count;
    memcpy(dst->device_extension_list.list, src->device_extension_list.list,
           dst->device_extension_list.capacity);
}

/**
 * Given a cJSON struct (json) of the top level JSON object from a layer manifest
 * file, add an entry to the layer_list.
 * Fill out the layer_properties in this list entry from the input cJSON object.
 *
 * \returns
 * void
 * layer_list has a new entry and initialized accordingly.
 * If the json input object does not have all the required fields no entry
 * is added to the list.
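 * Illustrative manifest layout (the values shown are hypothetical):
 *     {
 *         "file_format_version": "1.0.0",
 *         "layer": {
 *             "name": "VK_LAYER_example",
 *             "type": "INSTANCE",
 *             "library_path": "./libVkLayer_example.so",
 *             "api_version": "1.0.3",
 *             "implementation_version": "1",
 *             "description": "example layer"
 *         }
 *     }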
 */
static void loader_add_layer_properties(const struct loader_instance *inst,
                                        struct loader_layer_list *layer_instance_list,
                                        struct loader_layer_list *layer_device_list,
                                        cJSON *json,
                                        bool is_implicit,
                                        char *filename)
{
    /* Fields in layer manifest file that are required:
     * (required) “file_format_version”
     * following are required in the "layer" object:
     * (required) "name"
     * (required) "type"
     * (required) “library_path”
     * (required) “api_version”
     * (required) “implementation_version”
     * (required) “description”
     * (required for implicit layers) “disable_environment”
     *
     * First get all required items and if any missing abort
     */

    cJSON *item, *layer_node, *ext_item;
    char *temp;
    char *name, *type, *library_path, *api_version;
    char *implementation_version, *description;
    cJSON *disable_environment;
    int i;
    VkExtensionProperties ext_prop;
    item = cJSON_GetObjectItem(json, "file_format_version");
    if (item == NULL) {
        return;
    }
    char *file_vers = cJSON_PrintUnformatted(item);
    loader_log(VK_DBG_REPORT_INFO_BIT, 0, "Found manifest file %s, version %s",
               filename, file_vers);
    if (strcmp(file_vers, "\"1.0.0\"") != 0)
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Unexpected manifest file version (expected 1.0.0), may cause errors");
    loader_tls_heap_free(file_vers);

    layer_node = cJSON_GetObjectItem(json, "layer");
    if (layer_node == NULL) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"layer\" object in manifest JSON file, skipping");
        return;
    }

    // loop through all "layer" objects in the file
    do {
#define GET_JSON_OBJECT(node, var) {                    \
        var = cJSON_GetObjectItem(node, #var);          \
        if (var == NULL) {                              \
            layer_node = layer_node->next;              \
            continue;                                   \
        }                                               \
        }
#define GET_JSON_ITEM(node, var) {                      \
        item = cJSON_GetObjectItem(node, #var);         \
        if (item == NULL) {                             \
            layer_node = layer_node->next;              \
            continue;                                   \
        }                                               \
        temp = cJSON_Print(item);                       \
        temp[strlen(temp) - 1] = '\0';                  \
        var = loader_stack_alloc(strlen(temp) + 1);     \
        strcpy(var, &temp[1]);                          \
        loader_tls_heap_free(temp);                     \
        }
        GET_JSON_ITEM(layer_node, name)
        GET_JSON_ITEM(layer_node, type)
        GET_JSON_ITEM(layer_node, library_path)
        GET_JSON_ITEM(layer_node, api_version)
        GET_JSON_ITEM(layer_node, implementation_version)
        GET_JSON_ITEM(layer_node, description)
        if (is_implicit) {
            GET_JSON_OBJECT(layer_node, disable_environment)
        }
#undef GET_JSON_ITEM
#undef GET_JSON_OBJECT

        // add list entry
        struct loader_layer_properties *props = NULL;
        if (!strcmp(type, "DEVICE")) {
            if (layer_device_list == NULL) {
                layer_node = layer_node->next;
                continue;
            }
            props = loader_get_next_layer_property(inst, layer_device_list);
            props->type = (is_implicit) ? VK_LAYER_TYPE_DEVICE_IMPLICIT : VK_LAYER_TYPE_DEVICE_EXPLICIT;
        }
        if (!strcmp(type, "INSTANCE")) {
            if (layer_instance_list == NULL) {
                layer_node = layer_node->next;
                continue;
            }
            props = loader_get_next_layer_property(inst, layer_instance_list);
            props->type = (is_implicit) ? VK_LAYER_TYPE_INSTANCE_IMPLICIT : VK_LAYER_TYPE_INSTANCE_EXPLICIT;
        }
        if (!strcmp(type, "GLOBAL")) {
            if (layer_instance_list != NULL)
                props = loader_get_next_layer_property(inst, layer_instance_list);
            else if (layer_device_list != NULL)
                props = loader_get_next_layer_property(inst, layer_device_list);
            else {
                layer_node = layer_node->next;
                continue;
            }
            props->type = (is_implicit) ? VK_LAYER_TYPE_GLOBAL_IMPLICIT : VK_LAYER_TYPE_GLOBAL_EXPLICIT;
        }

        if (props == NULL) {
            layer_node = layer_node->next;
            continue;
        }

        strncpy(props->info.layerName, name, sizeof(props->info.layerName));
        props->info.layerName[sizeof(props->info.layerName) - 1] = '\0';

        char *fullpath = props->lib_name;
        char *rel_base;
        if (loader_platform_is_path(library_path)) {
            // a relative or absolute path
            char *name_copy = loader_stack_alloc(strlen(filename) + 1);
            strcpy(name_copy, filename);
            rel_base = loader_platform_dirname(name_copy);
            loader_expand_path(library_path, rel_base, MAX_STRING_SIZE, fullpath);
        } else {
            // a filename which is assumed to be in a system directory
            loader_get_fullpath(library_path, DEFAULT_VK_LAYERS_PATH, MAX_STRING_SIZE, fullpath);
        }
        props->info.specVersion = loader_make_version(api_version);
        props->info.implementationVersion = atoi(implementation_version);
        strncpy((char *) props->info.description, description, sizeof(props->info.description));
        props->info.description[sizeof(props->info.description) - 1] = '\0';
        if (is_implicit) {
            strncpy(props->disable_env_var.name, disable_environment->child->string, sizeof(props->disable_env_var.name));
            props->disable_env_var.name[sizeof(props->disable_env_var.name) - 1] = '\0';
            strncpy(props->disable_env_var.value, disable_environment->child->valuestring, sizeof(props->disable_env_var.value));
            props->disable_env_var.value[sizeof(props->disable_env_var.value) - 1] = '\0';
        }

        /**
         * Now get all optional items and objects and put in list:
         * functions
         * instance_extensions
         * device_extensions
         * enable_environment (implicit layers only)
         */
#define GET_JSON_OBJECT(node, var) {                    \
        var = cJSON_GetObjectItem(node, #var);          \
        }
#define GET_JSON_ITEM(node, var) {                      \
        item = cJSON_GetObjectItem(node, #var);         \
        if (item != NULL) {                             \
            temp = cJSON_Print(item);                   \
            temp[strlen(temp) - 1] = '\0';              \
            var = loader_stack_alloc(strlen(temp) + 1); \
            strcpy(var, &temp[1]);                      \
            loader_tls_heap_free(temp);                 \
        }                                               \
        }

        cJSON *instance_extensions, *device_extensions, *functions, *enable_environment;
        char *vkGetInstanceProcAddr = NULL, *vkGetDeviceProcAddr = NULL, *spec_version = NULL;
        GET_JSON_OBJECT(layer_node, functions)
        if (functions != NULL) {
            GET_JSON_ITEM(functions, vkGetInstanceProcAddr)
            GET_JSON_ITEM(functions, vkGetDeviceProcAddr)
            if (vkGetInstanceProcAddr != NULL)
                strncpy(props->functions.str_gipa, vkGetInstanceProcAddr, sizeof(props->functions.str_gipa));
            props->functions.str_gipa[sizeof(props->functions.str_gipa) - 1] = '\0';
            if (vkGetDeviceProcAddr != NULL)
                strncpy(props->functions.str_gdpa, vkGetDeviceProcAddr, sizeof(props->functions.str_gdpa));
            props->functions.str_gdpa[sizeof(props->functions.str_gdpa) - 1] = '\0';
        }
        GET_JSON_OBJECT(layer_node, instance_extensions)
        if (instance_extensions != NULL) {
            int count = cJSON_GetArraySize(instance_extensions);
            for (i = 0; i < count; i++) {
                ext_item = cJSON_GetArrayItem(instance_extensions, i);
                GET_JSON_ITEM(ext_item, name)
                GET_JSON_ITEM(ext_item, spec_version)
                strncpy(ext_prop.extensionName, name, sizeof(ext_prop.extensionName));
                ext_prop.extensionName[sizeof(ext_prop.extensionName) - 1] = '\0';
                ext_prop.specVersion = atoi(spec_version);
                loader_add_to_ext_list(inst, &props->instance_extension_list, 1, &ext_prop);
            }
        }
        GET_JSON_OBJECT(layer_node, device_extensions)
        if (device_extensions != NULL) {
            int count = cJSON_GetArraySize(device_extensions);
            for (i = 0; i < count; i++) {
                ext_item = cJSON_GetArrayItem(device_extensions, i);
                GET_JSON_ITEM(ext_item, name);
                GET_JSON_ITEM(ext_item, spec_version);
                strncpy(ext_prop.extensionName, name, sizeof(ext_prop.extensionName));
                ext_prop.extensionName[sizeof(ext_prop.extensionName) - 1] = '\0';
                ext_prop.specVersion = atoi(spec_version);
                loader_add_to_ext_list(inst, &props->device_extension_list, 1, &ext_prop);
            }
        }
        if (is_implicit) {
            GET_JSON_OBJECT(layer_node, enable_environment)
            strncpy(props->enable_env_var.name, enable_environment->child->string, sizeof(props->enable_env_var.name));
            props->enable_env_var.name[sizeof(props->enable_env_var.name) - 1] = '\0';
            strncpy(props->enable_env_var.value, enable_environment->child->valuestring, sizeof(props->enable_env_var.value));
            props->enable_env_var.value[sizeof(props->enable_env_var.value) - 1] = '\0';
        }
#undef GET_JSON_ITEM
#undef GET_JSON_OBJECT
        // for global layers we need to add them to both the device and instance lists
        if (!strcmp(type, "GLOBAL")) {
            struct loader_layer_properties *dev_props;
            if (layer_instance_list == NULL || layer_device_list == NULL) {
                layer_node = layer_node->next;
                continue;
            }
            dev_props = loader_get_next_layer_property(inst, layer_device_list);
            // copy into device layer list
            loader_copy_layer_properties(inst, dev_props, props);
        }
        layer_node = layer_node->next;
    } while (layer_node != NULL);
    return;
}

/**
 * Find the Vulkan library manifest files.
 *
 * This function scans the location or env_override directories/files
 * for a list of JSON manifest files. If env_override is non-NULL
 * and has a valid value, then location is ignored. Otherwise
 * location is used to look for manifest files. The location
 * is interpreted as a registry path on Windows and as directory path(s)
 * on Linux.
 *
 * \returns
 * A string list of manifest files to be opened in the out_files param.
 * The list has a pointer to a string for each manifest filename.
 * When done using the list in out_files, pointers should be freed.
1728 * Location or override string lists can be either files or directories as follows: 1729 * | location | override 1730 * -------------------------------- 1731 * Win ICD | files | files 1732 * Win Layer | files | dirs 1733 * Linux ICD | dirs | files 1734 * Linux Layer| dirs | dirs 1735 */ 1736static void loader_get_manifest_files(const struct loader_instance *inst, 1737 const char *env_override, 1738 bool is_layer, 1739 const char *location, 1740 struct loader_manifest_files *out_files) 1741{ 1742 char *override = NULL; 1743 char *loc; 1744 char *file, *next_file, *name; 1745 size_t alloced_count = 64; 1746 char full_path[2048]; 1747 DIR *sysdir = NULL; 1748 bool list_is_dirs = false; 1749 struct dirent *dent; 1750 1751 out_files->count = 0; 1752 out_files->filename_list = NULL; 1753 1754 if (env_override != NULL && (override = getenv(env_override))) { 1755#if !defined(_WIN32) 1756 if (geteuid() != getuid()) { 1757 /* Don't allow setuid apps to use the env var: */ 1758 override = NULL; 1759 } 1760#endif 1761 } 1762 1763 if (location == NULL) { 1764 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 1765 "Can't get manifest files with NULL location, env_override=%s", 1766 env_override); 1767 return; 1768 } 1769 1770#if defined(_WIN32) 1771 list_is_dirs = (is_layer && override != NULL) ? true : false; 1772#else 1773 list_is_dirs = (override == NULL || is_layer) ? true : false; 1774#endif 1775 // Make a copy of the input we are using so it is not modified 1776 // Also handle getting the location(s) from registry on Windows 1777 if (override == NULL) { 1778 loc = loader_stack_alloc(strlen(location) + 1); 1779 if (loc == NULL) { 1780 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files"); 1781 return; 1782 } 1783 strcpy(loc, location); 1784#if defined(_WIN32) 1785 loc = loader_get_registry_files(inst, loc); 1786 if (loc == NULL) { 1787 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Registry lookup failed can't get manifest files"); 1788 return; 1789 } 1790#endif 1791 } 1792 else { 1793 loc = loader_stack_alloc(strlen(override) + 1); 1794 if (loc == NULL) { 1795 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files"); 1796 return; 1797 } 1798 strcpy(loc, override); 1799 } 1800 1801 // Print out the paths being searched if debugging is enabled 1802 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, "Searching the following paths for manifest files: %s\n", loc); 1803 1804 file = loc; 1805 while (*file) { 1806 next_file = loader_get_next_path(file); 1807 if (list_is_dirs) { 1808 sysdir = opendir(file); 1809 name = NULL; 1810 if (sysdir) { 1811 dent = readdir(sysdir); 1812 if (dent == NULL) 1813 break; 1814 name = &(dent->d_name[0]); 1815 loader_get_fullpath(name, file, sizeof(full_path), full_path); 1816 name = full_path; 1817 } 1818 } 1819 else { 1820#if defined(_WIN32) 1821 name = file; 1822#else 1823 // only Linux has relative paths 1824 char *dir; 1825 // make a copy of location so it isn't modified 1826 dir = loader_stack_alloc(strlen(loc) + 1); 1827 if (dir == NULL) { 1828 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files"); 1829 return; 1830 } 1831 strcpy(dir, loc); 1832 1833 loader_get_fullpath(file, dir, sizeof(full_path), full_path); 1834 1835 name = full_path; 1836#endif 1837 } 1838 while (name) { 1839 /* Look for files ending with ".json" suffix */ 1840 uint32_t nlen = (uint32_t) strlen(name); 1841 const char *suf = name + nlen - 5; 1842 if ((nlen > 5) && !strncmp(suf, ".json", 5)) { 1843 if (out_files->count == 0) { 1844 
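/* First ".json" match: allocate the initial filename_list with room for
 * alloced_count entries; the list is doubled in the branch below whenever
 * it fills up. */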
out_files->filename_list = loader_heap_alloc(inst, 1845 alloced_count * sizeof(char *), 1846 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 1847 } 1848 else if (out_files->count == alloced_count) { 1849 out_files->filename_list = loader_heap_realloc(inst, 1850 out_files->filename_list, 1851 alloced_count * sizeof(char *), 1852 alloced_count * sizeof(char *) * 2, 1853 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 1854 alloced_count *= 2; 1855 } 1856 if (out_files->filename_list == NULL) { 1857 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't alloc manifest file list"); 1858 return; 1859 } 1860 out_files->filename_list[out_files->count] = loader_heap_alloc( 1861 inst, 1862 strlen(name) + 1, 1863 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 1864 if (out_files->filename_list[out_files->count] == NULL) { 1865 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files"); 1866 return; 1867 } 1868 strcpy(out_files->filename_list[out_files->count], name); 1869 out_files->count++; 1870 } else if (!list_is_dirs) { 1871 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Skipping manifest file %s, file name must end in .json", name); 1872 } 1873 if (list_is_dirs) { 1874 dent = readdir(sysdir); 1875 if (dent == NULL) 1876 break; 1877 name = &(dent->d_name[0]); 1878 loader_get_fullpath(name, file, sizeof(full_path), full_path); 1879 name = full_path; 1880 } 1881 else { 1882 break; 1883 } 1884 } 1885 if (sysdir) 1886 closedir(sysdir); 1887 file = next_file; 1888 } 1889 return; 1890} 1891 1892void loader_init_icd_lib_list() 1893{ 1894 1895} 1896 1897void loader_destroy_icd_lib_list() 1898{ 1899 1900} 1901/** 1902 * Try to find the Vulkan ICD driver(s). 1903 * 1904 * This function scans the default system loader path(s) or path 1905 * specified by the \c VK_ICD_FILENAMES environment variable in 1906 * order to find loadable VK ICDs manifest files. From these 1907 * manifest files it finds the ICD libraries. 
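 *
 * Hedged illustration (shape inferred from the parsing code below, values are
 * placeholders): an ICD manifest file generally looks like
 *
 *   {
 *       "file_format_version": "1.0.0",
 *       "ICD": {
 *           "library_path": "path/to/vendor_icd_library",
 *           "api_version": "1.0.0"
 *       }
 *   }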
1908 * 1909 * \returns 1910 * a list of icds that were discovered 1911 */ 1912void loader_icd_scan( 1913 const struct loader_instance *inst, 1914 struct loader_icd_libs *icds) 1915{ 1916 char *file_str; 1917 struct loader_manifest_files manifest_files; 1918 1919 loader_scanned_icd_init(inst, icds); 1920 // Get a list of manifest files for ICDs 1921 loader_get_manifest_files(inst, "VK_ICD_FILENAMES", false, 1922 DEFAULT_VK_DRIVERS_INFO, &manifest_files); 1923 if (manifest_files.count == 0) 1924 return; 1925 loader_platform_thread_lock_mutex(&loader_json_lock); 1926 for (uint32_t i = 0; i < manifest_files.count; i++) { 1927 file_str = manifest_files.filename_list[i]; 1928 if (file_str == NULL) 1929 continue; 1930 1931 cJSON *json; 1932 json = loader_get_json(file_str); 1933 if (!json) 1934 continue; 1935 cJSON *item, *itemICD; 1936 item = cJSON_GetObjectItem(json, "file_format_version"); 1937 if (item == NULL) { 1938 loader_platform_thread_unlock_mutex(&loader_json_lock); 1939 return; 1940 } 1941 char *file_vers = cJSON_Print(item); 1942 loader_log(VK_DBG_REPORT_INFO_BIT, 0, "Found manifest file %s, version %s", 1943 file_str, file_vers); 1944 if (strcmp(file_vers, "\"1.0.0\"") != 0) 1945 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Unexpected manifest file version (expected 1.0.0), may cause errors"); 1946 loader_tls_heap_free(file_vers); 1947 itemICD = cJSON_GetObjectItem(json, "ICD"); 1948 if (itemICD != NULL) { 1949 item = cJSON_GetObjectItem(itemICD, "library_path"); 1950 if (item != NULL) { 1951 char *temp= cJSON_Print(item); 1952 if (!temp || strlen(temp) == 0) { 1953 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"library_path\" in ICD JSON file %s, skipping", file_str); 1954 loader_tls_heap_free(temp); 1955 loader_heap_free(inst, file_str); 1956 cJSON_Delete(json); 1957 continue; 1958 } 1959 //strip out extra quotes 1960 temp[strlen(temp) - 1] = '\0'; 1961 char *library_path = loader_stack_alloc(strlen(temp) + 1); 1962 strcpy(library_path, &temp[1]); 1963 loader_tls_heap_free(temp); 1964 if (!library_path || strlen(library_path) == 0) { 1965 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"library_path\" in ICD JSON file %s, skipping", file_str); 1966 loader_heap_free(inst, file_str); 1967 cJSON_Delete(json); 1968 continue; 1969 } 1970 char fullpath[MAX_STRING_SIZE]; 1971 // Print out the paths being searched if debugging is enabled 1972 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, "Searching for ICD drivers named %s default dir %s\n", library_path, DEFAULT_VK_DRIVERS_PATH); 1973 if (loader_platform_is_path(library_path)) { 1974 // a relative or absolute path 1975 char *name_copy = loader_stack_alloc(strlen(file_str) + 1); 1976 char *rel_base; 1977 strcpy(name_copy, file_str); 1978 rel_base = loader_platform_dirname(name_copy); 1979 loader_expand_path(library_path, rel_base, sizeof(fullpath), fullpath); 1980 } else { 1981 // a filename which is assumed in a system directory 1982 loader_get_fullpath(library_path, DEFAULT_VK_DRIVERS_PATH, sizeof(fullpath), fullpath); 1983 } 1984 1985 uint32_t vers = 0; 1986 item = cJSON_GetObjectItem(itemICD, "api_version"); 1987 if (item != NULL) { 1988 temp= cJSON_Print(item); 1989 vers = loader_make_version(temp); 1990 loader_tls_heap_free(temp); 1991 } 1992 loader_scanned_icd_add(inst, icds, fullpath, vers); 1993 } 1994 else 1995 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"library_path\" object in ICD JSON file %s, skipping", file_str); 1996 } 1997 else 1998 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"ICD\" object in ICD JSON file %s, 
skipping", file_str); 1999 2000 loader_heap_free(inst, file_str); 2001 cJSON_Delete(json); 2002 } 2003 loader_heap_free(inst, manifest_files.filename_list); 2004 loader_platform_thread_unlock_mutex(&loader_json_lock); 2005} 2006 2007 2008void loader_layer_scan( 2009 const struct loader_instance *inst, 2010 struct loader_layer_list *instance_layers, 2011 struct loader_layer_list *device_layers) 2012{ 2013 char *file_str; 2014 struct loader_manifest_files manifest_files; 2015 cJSON *json; 2016 uint32_t i; 2017 2018 // Get a list of manifest files for layers 2019 loader_get_manifest_files(inst, LAYERS_PATH_ENV, true, DEFAULT_VK_LAYERS_INFO, 2020 &manifest_files); 2021 if (manifest_files.count == 0) 2022 return; 2023 2024#if 0 //TODO 2025 /** 2026 * We need a list of the layer libraries, not just a list of 2027 * the layer properties (a layer library could expose more than 2028 * one layer property). This list of scanned layers would be 2029 * used to check for global and physicaldevice layer properties. 2030 */ 2031 if (!loader_init_layer_library_list(&loader.scanned_layer_libraries)) { 2032 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2033 "Alloc for layer list failed: %s line: %d", __FILE__, __LINE__); 2034 return; 2035 } 2036#endif 2037 2038 /* cleanup any previously scanned libraries */ 2039 loader_delete_layer_properties(inst, instance_layers); 2040 loader_delete_layer_properties(inst, device_layers); 2041 2042 loader_platform_thread_lock_mutex(&loader_json_lock); 2043 for (i = 0; i < manifest_files.count; i++) { 2044 file_str = manifest_files.filename_list[i]; 2045 if (file_str == NULL) 2046 continue; 2047 2048 // parse file into JSON struct 2049 json = loader_get_json(file_str); 2050 if (!json) { 2051 continue; 2052 } 2053 2054 //TODO pass in implicit versus explicit bool 2055 //TODO error if device layers expose instance_extensions 2056 //TODO error if instance layers expose device extensions 2057 loader_add_layer_properties(inst, 2058 instance_layers, 2059 device_layers, 2060 json, 2061 false, 2062 file_str); 2063 2064 loader_heap_free(inst, file_str); 2065 cJSON_Delete(json); 2066 } 2067 loader_heap_free(inst, manifest_files.filename_list); 2068 loader_platform_thread_unlock_mutex(&loader_json_lock); 2069} 2070 2071static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_instance_internal(VkInstance inst, const char * pName) 2072{ 2073 // inst is not wrapped 2074 if (inst == VK_NULL_HANDLE) { 2075 return NULL; 2076 } 2077 VkLayerInstanceDispatchTable* disp_table = * (VkLayerInstanceDispatchTable **) inst; 2078 void *addr; 2079 2080 if (!strcmp(pName, "vkGetInstanceProcAddr")) 2081 return (void *) loader_gpa_instance_internal; 2082 2083 if (disp_table == NULL) 2084 return NULL; 2085 2086 addr = loader_lookup_instance_dispatch_table(disp_table, pName); 2087 if (addr) { 2088 return addr; 2089 } 2090 2091 if (disp_table->GetInstanceProcAddr == NULL) { 2092 return NULL; 2093 } 2094 return disp_table->GetInstanceProcAddr(inst, pName); 2095} 2096 2097/** 2098 * Initialize device_ext dispatch table entry as follows: 2099 * If dev == NULL find all logical devices created within this instance and 2100 * init the entry (given by idx) in the ext dispatch table. 2101 * If dev != NULL only initialize the entry in the given dev's dispatch table. 2102 * The initialization value is gotten by calling down the device chain with GDPA. 2103 * If GDPA returns NULL then don't initialize the dispatch table entry. 
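 *
 * Informal sketch of the per-device assignment performed below (names as in
 * this file):
 *   gdpa_value = dev->loader_dispatch.core_dispatch.GetDeviceProcAddr(dev->device, funcName);
 *   if (gdpa_value != NULL)
 *       dev->loader_dispatch.ext_dispatch.DevExt[idx] = (PFN_vkDevExt) gdpa_value;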
2104 */ 2105static void loader_init_dispatch_dev_ext_entry(struct loader_instance *inst, 2106 struct loader_device *dev, 2107 uint32_t idx, 2108 const char *funcName) 2109 2110 { 2111 void *gdpa_value; 2112 if (dev != NULL) { 2113 gdpa_value = dev->loader_dispatch.core_dispatch.GetDeviceProcAddr( 2114 dev->device, funcName); 2115 if (gdpa_value != NULL) 2116 dev->loader_dispatch.ext_dispatch.DevExt[idx] = (PFN_vkDevExt) gdpa_value; 2117 } else { 2118 for (uint32_t i = 0; i < inst->total_icd_count; i++) { 2119 struct loader_icd *icd = &inst->icds[i]; 2120 struct loader_device *dev = icd->logical_device_list; 2121 while (dev) { 2122 gdpa_value = dev->loader_dispatch.core_dispatch.GetDeviceProcAddr( 2123 dev->device, funcName); 2124 if (gdpa_value != NULL) 2125 dev->loader_dispatch.ext_dispatch.DevExt[idx] = 2126 (PFN_vkDevExt) gdpa_value; 2127 dev = dev->next; 2128 } 2129 } 2130 } 2131 2132} 2133 2134/** 2135 * Find all dev extension in the hash table and initialize the dispatch table 2136 * for dev for each of those extension entrypoints found in hash table. 2137 2138 */ 2139static void loader_init_dispatch_dev_ext(struct loader_instance *inst, 2140 struct loader_device *dev) 2141{ 2142 for (uint32_t i = 0; i < MAX_NUM_DEV_EXTS; i++) { 2143 if (inst->disp_hash[i].func_name != NULL) 2144 loader_init_dispatch_dev_ext_entry(inst, dev, i, 2145 inst->disp_hash[i].func_name); 2146 } 2147} 2148 2149static bool loader_check_icds_for_address(struct loader_instance *inst, 2150 const char *funcName) 2151{ 2152 struct loader_icd *icd; 2153 icd = inst->icds; 2154 while (icd) { 2155 if (icd->this_icd_lib->GetInstanceProcAddr(icd->instance, funcName)) 2156 // this icd supports funcName 2157 return true; 2158 icd = icd->next; 2159 } 2160 2161 return false; 2162} 2163 2164static void loader_free_dev_ext_table(struct loader_instance *inst) 2165{ 2166 for (uint32_t i = 0; i < MAX_NUM_DEV_EXTS; i++) { 2167 loader_heap_free(inst, inst->disp_hash[i].func_name); 2168 loader_heap_free(inst, inst->disp_hash[i].list.index); 2169 2170 } 2171 memset(inst->disp_hash, 0, sizeof(inst->disp_hash)); 2172} 2173 2174static bool loader_add_dev_ext_table(struct loader_instance *inst, 2175 uint32_t *ptr_idx, 2176 const char *funcName) 2177{ 2178 uint32_t i; 2179 uint32_t idx = *ptr_idx; 2180 struct loader_dispatch_hash_list *list = &inst->disp_hash[idx].list; 2181 2182 if (!inst->disp_hash[idx].func_name) { 2183 // no entry here at this idx, so use it 2184 assert(list->capacity == 0); 2185 inst->disp_hash[idx].func_name = (char *) loader_heap_alloc(inst, 2186 strlen(funcName) + 1, 2187 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2188 if (inst->disp_hash[idx].func_name == NULL) { 2189 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2190 "loader_add_dev_ext_table() can't allocate memory for func_name"); 2191 return false; 2192 } 2193 strncpy(inst->disp_hash[idx].func_name, funcName, strlen(funcName) + 1); 2194 return true; 2195 } 2196 2197 // check for enough capacity 2198 if (list->capacity == 0) { 2199 list->index = loader_heap_alloc(inst, 8 * sizeof(*(list->index)), 2200 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2201 if (list->index == NULL) { 2202 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2203 "loader_add_dev_ext_table() can't allocate list memory"); 2204 return false; 2205 } 2206 list->capacity = 8 * sizeof(*(list->index)); 2207 } else if (list->capacity < (list->count + 1) * sizeof(*(list->index))) { 2208 list->index = loader_heap_realloc(inst, list->index, list->capacity, 2209 list->capacity * 2, 2210 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2211 
if (list->index == NULL) { 2212 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2213 "loader_add_dev_ext_table() can't reallocate list memory"); 2214 return false; 2215 } 2216 list->capacity *= 2; 2217 } 2218 2219 //find an unused index in the hash table and use it 2220 i = (idx + 1) % MAX_NUM_DEV_EXTS; 2221 do { 2222 if (!inst->disp_hash[i].func_name) { 2223 assert(inst->disp_hash[i].list.capacity == 0); 2224 inst->disp_hash[i].func_name = (char *) loader_heap_alloc(inst, 2225 strlen(funcName) + 1, 2226 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2227 if (inst->disp_hash[i].func_name == NULL) { 2228 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2229 "loader_add_dev_ext_table() can't allocate func_name memory"); 2230 return false; 2231 } 2232 strncpy(inst->disp_hash[i].func_name, funcName, strlen(funcName) + 1); 2233 list->index[list->count] = i; 2234 list->count++; 2235 *ptr_idx = i; 2236 return true; 2237 } 2238 i = (i + 1) % MAX_NUM_DEV_EXTS; 2239 } while (i != idx); 2240 2241 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2242 "loader_add_dev_ext_table() couldn't insert into hash table; is it full?"); 2243 return false; 2244} 2245 2246static bool loader_name_in_dev_ext_table(struct loader_instance *inst, 2247 uint32_t *idx, 2248 const char *funcName) 2249{ 2250 uint32_t alt_idx; 2251 if (inst->disp_hash[*idx].func_name && !strcmp( 2252 inst->disp_hash[*idx].func_name, 2253 funcName)) 2254 return true; 2255 2256 // funcName wasn't at the primary spot in the hash table 2257 // search the list of secondary locations (shallow search, not deep search) 2258 for (uint32_t i = 0; i < inst->disp_hash[*idx].list.count; i++) { 2259 alt_idx = inst->disp_hash[*idx].list.index[i]; 2260 if (!strcmp(inst->disp_hash[alt_idx].func_name, funcName)) { 2261 *idx = alt_idx; 2262 return true; 2263 } 2264 } 2265 2266 return false; 2267} 2268 2269/** 2270 * This function returns a generic trampoline code address for unknown entry points. 2271 * Presumably, these unknown entry points (as given by funcName) are device 2272 * extension entrypoints. A hash table is used to keep a list of unknown entry 2273 * points and their mapping to the device extension dispatch table 2274 * (struct loader_dev_ext_dispatch_table). 2275 * \returns 2276 * For a given entry point string (funcName), if an existing mapping is found the 2277 * trampoline address for that mapping is returned. Otherwise, this unknown entry point 2278 * has not been seen yet. Next check if a layer or ICD supports it. If so then a 2279 * new entry in the hash table is initialized and that trampoline address for 2280 * the new entry is returned. NULL is returned if the hash table is full or 2281 * if no discovered layer or ICD returns a non-NULL GetProcAddr for it.
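 *
 * Condensed sketch of the flow implemented below:
 *   idx = murmurhash(funcName, strlen(funcName), 0) % MAX_NUM_DEV_EXTS;
 *   if funcName is already in the table (at idx or via its collision list)
 *       -> return loader_get_dev_ext_trampoline(idx)
 *   else if some ICD's GetInstanceProcAddr resolves funcName
 *       -> add a table entry, init device dispatch entries, return the trampoline
 *   else
 *       -> return NULL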
2282 */ 2283void *loader_dev_ext_gpa(struct loader_instance *inst, 2284 const char *funcName) 2285{ 2286 uint32_t idx; 2287 uint32_t seed = 0; 2288 2289 idx = murmurhash(funcName, strlen(funcName), seed) % MAX_NUM_DEV_EXTS; 2290 2291 if (loader_name_in_dev_ext_table(inst, &idx, funcName)) 2292 // found funcName already in hash 2293 return loader_get_dev_ext_trampoline(idx); 2294 2295 // Check if funcName is supported in either ICDs or a layer library 2296 if (!loader_check_icds_for_address(inst, funcName)) { 2297 // TODO Add check in layer libraries for support of address 2298 // if support found in layers continue on 2299 return NULL; 2300 } 2301 2302 if (loader_add_dev_ext_table(inst, &idx, funcName)) { 2303 // successfully added new table entry 2304 // init any dev dispatch table entrys as needed 2305 loader_init_dispatch_dev_ext_entry(inst, NULL, idx, funcName); 2306 return loader_get_dev_ext_trampoline(idx); 2307 } 2308 2309 return NULL; 2310} 2311 2312struct loader_instance *loader_get_instance(const VkInstance instance) 2313{ 2314 /* look up the loader_instance in our list by comparing dispatch tables, as 2315 * there is no guarantee the instance is still a loader_instance* after any 2316 * layers which wrap the instance object. 2317 */ 2318 const VkLayerInstanceDispatchTable *disp; 2319 struct loader_instance *ptr_instance = NULL; 2320 disp = loader_get_instance_dispatch(instance); 2321 for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) { 2322 if (inst->disp == disp) { 2323 ptr_instance = inst; 2324 break; 2325 } 2326 } 2327 return ptr_instance; 2328} 2329 2330static loader_platform_dl_handle loader_add_layer_lib( 2331 const struct loader_instance *inst, 2332 const char *chain_type, 2333 struct loader_layer_properties *layer_prop) 2334{ 2335 struct loader_lib_info *new_layer_lib_list, *my_lib; 2336 size_t new_alloc_size; 2337 /* 2338 * TODO: We can now track this information in the 2339 * scanned_layer_libraries list. 
2340 */ 2341 for (uint32_t i = 0; i < loader.loaded_layer_lib_count; i++) { 2342 if (strcmp(loader.loaded_layer_lib_list[i].lib_name, layer_prop->lib_name) == 0) { 2343 /* Have already loaded this library, just increment ref count */ 2344 loader.loaded_layer_lib_list[i].ref_count++; 2345 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, 2346 "%s Chain: Increment layer reference count for layer library %s", 2347 chain_type, layer_prop->lib_name); 2348 return loader.loaded_layer_lib_list[i].lib_handle; 2349 } 2350 } 2351 2352 /* Haven't seen this library so load it */ 2353 new_alloc_size = 0; 2354 if (loader.loaded_layer_lib_capacity == 0) 2355 new_alloc_size = 8 * sizeof(struct loader_lib_info); 2356 else if (loader.loaded_layer_lib_capacity <= loader.loaded_layer_lib_count * 2357 sizeof(struct loader_lib_info)) 2358 new_alloc_size = loader.loaded_layer_lib_capacity * 2; 2359 2360 if (new_alloc_size) { 2361 new_layer_lib_list = loader_heap_realloc( 2362 inst, loader.loaded_layer_lib_list, 2363 loader.loaded_layer_lib_capacity, 2364 new_alloc_size, 2365 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2366 if (!new_layer_lib_list) { 2367 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "loader: realloc failed in loader_add_layer_lib"); 2368 return NULL; 2369 } 2370 loader.loaded_layer_lib_capacity = new_alloc_size; 2371 } else 2372 new_layer_lib_list = loader.loaded_layer_lib_list; 2373 my_lib = &new_layer_lib_list[loader.loaded_layer_lib_count]; 2374 2375 strncpy(my_lib->lib_name, layer_prop->lib_name, sizeof(my_lib->lib_name)); 2376 my_lib->lib_name[sizeof(my_lib->lib_name) - 1] = '\0'; 2377 my_lib->ref_count = 0; 2378 my_lib->lib_handle = NULL; 2379 2380 if ((my_lib->lib_handle = loader_platform_open_library(my_lib->lib_name)) == NULL) { 2381 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2382 loader_platform_open_library_error(my_lib->lib_name)); 2383 return NULL; 2384 } else { 2385 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, 2386 "Chain: %s: Loading layer library %s", 2387 chain_type, layer_prop->lib_name); 2388 } 2389 loader.loaded_layer_lib_count++; 2390 loader.loaded_layer_lib_list = new_layer_lib_list; 2391 my_lib->ref_count++; 2392 2393 return my_lib->lib_handle; 2394} 2395 2396static void loader_remove_layer_lib( 2397 struct loader_instance *inst, 2398 struct loader_layer_properties *layer_prop) 2399{ 2400 uint32_t idx; 2401 struct loader_lib_info *new_layer_lib_list, *my_lib = NULL; 2402 2403 for (uint32_t i = 0; i < loader.loaded_layer_lib_count; i++) { 2404 if (strcmp(loader.loaded_layer_lib_list[i].lib_name, layer_prop->lib_name) == 0) { 2405 /* found matching library */ 2406 idx = i; 2407 my_lib = &loader.loaded_layer_lib_list[i]; 2408 break; 2409 } 2410 } 2411 2412 if (my_lib) { 2413 my_lib->ref_count--; 2414 if (my_lib->ref_count > 0) { 2415 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, 2416 "Decrement reference count for layer library %s", layer_prop->lib_name); 2417 return; 2418 } 2419 } 2420 loader_platform_close_library(my_lib->lib_handle); 2421 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, 2422 "Unloading layer library %s", layer_prop->lib_name); 2423 2424 /* Need to remove unused library from list */ 2425 new_layer_lib_list = loader_heap_alloc(inst, 2426 loader.loaded_layer_lib_capacity, 2427 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2428 if (!new_layer_lib_list) { 2429 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "loader: heap alloc failed loader_remove_layer_library"); 2430 return; 2431 } 2432 2433 if (idx > 0) { 2434 /* Copy records before idx */ 2435 memcpy(new_layer_lib_list, &loader.loaded_layer_lib_list[0], 2436 sizeof(struct 
loader_lib_info) * idx); 2437 } 2438 if (idx < (loader.loaded_layer_lib_count - 1)) { 2439 /* Copy records after idx */ 2440 memcpy(&new_layer_lib_list[idx], &loader.loaded_layer_lib_list[idx+1], 2441 sizeof(struct loader_lib_info) * (loader.loaded_layer_lib_count - idx - 1)); 2442 } 2443 2444 loader_heap_free(inst, loader.loaded_layer_lib_list); 2445 loader.loaded_layer_lib_count--; 2446 loader.loaded_layer_lib_list = new_layer_lib_list; 2447} 2448 2449 2450/** 2451 * Go through the search_list and find any layers which match type. If layer 2452 * type match is found in then add it to ext_list. 2453 */ 2454//TODO need to handle implict layer enable env var and disable env var 2455static void loader_add_layer_implicit( 2456 const struct loader_instance *inst, 2457 const enum layer_type type, 2458 struct loader_layer_list *list, 2459 const struct loader_layer_list *search_list) 2460{ 2461 uint32_t i; 2462 for (i = 0; i < search_list->count; i++) { 2463 const struct loader_layer_properties *prop = &search_list->list[i]; 2464 if (prop->type & type) { 2465 /* Found an layer with the same type, add to layer_list */ 2466 loader_add_to_layer_list(inst, list, 1, prop); 2467 } 2468 } 2469 2470} 2471 2472/** 2473 * Get the layer name(s) from the env_name environment variable. If layer 2474 * is found in search_list then add it to layer_list. But only add it to 2475 * layer_list if type matches. 2476 */ 2477static void loader_add_layer_env( 2478 const struct loader_instance *inst, 2479 const enum layer_type type, 2480 const char *env_name, 2481 struct loader_layer_list *layer_list, 2482 const struct loader_layer_list *search_list) 2483{ 2484 char *layerEnv; 2485 char *next, *name; 2486 2487 layerEnv = getenv(env_name); 2488 if (layerEnv == NULL) { 2489 return; 2490 } 2491 name = loader_stack_alloc(strlen(layerEnv) + 1); 2492 if (name == NULL) { 2493 return; 2494 } 2495 strcpy(name, layerEnv); 2496 2497 while (name && *name ) { 2498 next = loader_get_next_path(name); 2499 loader_find_layer_name_add_list(inst, name, type, search_list, layer_list); 2500 name = next; 2501 } 2502 2503 return; 2504} 2505 2506void loader_deactivate_instance_layers(struct loader_instance *instance) 2507{ 2508 if (!instance->activated_layer_list.count) { 2509 return; 2510 } 2511 2512 /* Create instance chain of enabled layers */ 2513 for (uint32_t i = 0; i < instance->activated_layer_list.count; i++) { 2514 struct loader_layer_properties *layer_prop = &instance->activated_layer_list.list[i]; 2515 2516 loader_remove_layer_lib(instance, layer_prop); 2517 } 2518 loader_destroy_layer_list(instance, &instance->activated_layer_list); 2519} 2520 2521VkResult loader_enable_instance_layers( 2522 struct loader_instance *inst, 2523 const VkInstanceCreateInfo *pCreateInfo, 2524 const struct loader_layer_list *instance_layers) 2525{ 2526 VkResult err; 2527 2528 assert(inst && "Cannot have null instance"); 2529 2530 if (!loader_init_layer_list(inst, &inst->activated_layer_list)) { 2531 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc Instance activated layer list"); 2532 return VK_ERROR_OUT_OF_HOST_MEMORY; 2533 } 2534 2535 /* Add any implicit layers first */ 2536 loader_add_layer_implicit( 2537 inst, 2538 VK_LAYER_TYPE_INSTANCE_IMPLICIT, 2539 &inst->activated_layer_list, 2540 instance_layers); 2541 2542 /* Add any layers specified via environment variable next */ 2543 loader_add_layer_env( 2544 inst, 2545 VK_LAYER_TYPE_INSTANCE_EXPLICIT, 2546 "VK_INSTANCE_LAYERS", 2547 &inst->activated_layer_list, 2548 instance_layers); 2549 2550 /* 
Add layers specified by the application */ 2551 err = loader_add_layer_names_to_list( 2552 inst, 2553 &inst->activated_layer_list, 2554 pCreateInfo->enabledLayerNameCount, 2555 pCreateInfo->ppEnabledLayerNames, 2556 instance_layers); 2557 2558 return err; 2559} 2560 2561uint32_t loader_activate_instance_layers(struct loader_instance *inst) 2562{ 2563 uint32_t layer_idx; 2564 VkBaseLayerObject *wrappedInstance; 2565 2566 if (inst == NULL) { 2567 return 0; 2568 } 2569 2570 // NOTE inst is unwrapped at this point in time 2571 void* baseObj = (void*) inst; 2572 void* nextObj = (void*) inst; 2573 VkBaseLayerObject *nextInstObj; 2574 PFN_vkGetInstanceProcAddr nextGPA = loader_gpa_instance_internal; 2575 2576 if (!inst->activated_layer_list.count) { 2577 loader_init_instance_core_dispatch_table(inst->disp, nextGPA, (VkInstance) nextObj, (VkInstance) baseObj); 2578 return 0; 2579 } 2580 2581 wrappedInstance = loader_stack_alloc(sizeof(VkBaseLayerObject) 2582 * inst->activated_layer_list.count); 2583 if (!wrappedInstance) { 2584 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc Instance objects for layer"); 2585 return 0; 2586 } 2587 2588 /* Create instance chain of enabled layers */ 2589 layer_idx = inst->activated_layer_list.count - 1; 2590 for (int32_t i = inst->activated_layer_list.count - 1; i >= 0; i--) { 2591 struct loader_layer_properties *layer_prop = &inst->activated_layer_list.list[i]; 2592 loader_platform_dl_handle lib_handle; 2593 2594 /* 2595 * Note: An extension's Get*ProcAddr should not return a function pointer for 2596 * any extension entry points until the extension has been enabled. 2597 * To do this requires a different behavior from Get*ProcAddr functions implemented 2598 * in layers. 2599 * The very first call to a layer will be it's Get*ProcAddr function requesting 2600 * the layer's vkGet*ProcAddr. The layer should initialize its internal dispatch table 2601 * with the wrapped object given (either Instance or Device) and return the layer's 2602 * Get*ProcAddr function. The layer should also use this opportunity to record the 2603 * baseObject so that it can find the correct local dispatch table on future calls. 2604 * Subsequent calls to Get*ProcAddr, CreateInstance, CreateDevice 2605 * will not use a wrapped object and must look up their local dispatch table from 2606 * the given baseObject. 2607 */ 2608 nextInstObj = (wrappedInstance + layer_idx); 2609 nextInstObj->pGPA = (PFN_vkGPA) nextGPA; 2610 nextInstObj->baseObject = baseObj; 2611 nextInstObj->nextObject = nextObj; 2612 nextObj = (void*) nextInstObj; 2613 2614 lib_handle = loader_add_layer_lib(inst, "instance", layer_prop); 2615 if (!lib_handle) 2616 continue; // TODO what should we do in this case 2617 if ((nextGPA = layer_prop->functions.get_instance_proc_addr) == NULL) { 2618 if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) { 2619 nextGPA = (PFN_vkGetInstanceProcAddr) loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr"); 2620 layer_prop->functions.get_instance_proc_addr = nextGPA; 2621 } else 2622 nextGPA = (PFN_vkGetInstanceProcAddr) loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gipa); 2623 if (!nextGPA) { 2624 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to find vkGetInstanceProcAddr in layer %s", layer_prop->lib_name); 2625 2626 /* TODO: Should we return nextObj, nextGPA to previous? 
or decrement layer_list count*/ 2627 continue; 2628 } 2629 } 2630 2631 loader_log(VK_DBG_REPORT_INFO_BIT, 0, 2632 "Insert instance layer %s (%s)", 2633 layer_prop->info.layerName, 2634 layer_prop->lib_name); 2635 2636 layer_idx--; 2637 } 2638 2639 loader_init_instance_core_dispatch_table(inst->disp, nextGPA, (VkInstance) nextObj, (VkInstance) baseObj); 2640 2641 return inst->activated_layer_list.count; 2642} 2643 2644void loader_activate_instance_layer_extensions(struct loader_instance *inst) 2645{ 2646 2647 loader_init_instance_extension_dispatch_table(inst->disp, 2648 inst->disp->GetInstanceProcAddr, 2649 (VkInstance) inst); 2650} 2651 2652static VkResult loader_enable_device_layers( 2653 const struct loader_instance *inst, 2654 struct loader_icd *icd, 2655 struct loader_device *dev, 2656 const VkDeviceCreateInfo *pCreateInfo, 2657 const struct loader_layer_list *device_layers) 2658 2659{ 2660 VkResult err; 2661 2662 assert(dev && "Cannot have null device"); 2663 2664 if (dev->activated_layer_list.list == NULL || dev->activated_layer_list.capacity == 0) { 2665 loader_init_layer_list(inst, &dev->activated_layer_list); 2666 } 2667 2668 if (dev->activated_layer_list.list == NULL) { 2669 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc device activated layer list"); 2670 return VK_ERROR_OUT_OF_HOST_MEMORY; 2671 } 2672 2673 /* Add any implicit layers first */ 2674 loader_add_layer_implicit( 2675 inst, 2676 VK_LAYER_TYPE_DEVICE_IMPLICIT, 2677 &dev->activated_layer_list, 2678 device_layers); 2679 2680 /* Add any layers specified via environment variable next */ 2681 loader_add_layer_env( 2682 inst, 2683 VK_LAYER_TYPE_DEVICE_EXPLICIT, 2684 "VK_DEVICE_LAYERS", 2685 &dev->activated_layer_list, 2686 device_layers); 2687 2688 /* Add layers specified by the application */ 2689 err = loader_add_layer_names_to_list( 2690 inst, 2691 &dev->activated_layer_list, 2692 pCreateInfo->enabledLayerNameCount, 2693 pCreateInfo->ppEnabledLayerNames, 2694 device_layers); 2695 2696 return err; 2697} 2698 2699/* 2700 * This function terminates the device chain for CreateDevice. 2701 * CreateDevice is a special case and so the loader call's 2702 * the ICD's CreateDevice before creating the chain. Since 2703 * we can't call CreateDevice twice we must terminate the 2704 * device chain with something else. 
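 *
 * Put differently (informal restatement of the flow in loader_CreateDevice
 * further below): the loader calls the ICD's CreateDevice directly, then
 * builds and walks the layer device chain, whose terminating vkCreateDevice
 * is this no-op that simply returns VK_SUCCESS.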
2705 */ 2706static VKAPI_ATTR VkResult VKAPI_CALL scratch_vkCreateDevice( 2707 VkPhysicalDevice physicalDevice, 2708 const VkDeviceCreateInfo *pCreateInfo, 2709 const VkAllocationCallbacks* pAllocator, 2710 VkDevice *pDevice) 2711{ 2712 return VK_SUCCESS; 2713} 2714 2715static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_GetDeviceChainProcAddr(VkDevice device, const char * name) 2716{ 2717 if (!strcmp(name, "vkGetDeviceProcAddr")) 2718 return (PFN_vkVoidFunction) loader_GetDeviceChainProcAddr; 2719 if (!strcmp(name, "vkCreateDevice")) 2720 return (PFN_vkVoidFunction) scratch_vkCreateDevice; 2721 2722 struct loader_device *found_dev; 2723 struct loader_icd *icd = loader_get_icd_and_device(device, &found_dev); 2724 return icd->GetDeviceProcAddr(device, name); 2725} 2726 2727static uint32_t loader_activate_device_layers( 2728 const struct loader_instance *inst, 2729 struct loader_device *dev, 2730 VkDevice device) 2731{ 2732 if (!dev) { 2733 return 0; 2734 } 2735 2736 /* activate any layer libraries */ 2737 void* nextObj = (void*) device; 2738 void* baseObj = nextObj; 2739 VkBaseLayerObject *nextGpuObj; 2740 PFN_vkGetDeviceProcAddr nextGPA = loader_GetDeviceChainProcAddr; 2741 VkBaseLayerObject *wrappedGpus; 2742 2743 if (!dev->activated_layer_list.count) { 2744 loader_init_device_dispatch_table(&dev->loader_dispatch, nextGPA, 2745 (VkDevice) nextObj, (VkDevice) baseObj); 2746 return 0; 2747 } 2748 2749 wrappedGpus = loader_heap_alloc(inst, 2750 sizeof (VkBaseLayerObject) * dev->activated_layer_list.count, 2751 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2752 if (!wrappedGpus) { 2753 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc Gpu objects for layer"); 2754 return 0; 2755 } 2756 2757 for (int32_t i = dev->activated_layer_list.count - 1; i >= 0; i--) { 2758 2759 struct loader_layer_properties *layer_prop = &dev->activated_layer_list.list[i]; 2760 loader_platform_dl_handle lib_handle; 2761 2762 nextGpuObj = (wrappedGpus + i); 2763 nextGpuObj->pGPA = (PFN_vkGPA)nextGPA; 2764 nextGpuObj->baseObject = baseObj; 2765 nextGpuObj->nextObject = nextObj; 2766 nextObj = (void*) nextGpuObj; 2767 2768 lib_handle = loader_add_layer_lib(inst, "device", layer_prop); 2769 if ((nextGPA = layer_prop->functions.get_device_proc_addr) == NULL) { 2770 if (layer_prop->functions.str_gdpa == NULL || strlen(layer_prop->functions.str_gdpa) == 0) { 2771 nextGPA = (PFN_vkGetDeviceProcAddr) loader_platform_get_proc_address(lib_handle, "vkGetDeviceProcAddr"); 2772 layer_prop->functions.get_device_proc_addr = nextGPA; 2773 } else 2774 nextGPA = (PFN_vkGetDeviceProcAddr) loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gdpa); 2775 if (!nextGPA) { 2776 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to find vkGetDeviceProcAddr in layer %s", layer_prop->lib_name); 2777 continue; 2778 } 2779 } 2780 2781 loader_log(VK_DBG_REPORT_INFO_BIT, 0, 2782 "Insert device layer library %s (%s)", 2783 layer_prop->info.layerName, 2784 layer_prop->lib_name); 2785 2786 } 2787 2788 loader_init_device_dispatch_table(&dev->loader_dispatch, nextGPA, 2789 (VkDevice) nextObj, (VkDevice) baseObj); 2790 loader_heap_free(inst, wrappedGpus); 2791 2792 return dev->activated_layer_list.count; 2793} 2794 2795VkResult loader_validate_layers( 2796 const uint32_t layer_count, 2797 const char * const *ppEnabledLayerNames, 2798 const struct loader_layer_list *list) 2799{ 2800 struct loader_layer_properties *prop; 2801 2802 for (uint32_t i = 0; i < layer_count; i++) { 2803 prop = loader_get_layer_property(ppEnabledLayerNames[i], 2804 
list); 2805 if (!prop) { 2806 return VK_ERROR_LAYER_NOT_PRESENT; 2807 } 2808 } 2809 2810 return VK_SUCCESS; 2811} 2812 2813VkResult loader_validate_instance_extensions( 2814 const struct loader_extension_list *icd_exts, 2815 const struct loader_layer_list *instance_layer, 2816 const VkInstanceCreateInfo *pCreateInfo) 2817{ 2818 VkExtensionProperties *extension_prop; 2819 struct loader_layer_properties *layer_prop; 2820 2821 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) { 2822 extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], 2823 icd_exts); 2824 2825 if (extension_prop) { 2826 continue; 2827 } 2828 2829 extension_prop = NULL; 2830 2831 /* Not in global list, search layer extension lists */ 2832 for (uint32_t j = 0; j < pCreateInfo->enabledLayerNameCount; j++) { 2833 layer_prop = loader_get_layer_property(pCreateInfo->ppEnabledLayerNames[j], 2834 instance_layer); 2835 if (!layer_prop) { 2836 /* Should NOT get here, loader_validate_layers 2837 * should have already filtered this case out. 2838 */ 2839 continue; 2840 } 2841 2842 extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], 2843 &layer_prop->instance_extension_list); 2844 if (extension_prop) { 2845 /* Found the extension in one of the layers enabled by the app. */ 2846 break; 2847 } 2848 } 2849 2850 if (!extension_prop) { 2851 /* Didn't find extension name in any of the global layers, error out */ 2852 return VK_ERROR_EXTENSION_NOT_PRESENT; 2853 } 2854 } 2855 return VK_SUCCESS; 2856} 2857 2858VkResult loader_validate_device_extensions( 2859 struct loader_physical_device *phys_dev, 2860 const struct loader_layer_list *device_layer, 2861 const VkDeviceCreateInfo *pCreateInfo) 2862{ 2863 VkExtensionProperties *extension_prop; 2864 struct loader_layer_properties *layer_prop; 2865 2866 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) { 2867 const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i]; 2868 extension_prop = get_extension_property(extension_name, 2869 &phys_dev->device_extension_cache); 2870 2871 if (extension_prop) { 2872 continue; 2873 } 2874 2875 /* Not in global list, search layer extension lists */ 2876 for (uint32_t j = 0; j < pCreateInfo->enabledLayerNameCount; j++) { 2877 const char *layer_name = pCreateInfo->ppEnabledLayerNames[j]; 2878 layer_prop = loader_get_layer_property(layer_name, 2879 device_layer); 2880 2881 if (!layer_prop) { 2882 /* Should NOT get here, loader_validate_layers 2883 * should have already filtered this case out. 2884 */ 2885 continue; 2886 } 2887 2888 extension_prop = get_extension_property(extension_name, 2889 &layer_prop->device_extension_list); 2890 if (extension_prop) { 2891 /* Found the extension in one of the layers enabled by the app.
*/ 2892 break; 2893 } 2894 } 2895 2896 if (!extension_prop) { 2897 /* Didn't find extension name in any of the device layers, error out */ 2898 return VK_ERROR_EXTENSION_NOT_PRESENT; 2899 } 2900 } 2901 return VK_SUCCESS; 2902} 2903 2904VKAPI_ATTR VkResult VKAPI_CALL loader_CreateInstance( 2905 const VkInstanceCreateInfo* pCreateInfo, 2906 const VkAllocationCallbacks* pAllocator, 2907 VkInstance* pInstance) 2908{ 2909 struct loader_instance *ptr_instance = *(struct loader_instance **) pInstance; 2910 struct loader_icd *icd; 2911 VkExtensionProperties *prop; 2912 char **filtered_extension_names = NULL; 2913 VkInstanceCreateInfo icd_create_info; 2914 VkResult res = VK_SUCCESS; 2915 bool success = false; 2916 2917 memcpy(&icd_create_info, pCreateInfo, sizeof(icd_create_info)); 2918 2919 icd_create_info.enabledLayerNameCount = 0; 2920 icd_create_info.ppEnabledLayerNames = NULL; 2921 2922 /* 2923 * NOTE: Need to filter the extensions to only those 2924 * supported by the ICD. 2925 * No ICD will advertise support for layers. An ICD 2926 * library could support a layer, but it would be 2927 * independent of the actual ICD, just in the same library. 2928 */ 2929 filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionNameCount * sizeof(char *)); 2930 if (!filtered_extension_names) { 2931 return VK_ERROR_OUT_OF_HOST_MEMORY; 2932 } 2933 icd_create_info.ppEnabledExtensionNames = (const char * const *) filtered_extension_names; 2934 2935 for (uint32_t i = 0; i < ptr_instance->icd_libs.count; i++) { 2936 icd = loader_icd_add(ptr_instance, &ptr_instance->icd_libs.list[i]); 2937 if (icd) { 2938 icd_create_info.enabledExtensionNameCount = 0; 2939 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) { 2940 prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], 2941 &ptr_instance->ext_list); 2942 if (prop) { 2943 filtered_extension_names[icd_create_info.enabledExtensionNameCount] = (char *) pCreateInfo->ppEnabledExtensionNames[i]; 2944 icd_create_info.enabledExtensionNameCount++; 2945 } 2946 } 2947 2948 res = ptr_instance->icd_libs.list[i].CreateInstance(&icd_create_info, 2949 pAllocator, 2950 &(icd->instance)); 2951 if (res == VK_SUCCESS) 2952 success = loader_icd_init_entrys( 2953 icd, 2954 icd->instance, 2955 ptr_instance->icd_libs.list[i].GetInstanceProcAddr); 2956 2957 if (res != VK_SUCCESS || !success) 2958 { 2959 ptr_instance->icds = ptr_instance->icds->next; 2960 icd->instance = VK_NULL_HANDLE; 2961 loader_icd_destroy(ptr_instance, icd); 2962 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2963 "ICD ignored: failed to CreateInstance and find entrypoints with ICD"); 2964 } 2965 } 2966 } 2967 2968 /* 2969 * If no ICDs were added to instance list and res is unchanged 2970 * from its initial value, the loader was unable to find 2971 * a suitable ICD.
2972 */ 2973 if (ptr_instance->icds == NULL) { 2974 if (res == VK_SUCCESS) { 2975 return VK_ERROR_INCOMPATIBLE_DRIVER; 2976 } else { 2977 return res; 2978 } 2979 } 2980 2981 return VK_SUCCESS; 2982} 2983 2984VKAPI_ATTR void VKAPI_CALL loader_DestroyInstance( 2985 VkInstance instance, 2986 const VkAllocationCallbacks* pAllocator) 2987{ 2988 struct loader_instance *ptr_instance = loader_instance(instance); 2989 struct loader_icd *icds = ptr_instance->icds; 2990 struct loader_icd *next_icd; 2991 2992 // Remove this instance from the list of instances: 2993 struct loader_instance *prev = NULL; 2994 struct loader_instance *next = loader.instances; 2995 while (next != NULL) { 2996 if (next == ptr_instance) { 2997 // Remove this instance from the list: 2998 if (prev) 2999 prev->next = next->next; 3000 else 3001 loader.instances = next->next; 3002 break; 3003 } 3004 prev = next; 3005 next = next->next; 3006 } 3007 3008 while (icds) { 3009 if (icds->instance) { 3010 icds->DestroyInstance(icds->instance, pAllocator); 3011 } 3012 next_icd = icds->next; 3013 icds->instance = VK_NULL_HANDLE; 3014 loader_icd_destroy(ptr_instance, icds); 3015 3016 icds = next_icd; 3017 } 3018 loader_delete_layer_properties(ptr_instance, &ptr_instance->device_layer_list); 3019 loader_delete_layer_properties(ptr_instance, &ptr_instance->instance_layer_list); 3020 loader_scanned_icd_clear(ptr_instance, &ptr_instance->icd_libs); 3021 loader_destroy_ext_list(ptr_instance, &ptr_instance->ext_list); 3022 for (uint32_t i = 0; i < ptr_instance->total_gpu_count; i++) 3023 loader_destroy_ext_list(ptr_instance, &ptr_instance->phys_devs[i].device_extension_cache); 3024 loader_heap_free(ptr_instance, ptr_instance->phys_devs); 3025 loader_free_dev_ext_table(ptr_instance); 3026} 3027 3028VkResult loader_init_physical_device_info(struct loader_instance *ptr_instance) 3029{ 3030 struct loader_icd *icd; 3031 uint32_t i, j, idx, count = 0; 3032 VkResult res; 3033 struct loader_phys_dev_per_icd *phys_devs; 3034 3035 ptr_instance->total_gpu_count = 0; 3036 phys_devs = (struct loader_phys_dev_per_icd *) loader_stack_alloc( 3037 sizeof(struct loader_phys_dev_per_icd) * 3038 ptr_instance->total_icd_count); 3039 if (!phys_devs) 3040 return VK_ERROR_OUT_OF_HOST_MEMORY; 3041 3042 icd = ptr_instance->icds; 3043 for (i = 0; i < ptr_instance->total_icd_count; i++) { 3044 assert(icd); 3045 res = icd->EnumeratePhysicalDevices(icd->instance, &phys_devs[i].count, NULL); 3046 if (res != VK_SUCCESS) 3047 return res; 3048 count += phys_devs[i].count; 3049 icd = icd->next; 3050 } 3051 3052 ptr_instance->phys_devs = (struct loader_physical_device *) loader_heap_alloc( 3053 ptr_instance, 3054 count * sizeof(struct loader_physical_device), 3055 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 3056 if (!ptr_instance->phys_devs) 3057 return VK_ERROR_OUT_OF_HOST_MEMORY; 3058 3059 icd = ptr_instance->icds; 3060 3061 struct loader_physical_device *inst_phys_devs = ptr_instance->phys_devs; 3062 idx = 0; 3063 for (i = 0; i < ptr_instance->total_icd_count; i++) { 3064 assert(icd); 3065 3066 phys_devs[i].phys_devs = (VkPhysicalDevice *) loader_stack_alloc( 3067 phys_devs[i].count * sizeof(VkPhysicalDevice)); 3068 if (!phys_devs[i].phys_devs) { 3069 loader_heap_free(ptr_instance, ptr_instance->phys_devs); 3070 ptr_instance->phys_devs = NULL; 3071 return VK_ERROR_OUT_OF_HOST_MEMORY; 3072 } 3073 res = icd->EnumeratePhysicalDevices( 3074 icd->instance, 3075 &(phys_devs[i].count), 3076 phys_devs[i].phys_devs); 3077 if ((res == VK_SUCCESS)) { 3078 ptr_instance->total_gpu_count += 
phys_devs[i].count; 3079 for (j = 0; j < phys_devs[i].count; j++) { 3080 3081 // initialize the loader's physicalDevice object 3082 loader_set_dispatch((void *) &inst_phys_devs[idx], ptr_instance->disp); 3083 inst_phys_devs[idx].this_instance = ptr_instance; 3084 inst_phys_devs[idx].this_icd = icd; 3085 inst_phys_devs[idx].phys_dev = phys_devs[i].phys_devs[j]; 3086 memset(&inst_phys_devs[idx].device_extension_cache, 0, sizeof(struct loader_extension_list)); 3087 3088 idx++; 3089 } 3090 } else { 3091 loader_heap_free(ptr_instance, ptr_instance->phys_devs); 3092 ptr_instance->phys_devs = NULL; 3093 return res; 3094 } 3095 3096 icd = icd->next; 3097 } 3098 3099 return VK_SUCCESS; 3100} 3101 3102VKAPI_ATTR VkResult VKAPI_CALL loader_EnumeratePhysicalDevices( 3103 VkInstance instance, 3104 uint32_t* pPhysicalDeviceCount, 3105 VkPhysicalDevice* pPhysicalDevices) 3106{ 3107 uint32_t i; 3108 struct loader_instance *ptr_instance = (struct loader_instance *) instance; 3109 VkResult res = VK_SUCCESS; 3110 3111 if (ptr_instance->total_gpu_count == 0) { 3112 res = loader_init_physical_device_info(ptr_instance); 3113 } 3114 3115 *pPhysicalDeviceCount = ptr_instance->total_gpu_count; 3116 if (!pPhysicalDevices) { 3117 return res; 3118 } 3119 3120 for (i = 0; i < ptr_instance->total_gpu_count; i++) { 3121 pPhysicalDevices[i] = (VkPhysicalDevice) &ptr_instance->phys_devs[i]; 3122 } 3123 3124 return res; 3125} 3126 3127VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceProperties( 3128 VkPhysicalDevice physicalDevice, 3129 VkPhysicalDeviceProperties* pProperties) 3130{ 3131 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3132 struct loader_icd *icd = phys_dev->this_icd; 3133 3134 if (icd->GetPhysicalDeviceProperties) 3135 icd->GetPhysicalDeviceProperties(phys_dev->phys_dev, pProperties); 3136} 3137 3138VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceQueueFamilyProperties ( 3139 VkPhysicalDevice physicalDevice, 3140 uint32_t* pQueueFamilyPropertyCount, 3141 VkQueueFamilyProperties* pProperties) 3142{ 3143 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3144 struct loader_icd *icd = phys_dev->this_icd; 3145 3146 if (icd->GetPhysicalDeviceQueueFamilyProperties) 3147 icd->GetPhysicalDeviceQueueFamilyProperties(phys_dev->phys_dev, pQueueFamilyPropertyCount, pProperties); 3148} 3149 3150VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceMemoryProperties ( 3151 VkPhysicalDevice physicalDevice, 3152 VkPhysicalDeviceMemoryProperties* pProperties) 3153{ 3154 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3155 struct loader_icd *icd = phys_dev->this_icd; 3156 3157 if (icd->GetPhysicalDeviceMemoryProperties) 3158 icd->GetPhysicalDeviceMemoryProperties(phys_dev->phys_dev, pProperties); 3159} 3160 3161VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceFeatures( 3162 VkPhysicalDevice physicalDevice, 3163 VkPhysicalDeviceFeatures* pFeatures) 3164{ 3165 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3166 struct loader_icd *icd = phys_dev->this_icd; 3167 3168 if (icd->GetPhysicalDeviceFeatures) 3169 icd->GetPhysicalDeviceFeatures(phys_dev->phys_dev, pFeatures); 3170} 3171 3172VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceFormatProperties( 3173 VkPhysicalDevice physicalDevice, 3174 VkFormat format, 3175 VkFormatProperties* pFormatInfo) 3176{ 3177 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3178 
struct loader_icd *icd = phys_dev->this_icd; 3179 3180 if (icd->GetPhysicalDeviceFormatProperties) 3181 icd->GetPhysicalDeviceFormatProperties(phys_dev->phys_dev, format, pFormatInfo); 3182} 3183 3184VKAPI_ATTR VkResult VKAPI_CALL loader_GetPhysicalDeviceImageFormatProperties( 3185 VkPhysicalDevice physicalDevice, 3186 VkFormat format, 3187 VkImageType type, 3188 VkImageTiling tiling, 3189 VkImageUsageFlags usage, 3190 VkImageCreateFlags flags, 3191 VkImageFormatProperties* pImageFormatProperties) 3192{ 3193 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3194 struct loader_icd *icd = phys_dev->this_icd; 3195 3196 if (!icd->GetPhysicalDeviceImageFormatProperties) 3197 return VK_ERROR_INITIALIZATION_FAILED; 3198 3199 return icd->GetPhysicalDeviceImageFormatProperties(phys_dev->phys_dev, format, 3200 type, tiling, usage, flags, pImageFormatProperties); 3201} 3202 3203VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceSparseImageFormatProperties( 3204 VkPhysicalDevice physicalDevice, 3205 VkFormat format, 3206 VkImageType type, 3207 VkSampleCountFlagBits samples, 3208 VkImageUsageFlags usage, 3209 VkImageTiling tiling, 3210 uint32_t* pNumProperties, 3211 VkSparseImageFormatProperties* pProperties) 3212{ 3213 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3214 struct loader_icd *icd = phys_dev->this_icd; 3215 3216 if (icd->GetPhysicalDeviceSparseImageFormatProperties) 3217 icd->GetPhysicalDeviceSparseImageFormatProperties(phys_dev->phys_dev, format, type, samples, usage, tiling, pNumProperties, pProperties); 3218} 3219 3220VKAPI_ATTR VkResult VKAPI_CALL loader_CreateDevice( 3221 VkPhysicalDevice physicalDevice, 3222 const VkDeviceCreateInfo* pCreateInfo, 3223 const VkAllocationCallbacks* pAllocator, 3224 VkDevice* pDevice) 3225{ 3226 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3227 struct loader_icd *icd = phys_dev->this_icd; 3228 struct loader_device *dev; 3229 struct loader_instance *inst; 3230 VkDeviceCreateInfo device_create_info; 3231 char **filtered_extension_names = NULL; 3232 VkResult res; 3233 3234 assert(pCreateInfo->queueCreateInfoCount >= 1); 3235 3236 if (!icd) 3237 return VK_ERROR_INITIALIZATION_FAILED; 3238 3239 inst = phys_dev->this_instance; 3240 3241 if (!icd->CreateDevice) { 3242 return VK_ERROR_INITIALIZATION_FAILED; 3243 } 3244 3245 /* validate any app enabled layers are available */ 3246 if (pCreateInfo->enabledLayerNameCount > 0) { 3247 res = loader_validate_layers(pCreateInfo->enabledLayerNameCount, 3248 pCreateInfo->ppEnabledLayerNames, 3249 &inst->device_layer_list); 3250 if (res != VK_SUCCESS) { 3251 return res; 3252 } 3253 } 3254 3255 /* Get the physical device extensions if they haven't been retrieved yet */ 3256 if (phys_dev->device_extension_cache.capacity == 0) { 3257 if (!loader_init_ext_list(inst, &phys_dev->device_extension_cache)) { 3258 return VK_ERROR_OUT_OF_HOST_MEMORY; 3259 } 3260 res = loader_add_physical_device_extensions( 3261 inst, physicalDevice, 3262 phys_dev->this_icd->this_icd_lib->lib_name, 3263 &phys_dev->device_extension_cache); 3264 if (res != VK_SUCCESS) { 3265 return res; 3266 } 3267 } 3268 /* make sure requested extensions to be enabled are supported */ 3269 res = loader_validate_device_extensions(phys_dev, &inst->device_layer_list, pCreateInfo); 3270 if (res != VK_SUCCESS) { 3271 return res; 3272 } 3273 3274 /* 3275 * NOTE: Need to filter the extensions to only those 3276 * supported by the ICD. 
3277 * No ICD will advertise support for layers. An ICD 3278 * library could support a layer, but it would be 3279 * independent of the actual ICD, just in the same library. 3280 */ 3281 filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionNameCount * sizeof(char *)); 3282 if (!filtered_extension_names) { 3283 return VK_ERROR_OUT_OF_HOST_MEMORY; 3284 } 3285 3286 /* Copy user's data */ 3287 memcpy(&device_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo)); 3288 3289 /* ICD's do not use layers */ 3290 device_create_info.enabledLayerNameCount = 0; 3291 device_create_info.ppEnabledLayerNames = NULL; 3292 3293 device_create_info.enabledExtensionNameCount = 0; 3294 device_create_info.ppEnabledExtensionNames = (const char * const *) filtered_extension_names; 3295 3296 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) { 3297 const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i]; 3298 VkExtensionProperties *prop = get_extension_property(extension_name, 3299 &phys_dev->device_extension_cache); 3300 if (prop) { 3301 filtered_extension_names[device_create_info.enabledExtensionNameCount] = (char *) extension_name; 3302 device_create_info.enabledExtensionNameCount++; 3303 } 3304 } 3305 3306 // since physicalDevice object maybe wrapped by a layer need to get unwrapped version 3307 // we haven't yet called down the chain for the layer to unwrap the object 3308 res = icd->CreateDevice(phys_dev->phys_dev, pCreateInfo, pAllocator, pDevice); 3309 if (res != VK_SUCCESS) { 3310 return res; 3311 } 3312 3313 dev = loader_add_logical_device(inst, *pDevice, &icd->logical_device_list); 3314 if (dev == NULL) { 3315 return VK_ERROR_OUT_OF_HOST_MEMORY; 3316 } 3317 3318 loader_init_dispatch(*pDevice, &dev->loader_dispatch); 3319 3320 /* activate any layers on device chain which terminates with device*/ 3321 res = loader_enable_device_layers(inst, icd, dev, pCreateInfo, &inst->device_layer_list); 3322 if (res != VK_SUCCESS) { 3323 loader_destroy_logical_device(inst, dev); 3324 return res; 3325 } 3326 loader_activate_device_layers(inst, dev, *pDevice); 3327 3328 /* initialize any device extension dispatch entry's from the instance list*/ 3329 loader_init_dispatch_dev_ext(inst, dev); 3330 3331 /* finally can call down the chain */ 3332 res = dev->loader_dispatch.core_dispatch.CreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice); 3333 3334 /* initialize WSI device extensions as part of core dispatch since loader has 3335 * dedicated trampoline code for these*/ 3336 loader_init_device_extension_dispatch_table(&dev->loader_dispatch, 3337 dev->loader_dispatch.core_dispatch.GetDeviceProcAddr, 3338 *pDevice); 3339 dev->loader_dispatch.core_dispatch.CreateDevice = icd->CreateDevice; 3340 3341 return res; 3342} 3343 3344/** 3345 * Get an instance level or global level entry point address. 3346 * @param instance 3347 * @param pName 3348 * @return 3349 * If instance == NULL returns a global level functions only 3350 * If instance is valid returns a trampoline entry point for all dispatchable Vulkan 3351 * functions both core and extensions. 
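 *
 * Hedged usage sketch from an application's point of view (standard Vulkan
 * calls, not specific to this loader):
 *   PFN_vkCreateInstance pfnCreateInstance = (PFN_vkCreateInstance)
 *       vkGetInstanceProcAddr(NULL, "vkCreateInstance");
 *   PFN_vkEnumeratePhysicalDevices pfnEnumPhysDevs = (PFN_vkEnumeratePhysicalDevices)
 *       vkGetInstanceProcAddr(instance, "vkEnumeratePhysicalDevices");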
3352 */ 3353LOADER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char * pName) 3354{ 3355 3356 void *addr; 3357 3358 addr = globalGetProcAddr(pName); 3359 if (instance == VK_NULL_HANDLE) { 3360 // get entrypoint addresses that are global (no dispatchable object) 3361 3362 return addr; 3363 } else { 3364 // if a global entrypoint return NULL 3365 if (addr) 3366 return NULL; 3367 } 3368 3369 struct loader_instance *ptr_instance = loader_get_instance(instance); 3370 if (ptr_instance == NULL) 3371 return NULL; 3372 // Return trampoline code for non-global entrypoints including any extensions. 3373 // Device extensions are returned if a layer or ICD supports the extension. 3374 // Instance extensions are returned if the extension is enabled and the loader 3375 // or someone else supports the extension 3376 return trampolineGetProcAddr(ptr_instance, pName); 3377 3378} 3379 3380/** 3381 * Get a device level or global level entry point address. 3382 * @param device 3383 * @param pName 3384 * @return 3385 * If device is valid, returns a device relative entry point for device level 3386 * entry points both core and extensions. 3387 * Device relative means call down the device chain. 3388 */ 3389LOADER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char * pName) 3390{ 3391 void *addr; 3392 3393 /* for entrypoints that loader must handle (ie non-dispatchable or create object) 3394 make sure the loader entrypoint is returned */ 3395 addr = loader_non_passthrough_gdpa(pName); 3396 if (addr) { 3397 return addr; 3398 } 3399 3400 /* Although CreateDevice is on device chain it's dispatchable object isn't 3401 * a VkDevice or child of VkDevice so return NULL. 3402 */ 3403 if (!strcmp(pName, "CreateDevice")) 3404 return NULL; 3405 3406 /* return the dispatch table entrypoint for the fastest case */ 3407 const VkLayerDispatchTable *disp_table = * (VkLayerDispatchTable **) device; 3408 if (disp_table == NULL) 3409 return NULL; 3410 3411 addr = loader_lookup_device_dispatch_table(disp_table, pName); 3412 if (addr) 3413 return addr; 3414 3415 if (disp_table->GetDeviceProcAddr == NULL) 3416 return NULL; 3417 return disp_table->GetDeviceProcAddr(device, pName); 3418} 3419 3420LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties( 3421 const char* pLayerName, 3422 uint32_t* pPropertyCount, 3423 VkExtensionProperties* pProperties) 3424{ 3425 struct loader_extension_list *global_ext_list=NULL; 3426 struct loader_layer_list instance_layers; 3427 struct loader_extension_list icd_extensions; 3428 struct loader_icd_libs icd_libs; 3429 uint32_t copy_size; 3430 3431 tls_instance = NULL; 3432 memset(&icd_extensions, 0, sizeof(icd_extensions)); 3433 memset(&instance_layers, 0, sizeof(instance_layers)); 3434 loader_platform_thread_once(&once_init, loader_initialize); 3435 3436 /* get layer libraries if needed */ 3437 if (pLayerName && strlen(pLayerName) != 0) { 3438 loader_layer_scan(NULL, &instance_layers, NULL); 3439 for (uint32_t i = 0; i < instance_layers.count; i++) { 3440 struct loader_layer_properties *props = &instance_layers.list[i]; 3441 if (strcmp(props->info.layerName, pLayerName) == 0) { 3442 global_ext_list = &props->instance_extension_list; 3443 } 3444 } 3445 } 3446 else { 3447 /* Scan/discover all ICD libraries */ 3448 memset(&icd_libs, 0 , sizeof(struct loader_icd_libs)); 3449 loader_icd_scan(NULL, &icd_libs); 3450 /* get extensions from all ICD's, merge so no duplicates */ 3451 
LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(
    const char*                                 pLayerName,
    uint32_t*                                   pPropertyCount,
    VkExtensionProperties*                      pProperties)
{
    struct loader_extension_list *global_ext_list = NULL;
    struct loader_layer_list instance_layers;
    struct loader_extension_list icd_extensions;
    struct loader_icd_libs icd_libs;
    uint32_t copy_size;

    tls_instance = NULL;
    memset(&icd_extensions, 0, sizeof(icd_extensions));
    memset(&instance_layers, 0, sizeof(instance_layers));
    loader_platform_thread_once(&once_init, loader_initialize);

    /* get layer libraries if needed */
    if (pLayerName && strlen(pLayerName) != 0) {
        loader_layer_scan(NULL, &instance_layers, NULL);
        for (uint32_t i = 0; i < instance_layers.count; i++) {
            struct loader_layer_properties *props = &instance_layers.list[i];
            if (strcmp(props->info.layerName, pLayerName) == 0) {
                global_ext_list = &props->instance_extension_list;
            }
        }
    }
    else {
        /* Scan/discover all ICD libraries */
        memset(&icd_libs, 0, sizeof(struct loader_icd_libs));
        loader_icd_scan(NULL, &icd_libs);
        /* get extensions from all ICDs, merged so there are no duplicates */
        loader_get_icd_loader_instance_extensions(NULL, &icd_libs, &icd_extensions);
        loader_scanned_icd_clear(NULL, &icd_libs);
        global_ext_list = &icd_extensions;
    }

    if (global_ext_list == NULL) {
        loader_destroy_layer_list(NULL, &instance_layers);
        return VK_ERROR_LAYER_NOT_PRESENT;
    }

    if (pProperties == NULL) {
        *pPropertyCount = global_ext_list->count;
        loader_destroy_layer_list(NULL, &instance_layers);
        loader_destroy_ext_list(NULL, &icd_extensions);
        return VK_SUCCESS;
    }

    copy_size = *pPropertyCount < global_ext_list->count ? *pPropertyCount : global_ext_list->count;
    for (uint32_t i = 0; i < copy_size; i++) {
        memcpy(&pProperties[i],
               &global_ext_list->list[i],
               sizeof(VkExtensionProperties));
    }
    *pPropertyCount = copy_size;
    loader_destroy_ext_list(NULL, &icd_extensions);

    if (copy_size < global_ext_list->count) {
        loader_destroy_layer_list(NULL, &instance_layers);
        return VK_INCOMPLETE;
    }

    loader_destroy_layer_list(NULL, &instance_layers);
    return VK_SUCCESS;
}
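/*
 * Illustrative sketch only, not part of the loader build: the usual two-call
 * idiom against the enumerator above -- query the count, allocate, then fetch.
 * VK_INCOMPLETE is returned when the caller's buffer holds fewer entries than
 * are available; with this pattern the sizes match, so VK_SUCCESS is expected.
 */
#if 0
    uint32_t count = 0;
    vkEnumerateInstanceExtensionProperties(NULL, &count, NULL);   /* count only */
    VkExtensionProperties *props = malloc(count * sizeof(*props));
    VkResult r = vkEnumerateInstanceExtensionProperties(NULL, &count, props);
    /* ... use props[0..count-1] ... */
    free(props);
#endif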
LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(
    uint32_t*                                   pPropertyCount,
    VkLayerProperties*                          pProperties)
{
    struct loader_layer_list instance_layer_list;
    tls_instance = NULL;

    loader_platform_thread_once(&once_init, loader_initialize);

    uint32_t copy_size;

    /* get layer libraries */
    memset(&instance_layer_list, 0, sizeof(instance_layer_list));
    loader_layer_scan(NULL, &instance_layer_list, NULL);

    if (pProperties == NULL) {
        *pPropertyCount = instance_layer_list.count;
        loader_destroy_layer_list(NULL, &instance_layer_list);
        return VK_SUCCESS;
    }

    copy_size = (*pPropertyCount < instance_layer_list.count) ? *pPropertyCount : instance_layer_list.count;
    for (uint32_t i = 0; i < copy_size; i++) {
        memcpy(&pProperties[i], &instance_layer_list.list[i].info, sizeof(VkLayerProperties));
    }
    *pPropertyCount = copy_size;
    loader_destroy_layer_list(NULL, &instance_layer_list);

    if (copy_size < instance_layer_list.count) {
        return VK_INCOMPLETE;
    }

    return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL loader_EnumerateDeviceExtensionProperties(
    VkPhysicalDevice                            physicalDevice,
    const char*                                 pLayerName,
    uint32_t*                                   pPropertyCount,
    VkExtensionProperties*                      pProperties)
{
    struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice;
    uint32_t copy_size;

    uint32_t count;
    struct loader_extension_list *dev_ext_list = NULL;

    /* get layer libraries if needed */
    if (pLayerName && strlen(pLayerName) != 0) {
        for (uint32_t i = 0; i < phys_dev->this_instance->device_layer_list.count; i++) {
            struct loader_layer_properties *props = &phys_dev->this_instance->device_layer_list.list[i];
            if (strcmp(props->info.layerName, pLayerName) == 0) {
                dev_ext_list = &props->device_extension_list;
            }
        }
    }
    else {
        /* this case is during the call down the instance chain */
        struct loader_icd *icd = phys_dev->this_icd;
        VkResult res;
        res = icd->EnumerateDeviceExtensionProperties(phys_dev->phys_dev, NULL, pPropertyCount, pProperties);
        if (pProperties != NULL && res == VK_SUCCESS) {
            /* initialize dev_extension list within the physicalDevice object */
            res = loader_init_physical_device_extensions(phys_dev->this_instance,
                                                         phys_dev, *pPropertyCount, pProperties,
                                                         &phys_dev->device_extension_cache);
        }
        return res;
    }

    count = (dev_ext_list == NULL) ? 0 : dev_ext_list->count;
    if (pProperties == NULL) {
        *pPropertyCount = count;
        return VK_SUCCESS;
    }

    copy_size = *pPropertyCount < count ? *pPropertyCount : count;
    for (uint32_t i = 0; i < copy_size; i++) {
        memcpy(&pProperties[i],
               &dev_ext_list->list[i],
               sizeof(VkExtensionProperties));
    }
    *pPropertyCount = copy_size;

    if (copy_size < count) {
        return VK_INCOMPLETE;
    }

    return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL loader_EnumerateDeviceLayerProperties(
    VkPhysicalDevice                            physicalDevice,
    uint32_t*                                   pPropertyCount,
    VkLayerProperties*                          pProperties)
{
    uint32_t copy_size;
    struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice;

    uint32_t count = phys_dev->this_instance->device_layer_list.count;

    if (pProperties == NULL) {
        *pPropertyCount = count;
        return VK_SUCCESS;
    }

    copy_size = (*pPropertyCount < count) ? *pPropertyCount : count;
    for (uint32_t i = 0; i < copy_size; i++) {
        memcpy(&pProperties[i], &(phys_dev->this_instance->device_layer_list.list[i].info), sizeof(VkLayerProperties));
    }
    *pPropertyCount = copy_size;

    if (copy_size < count) {
        return VK_INCOMPLETE;
    }

    return VK_SUCCESS;
}
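/*
 * Illustrative sketch only, not part of the loader build: passing a layer name to
 * vkEnumerateDeviceExtensionProperties reports that layer's device extensions from
 * the instance's device_layer_list (see the pLayerName branch above), while passing
 * NULL reports the ICD's extensions for the physical device. The layer name below
 * is just an example; physical_device is assumed to exist in the caller.
 */
#if 0
    uint32_t count = 0;
    vkEnumerateDeviceExtensionProperties(physical_device,
                                         "VK_LAYER_LUNARG_draw_state",  /* example layer name */
                                         &count, NULL);
#endif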