loader.c revision 44fe0069d8bfa361cd8d34954cf16366aaac4d5d
1/* 2 * 3 * Copyright (C) 2015 Valve Corporation 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included 13 * in all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
22 * 23 * Author: Chia-I Wu <olvaffe@gmail.com> 24 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com> 25 * Author: Ian Elliott <ian@LunarG.com> 26 * Author: Jon Ashburn <jon@lunarg.com> 27 * 28 */ 29#define _GNU_SOURCE 30#include <stdio.h> 31#include <stdlib.h> 32#include <stdarg.h> 33#include <stdbool.h> 34#include <string.h> 35 36#include <sys/types.h> 37#if defined(_WIN32) 38#include "dirent_on_windows.h" 39#else // _WIN32 40#include <dirent.h> 41#endif // _WIN32 42#include "vk_loader_platform.h" 43#include "loader.h" 44#include "gpa_helper.h" 45#include "table_ops.h" 46#include "debug_report.h" 47#include "wsi_swapchain.h" 48#include "vulkan/vk_icd.h" 49#include "cJSON.h" 50#include "murmurhash.h" 51 52static loader_platform_dl_handle loader_add_layer_lib( 53 const struct loader_instance *inst, 54 const char *chain_type, 55 struct loader_layer_properties *layer_prop); 56 57static void loader_remove_layer_lib( 58 struct loader_instance *inst, 59 struct loader_layer_properties *layer_prop); 60 61struct loader_struct loader = {0}; 62// TLS for instance for alloc/free callbacks 63THREAD_LOCAL_DECL struct loader_instance *tls_instance; 64 65static bool loader_init_ext_list( 66 const struct loader_instance *inst, 67 struct loader_extension_list *ext_info); 68 69static int loader_platform_combine_path(char *dest, int len, ...); 70 71struct loader_phys_dev_per_icd { 72 uint32_t count; 73 VkPhysicalDevice *phys_devs; 74}; 75 76enum loader_debug { 77 LOADER_INFO_BIT = 0x01, 78 LOADER_WARN_BIT = 0x02, 79 LOADER_PERF_BIT = 0x04, 80 LOADER_ERROR_BIT = 0x08, 81 LOADER_DEBUG_BIT = 0x10, 82}; 83 84uint32_t g_loader_debug = 0; 85uint32_t g_loader_log_msgs = 0; 86 87//thread safety lock for accessing global data structures such as "loader" 88// all entrypoints on the instance chain need to be locked except GPA 89// additionally CreateDevice and DestroyDevice needs to be locked 90loader_platform_thread_mutex loader_lock; 91loader_platform_thread_mutex loader_json_lock; 92 
93// This table contains the loader's instance dispatch table, which contains 94// default functions if no instance layers are activated. This contains 95// pointers to "terminator functions". 96const VkLayerInstanceDispatchTable instance_disp = { 97 .GetInstanceProcAddr = vkGetInstanceProcAddr, 98 .CreateInstance = loader_CreateInstance, 99 .DestroyInstance = loader_DestroyInstance, 100 .EnumeratePhysicalDevices = loader_EnumeratePhysicalDevices, 101 .GetPhysicalDeviceFeatures = loader_GetPhysicalDeviceFeatures, 102 .GetPhysicalDeviceFormatProperties = loader_GetPhysicalDeviceFormatProperties, 103 .GetPhysicalDeviceImageFormatProperties = loader_GetPhysicalDeviceImageFormatProperties, 104 .GetPhysicalDeviceProperties = loader_GetPhysicalDeviceProperties, 105 .GetPhysicalDeviceQueueFamilyProperties = loader_GetPhysicalDeviceQueueFamilyProperties, 106 .GetPhysicalDeviceMemoryProperties = loader_GetPhysicalDeviceMemoryProperties, 107 .EnumerateDeviceExtensionProperties = loader_EnumerateDeviceExtensionProperties, 108 .EnumerateDeviceLayerProperties = loader_EnumerateDeviceLayerProperties, 109 .GetPhysicalDeviceSparseImageFormatProperties = loader_GetPhysicalDeviceSparseImageFormatProperties, 110 .GetPhysicalDeviceSurfaceSupportKHR = loader_GetPhysicalDeviceSurfaceSupportKHR, 111 .DbgCreateMsgCallback = loader_DbgCreateMsgCallback, 112 .DbgDestroyMsgCallback = loader_DbgDestroyMsgCallback, 113}; 114 115LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init); 116 117void* loader_heap_alloc( 118 const struct loader_instance *instance, 119 size_t size, 120 VkSystemAllocationScope alloc_scope) 121{ 122 if (instance && instance->alloc_callbacks.pfnAllocation) { 123 /* TODO: What should default alignment be? 1, 4, 8, other? 
*/ 124 return instance->alloc_callbacks.pfnAllocation(instance->alloc_callbacks.pUserData, size, 4, alloc_scope); 125 } 126 return malloc(size); 127} 128 129void loader_heap_free( 130 const struct loader_instance *instance, 131 void *pMemory) 132{ 133 if (pMemory == NULL) return; 134 if (instance && instance->alloc_callbacks.pfnFree) { 135 instance->alloc_callbacks.pfnFree(instance->alloc_callbacks.pUserData, pMemory); 136 return; 137 } 138 free(pMemory); 139} 140 141void* loader_heap_realloc( 142 const struct loader_instance *instance, 143 void *pMemory, 144 size_t orig_size, 145 size_t size, 146 VkSystemAllocationScope alloc_scope) 147{ 148 if (pMemory == NULL || orig_size == 0) 149 return loader_heap_alloc(instance, size, alloc_scope); 150 if (size == 0) { 151 loader_heap_free(instance, pMemory); 152 return NULL; 153 } 154 if (instance && instance->alloc_callbacks.pfnAllocation) { 155 if (size <= orig_size) { 156 memset(((uint8_t *)pMemory) + size, 0, orig_size - size); 157 return pMemory; 158 } 159 void *new_ptr = instance->alloc_callbacks.pfnAllocation(instance->alloc_callbacks.pUserData, size, 4, alloc_scope); 160 if (!new_ptr) 161 return NULL; 162 memcpy(new_ptr, pMemory, orig_size); 163 instance->alloc_callbacks.pfnFree(instance->alloc_callbacks.pUserData, pMemory); 164 return new_ptr; 165 } 166 return realloc(pMemory, size); 167} 168 169void *loader_tls_heap_alloc(size_t size) 170{ 171 return loader_heap_alloc(tls_instance, size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 172} 173 174void loader_tls_heap_free(void *pMemory) 175{ 176 loader_heap_free(tls_instance, pMemory); 177} 178 179static void loader_log(VkFlags msg_type, int32_t msg_code, 180 const char *format, ...) 
181{ 182 char msg[512]; 183 va_list ap; 184 int ret; 185 186 if (!(msg_type & g_loader_log_msgs)) { 187 return; 188 } 189 190 va_start(ap, format); 191 ret = vsnprintf(msg, sizeof(msg), format, ap); 192 if ((ret >= (int) sizeof(msg)) || ret < 0) { 193 msg[sizeof(msg)-1] = '\0'; 194 } 195 va_end(ap); 196 197#if defined(WIN32) 198 OutputDebugString(msg); 199 OutputDebugString("\n"); 200#endif 201 fputs(msg, stderr); 202 fputc('\n', stderr); 203} 204 205#if defined(WIN32) 206static char *loader_get_next_path(char *path); 207/** 208* Find the list of registry files (names within a key) in key "location". 209* 210* This function looks in the registry (hive = DEFAULT_VK_REGISTRY_HIVE) key as given in "location" 211* for a list or name/values which are added to a returned list (function return value). 212* The DWORD values within the key must be 0 or they are skipped. 213* Function return is a string with a ';' separated list of filenames. 214* Function return is NULL if no valid name/value pairs are found in the key, 215* or the key is not found. 216* 217* \returns 218* A string list of filenames as pointer. 219* When done using the returned string list, pointer should be freed. 220*/ 221static char *loader_get_registry_files(const struct loader_instance *inst, char *location) 222{ 223 LONG rtn_value; 224 HKEY hive, key; 225 DWORD access_flags; 226 char name[2048]; 227 char *out = NULL; 228 char *loc = location; 229 char *next; 230 DWORD idx = 0; 231 DWORD name_size = sizeof(name); 232 DWORD value; 233 DWORD total_size = 4096; 234 DWORD value_size = sizeof(value); 235 236 while(*loc) 237 { 238 next = loader_get_next_path(loc); 239 hive = DEFAULT_VK_REGISTRY_HIVE; 240 access_flags = KEY_QUERY_VALUE; 241 rtn_value = RegOpenKeyEx(hive, loc, 0, access_flags, &key); 242 if (rtn_value != ERROR_SUCCESS) { 243 // We didn't find the key. 
Try the 32-bit hive (where we've seen the 244 // key end up on some people's systems): 245 access_flags |= KEY_WOW64_32KEY; 246 rtn_value = RegOpenKeyEx(hive, loc, 0, access_flags, &key); 247 if (rtn_value != ERROR_SUCCESS) { 248 // We still couldn't find the key, so give up: 249 loc = next; 250 continue; 251 } 252 } 253 254 while ((rtn_value = RegEnumValue(key, idx++, name, &name_size, NULL, NULL, (LPBYTE) &value, &value_size)) == ERROR_SUCCESS) { 255 if (value_size == sizeof(value) && value == 0) { 256 if (out == NULL) { 257 out = loader_heap_alloc(inst, total_size, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 258 out[0] = '\0'; 259 } 260 else if (strlen(out) + name_size + 1 > total_size) { 261 out = loader_heap_realloc(inst, out, total_size, total_size * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 262 total_size *= 2; 263 } 264 if (out == NULL) { 265 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory, failed loader_get_registry_files"); 266 return NULL; 267 } 268 if (strlen(out) == 0) 269 snprintf(out, name_size + 1, "%s", name); 270 else 271 snprintf(out + strlen(out), name_size + 2, "%c%s", PATH_SEPERATOR, name); 272 } 273 name_size = 2048; 274 } 275 loc = next; 276 } 277 278 return out; 279} 280 281#endif // WIN32 282 283/** 284 * Combine path elements, separating each element with the platform-specific 285 * directory separator, and save the combined string to a destination buffer, 286 * not exceeding the given length. Path elements are given as variadic args, 287 * with a NULL element terminating the list. 288 * 289 * \returns the total length of the combined string, not including an ASCII 290 * NUL termination character. This length may exceed the available storage: 291 * in this case, the written string will be truncated to avoid a buffer 292 * overrun, and the return value will greater than or equal to the storage 293 * size. 
A NULL argument may be provided as the destination buffer in order 294 * to determine the required string length without actually writing a string. 295 */ 296 297static int loader_platform_combine_path(char *dest, int len, ...) 298{ 299 int required_len = 0; 300 va_list ap; 301 const char *component; 302 303 va_start(ap, len); 304 305 while((component = va_arg(ap, const char *))) { 306 if (required_len > 0) { 307 // This path element is not the first non-empty element; prepend 308 // a directory separator if space allows 309 if (dest && required_len + 1 < len) { 310 snprintf(dest + required_len, len - required_len, "%c", 311 DIRECTORY_SYMBOL); 312 } 313 required_len++; 314 } 315 316 if (dest && required_len < len) { 317 strncpy(dest + required_len, component, len - required_len); 318 } 319 required_len += strlen(component); 320 } 321 322 va_end(ap); 323 324 // strncpy(3) won't add a NUL terminating byte in the event of truncation. 325 if (dest && required_len >= len) { 326 dest[len - 1] = '\0'; 327 } 328 329 return required_len; 330} 331 332 333/** 334 * Given string of three part form "maj.min.pat" convert to a vulkan version 335 * number. 
336 */ 337static uint32_t loader_make_version(const char *vers_str) 338{ 339 uint32_t vers = 0, major=0, minor=0, patch=0; 340 char *minor_str= NULL; 341 char *patch_str = NULL; 342 char *cstr; 343 char *str; 344 345 if (!vers_str) 346 return vers; 347 cstr = loader_stack_alloc(strlen(vers_str) + 1); 348 strcpy(cstr, vers_str); 349 while ((str = strchr(cstr, '.')) != NULL) { 350 if (minor_str == NULL) { 351 minor_str = str + 1; 352 *str = '\0'; 353 major = atoi(cstr); 354 } 355 else if (patch_str == NULL) { 356 patch_str = str + 1; 357 *str = '\0'; 358 minor = atoi(minor_str); 359 } 360 else { 361 return vers; 362 } 363 cstr = str + 1; 364 } 365 patch = atoi(patch_str); 366 367 return VK_MAKE_VERSION(major, minor, patch); 368 369} 370 371bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2) 372{ 373 return strcmp(op1->extensionName, op2->extensionName) == 0 ? true : false; 374} 375 376/** 377 * Search the given ext_array for an extension 378 * matching the given vk_ext_prop 379 */ 380bool has_vk_extension_property_array( 381 const VkExtensionProperties *vk_ext_prop, 382 const uint32_t count, 383 const VkExtensionProperties *ext_array) 384{ 385 for (uint32_t i = 0; i < count; i++) { 386 if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i])) 387 return true; 388 } 389 return false; 390} 391 392/** 393 * Search the given ext_list for an extension 394 * matching the given vk_ext_prop 395 */ 396bool has_vk_extension_property( 397 const VkExtensionProperties *vk_ext_prop, 398 const struct loader_extension_list *ext_list) 399{ 400 for (uint32_t i = 0; i < ext_list->count; i++) { 401 if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop)) 402 return true; 403 } 404 return false; 405} 406 407static inline bool loader_is_layer_type_device(const enum layer_type type) { 408 if ((type & VK_LAYER_TYPE_DEVICE_EXPLICIT) || 409 (type & VK_LAYER_TYPE_DEVICE_IMPLICIT)) 410 return true; 411 return false; 412} 
413 414/* 415 * Search the given layer list for a layer matching the given layer name 416 */ 417static struct loader_layer_properties *loader_get_layer_property( 418 const char *name, 419 const struct loader_layer_list *layer_list) 420{ 421 for (uint32_t i = 0; i < layer_list->count; i++) { 422 const VkLayerProperties *item = &layer_list->list[i].info; 423 if (strcmp(name, item->layerName) == 0) 424 return &layer_list->list[i]; 425 } 426 return NULL; 427} 428 429/** 430 * Get the next unused layer property in the list. Init the property to zero. 431 */ 432static struct loader_layer_properties *loader_get_next_layer_property( 433 const struct loader_instance *inst, 434 struct loader_layer_list *layer_list) 435{ 436 if (layer_list->capacity == 0) { 437 layer_list->list = loader_heap_alloc(inst, 438 sizeof(struct loader_layer_properties) * 64, 439 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 440 if (layer_list->list == NULL) { 441 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't add any layer properties to list"); 442 return NULL; 443 } 444 memset(layer_list->list, 0, sizeof(struct loader_layer_properties) * 64); 445 layer_list->capacity = sizeof(struct loader_layer_properties) * 64; 446 } 447 448 // ensure enough room to add an entry 449 if ((layer_list->count + 1) * sizeof (struct loader_layer_properties) 450 > layer_list->capacity) { 451 layer_list->list = loader_heap_realloc(inst, layer_list->list, 452 layer_list->capacity, 453 layer_list->capacity * 2, 454 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 455 if (layer_list->list == NULL) { 456 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 457 "realloc failed for layer list"); 458 } 459 layer_list->capacity *= 2; 460 } 461 462 layer_list->count++; 463 return &(layer_list->list[layer_list->count - 1]); 464} 465 466/** 467 * Remove all layer properties entrys from the list 468 */ 469void loader_delete_layer_properties( 470 const struct loader_instance *inst, 471 struct loader_layer_list *layer_list) 472{ 473 uint32_t i; 474 475 
if (!layer_list) 476 return; 477 478 for (i = 0; i < layer_list->count; i++) { 479 loader_destroy_ext_list(inst, &layer_list->list[i].instance_extension_list); 480 loader_destroy_ext_list(inst, &layer_list->list[i].device_extension_list); 481 } 482 layer_list->count = 0; 483 484 if (layer_list->capacity > 0) { 485 layer_list->capacity = 0; 486 loader_heap_free(inst, layer_list->list); 487 } 488 489} 490 491static void loader_add_global_extensions( 492 const struct loader_instance *inst, 493 const PFN_vkEnumerateInstanceExtensionProperties fp_get_props, 494 const char *lib_name, 495 struct loader_extension_list *ext_list) 496{ 497 uint32_t i, count; 498 VkExtensionProperties *ext_props; 499 VkResult res; 500 501 if (!fp_get_props) { 502 /* No EnumerateInstanceExtensionProperties defined */ 503 return; 504 } 505 506 res = fp_get_props(NULL, &count, NULL); 507 if (res != VK_SUCCESS) { 508 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Error getting global extension count from %s", lib_name); 509 return; 510 } 511 512 if (count == 0) { 513 /* No ExtensionProperties to report */ 514 return; 515 } 516 517 ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties)); 518 519 res = fp_get_props(NULL, &count, ext_props); 520 if (res != VK_SUCCESS) { 521 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Error getting global extensions from %s", lib_name); 522 return; 523 } 524 525 for (i = 0; i < count; i++) { 526 char spec_version[64]; 527 528 snprintf(spec_version, sizeof(spec_version), "%d.%d.%d", 529 VK_MAJOR(ext_props[i].specVersion), 530 VK_MINOR(ext_props[i].specVersion), 531 VK_PATCH(ext_props[i].specVersion)); 532 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, 533 "Global Extension: %s (%s) version %s", 534 ext_props[i].extensionName, lib_name, spec_version); 535 loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]); 536 } 537 538 return; 539} 540 541/* 542 * Initialize ext_list with the physical device extensions. 
543 * The extension properties are passed as inputs in count and ext_props. 544 */ 545static VkResult loader_init_physical_device_extensions( 546 const struct loader_instance *inst, 547 struct loader_physical_device *phys_dev, 548 uint32_t count, 549 VkExtensionProperties *ext_props, 550 struct loader_extension_list *ext_list) 551{ 552 VkResult res; 553 uint32_t i; 554 555 if (!loader_init_ext_list(inst, ext_list)) { 556 return VK_ERROR_OUT_OF_HOST_MEMORY; 557 } 558 559 for (i = 0; i < count; i++) { 560 char spec_version[64]; 561 562 snprintf(spec_version, sizeof (spec_version), "%d.%d.%d", 563 VK_MAJOR(ext_props[i].specVersion), 564 VK_MINOR(ext_props[i].specVersion), 565 VK_PATCH(ext_props[i].specVersion)); 566 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, 567 "PhysicalDevice Extension: %s (%s) version %s", 568 ext_props[i].extensionName, phys_dev->this_icd->this_icd_lib->lib_name, spec_version); 569 res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]); 570 if (res != VK_SUCCESS) 571 return res; 572 } 573 574 return VK_SUCCESS; 575} 576 577static VkResult loader_add_physical_device_extensions( 578 const struct loader_instance *inst, 579 VkPhysicalDevice physical_device, 580 const char *lib_name, 581 struct loader_extension_list *ext_list) 582{ 583 uint32_t i, count; 584 VkResult res; 585 VkExtensionProperties *ext_props; 586 587 res = loader_EnumerateDeviceExtensionProperties(physical_device, NULL, &count, NULL); 588 if (res == VK_SUCCESS && count > 0) { 589 ext_props = loader_stack_alloc(count * sizeof (VkExtensionProperties)); 590 if (!ext_props) 591 return VK_ERROR_OUT_OF_HOST_MEMORY; 592 res = loader_EnumerateDeviceExtensionProperties(physical_device, NULL, &count, ext_props); 593 if (res != VK_SUCCESS) 594 return res; 595 for (i = 0; i < count; i++) { 596 char spec_version[64]; 597 598 snprintf(spec_version, sizeof (spec_version), "%d.%d.%d", 599 VK_MAJOR(ext_props[i].specVersion), 600 VK_MINOR(ext_props[i].specVersion), 601 
VK_PATCH(ext_props[i].specVersion)); 602 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, 603 "PhysicalDevice Extension: %s (%s) version %s", 604 ext_props[i].extensionName, lib_name, spec_version); 605 res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]); 606 if (res != VK_SUCCESS) 607 return res; 608 } 609 } else { 610 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Error getting physical device extension info count from library %s", lib_name); 611 return res; 612 } 613 614 return VK_SUCCESS; 615} 616 617static bool loader_init_ext_list(const struct loader_instance *inst, 618 struct loader_extension_list *ext_info) 619{ 620 ext_info->capacity = 32 * sizeof(VkExtensionProperties); 621 ext_info->list = loader_heap_alloc(inst, ext_info->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 622 if (ext_info->list == NULL) { 623 return false; 624 } 625 memset(ext_info->list, 0, ext_info->capacity); 626 ext_info->count = 0; 627 return true; 628} 629 630void loader_destroy_ext_list(const struct loader_instance *inst, 631 struct loader_extension_list *ext_info) 632{ 633 loader_heap_free(inst, ext_info->list); 634 ext_info->count = 0; 635 ext_info->capacity = 0; 636} 637 638/* 639 * Append non-duplicate extension properties defined in props 640 * to the given ext_list. 
641 * Return 642 * Vk_SUCCESS on success 643 */ 644VkResult loader_add_to_ext_list( 645 const struct loader_instance *inst, 646 struct loader_extension_list *ext_list, 647 uint32_t prop_list_count, 648 const VkExtensionProperties *props) 649{ 650 uint32_t i; 651 const VkExtensionProperties *cur_ext; 652 653 if (ext_list->list == NULL || ext_list->capacity == 0) { 654 loader_init_ext_list(inst, ext_list); 655 } 656 657 if (ext_list->list == NULL) 658 return VK_ERROR_OUT_OF_HOST_MEMORY; 659 660 for (i = 0; i < prop_list_count; i++) { 661 cur_ext = &props[i]; 662 663 // look for duplicates 664 if (has_vk_extension_property(cur_ext, ext_list)) { 665 continue; 666 } 667 668 // add to list at end 669 // check for enough capacity 670 if (ext_list->count * sizeof(VkExtensionProperties) 671 >= ext_list->capacity) { 672 673 ext_list->list = loader_heap_realloc(inst, 674 ext_list->list, 675 ext_list->capacity, 676 ext_list->capacity * 2, 677 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 678 679 if (ext_list->list == NULL) 680 return VK_ERROR_OUT_OF_HOST_MEMORY; 681 682 // double capacity 683 ext_list->capacity *= 2; 684 } 685 686 memcpy(&ext_list->list[ext_list->count], cur_ext, sizeof(VkExtensionProperties)); 687 ext_list->count++; 688 } 689 return VK_SUCCESS; 690} 691 692/** 693 * Search the given search_list for any layers in the props list. 694 * Add these to the output layer_list. Don't add duplicates to the output layer_list. 
695 */ 696static VkResult loader_add_layer_names_to_list( 697 const struct loader_instance *inst, 698 struct loader_layer_list *output_list, 699 uint32_t name_count, 700 const char * const *names, 701 const struct loader_layer_list *search_list) 702{ 703 struct loader_layer_properties *layer_prop; 704 VkResult err = VK_SUCCESS; 705 706 for (uint32_t i = 0; i < name_count; i++) { 707 const char *search_target = names[i]; 708 layer_prop = loader_get_layer_property(search_target, search_list); 709 if (!layer_prop) { 710 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Unable to find layer %s", search_target); 711 err = VK_ERROR_LAYER_NOT_PRESENT; 712 continue; 713 } 714 715 loader_add_to_layer_list(inst, output_list, 1, layer_prop); 716 } 717 718 return err; 719} 720 721 722/* 723 * Manage lists of VkLayerProperties 724 */ 725static bool loader_init_layer_list(const struct loader_instance *inst, 726 struct loader_layer_list *list) 727{ 728 list->capacity = 32 * sizeof(struct loader_layer_properties); 729 list->list = loader_heap_alloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 730 if (list->list == NULL) { 731 return false; 732 } 733 memset(list->list, 0, list->capacity); 734 list->count = 0; 735 return true; 736} 737 738void loader_destroy_layer_list(const struct loader_instance *inst, 739 struct loader_layer_list *layer_list) 740{ 741 loader_heap_free(inst, layer_list->list); 742 layer_list->count = 0; 743 layer_list->capacity = 0; 744} 745 746/* 747 * Manage list of layer libraries (loader_lib_info) 748 */ 749static bool loader_init_layer_library_list(const struct loader_instance *inst, 750 struct loader_layer_library_list *list) 751{ 752 list->capacity = 32 * sizeof(struct loader_lib_info); 753 list->list = loader_heap_alloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 754 if (list->list == NULL) { 755 return false; 756 } 757 memset(list->list, 0, list->capacity); 758 list->count = 0; 759 return true; 760} 761 762void 
loader_destroy_layer_library_list(const struct loader_instance *inst, 763 struct loader_layer_library_list *list) 764{ 765 for (uint32_t i = 0; i < list->count; i++) { 766 loader_heap_free(inst, list->list[i].lib_name); 767 } 768 loader_heap_free(inst, list->list); 769 list->count = 0; 770 list->capacity = 0; 771} 772 773void loader_add_to_layer_library_list( 774 const struct loader_instance *inst, 775 struct loader_layer_library_list *list, 776 uint32_t item_count, 777 const struct loader_lib_info *new_items) 778{ 779 uint32_t i; 780 struct loader_lib_info *item; 781 782 if (list->list == NULL || list->capacity == 0) { 783 loader_init_layer_library_list(inst, list); 784 } 785 786 if (list->list == NULL) 787 return; 788 789 for (i = 0; i < item_count; i++) { 790 item = (struct loader_lib_info *) &new_items[i]; 791 792 // look for duplicates 793 for (uint32_t j = 0; j < list->count; j++) { 794 if (strcmp(list->list[i].lib_name, new_items->lib_name) == 0) { 795 continue; 796 } 797 } 798 799 // add to list at end 800 // check for enough capacity 801 if (list->count * sizeof(struct loader_lib_info) 802 >= list->capacity) { 803 804 list->list = loader_heap_realloc(inst, 805 list->list, 806 list->capacity, 807 list->capacity * 2, 808 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 809 // double capacity 810 list->capacity *= 2; 811 } 812 813 memcpy(&list->list[list->count], item, sizeof(struct loader_lib_info)); 814 list->count++; 815 } 816} 817 818 819/* 820 * Search the given layer list for a list 821 * matching the given VkLayerProperties 822 */ 823bool has_vk_layer_property( 824 const VkLayerProperties *vk_layer_prop, 825 const struct loader_layer_list *list) 826{ 827 for (uint32_t i = 0; i < list->count; i++) { 828 if (strcmp(vk_layer_prop->layerName, list->list[i].info.layerName) == 0) 829 return true; 830 } 831 return false; 832} 833 834/* 835 * Search the given layer list for a layer 836 * matching the given name 837 */ 838bool has_layer_name( 839 const char *name, 840 
const struct loader_layer_list *list) 841{ 842 for (uint32_t i = 0; i < list->count; i++) { 843 if (strcmp(name, list->list[i].info.layerName) == 0) 844 return true; 845 } 846 return false; 847} 848 849/* 850 * Append non-duplicate layer properties defined in prop_list 851 * to the given layer_info list 852 */ 853void loader_add_to_layer_list( 854 const struct loader_instance *inst, 855 struct loader_layer_list *list, 856 uint32_t prop_list_count, 857 const struct loader_layer_properties *props) 858{ 859 uint32_t i; 860 struct loader_layer_properties *layer; 861 862 if (list->list == NULL || list->capacity == 0) { 863 loader_init_layer_list(inst, list); 864 } 865 866 if (list->list == NULL) 867 return; 868 869 for (i = 0; i < prop_list_count; i++) { 870 layer = (struct loader_layer_properties *) &props[i]; 871 872 // look for duplicates 873 if (has_vk_layer_property(&layer->info, list)) { 874 continue; 875 } 876 877 // add to list at end 878 // check for enough capacity 879 if (list->count * sizeof(struct loader_layer_properties) 880 >= list->capacity) { 881 882 list->list = loader_heap_realloc(inst, 883 list->list, 884 list->capacity, 885 list->capacity * 2, 886 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 887 // double capacity 888 list->capacity *= 2; 889 } 890 891 memcpy(&list->list[list->count], layer, sizeof(struct loader_layer_properties)); 892 list->count++; 893 } 894} 895 896/** 897 * Search the search_list for any layer with a name 898 * that matches the given name and a type that matches the given type 899 * Add all matching layers to the found_list 900 * Do not add if found loader_layer_properties is already 901 * on the found_list. 
902 */ 903static void loader_find_layer_name_add_list( 904 const struct loader_instance *inst, 905 const char *name, 906 const enum layer_type type, 907 const struct loader_layer_list *search_list, 908 struct loader_layer_list *found_list) 909{ 910 bool found = false; 911 for (uint32_t i = 0; i < search_list->count; i++) { 912 struct loader_layer_properties *layer_prop = &search_list->list[i]; 913 if (0 == strcmp(layer_prop->info.layerName, name) && 914 (layer_prop->type & type)) { 915 /* Found a layer with the same name, add to found_list */ 916 loader_add_to_layer_list(inst, found_list, 1, layer_prop); 917 found = true; 918 } 919 } 920 if (!found) { 921 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Warning, couldn't find layer name %s to activate", name); 922 } 923} 924 925static VkExtensionProperties *get_extension_property( 926 const char *name, 927 const struct loader_extension_list *list) 928{ 929 for (uint32_t i = 0; i < list->count; i++) { 930 if (strcmp(name, list->list[i].extensionName) == 0) 931 return &list->list[i]; 932 } 933 return NULL; 934} 935 936/* 937 * For global exenstions implemented within the loader (i.e. DEBUG_REPORT 938 * the extension must provide two entry points for the loader to use: 939 * - "trampoline" entry point - this is the address returned by GetProcAddr 940 * and will always do what's necessary to support a global call. 941 * - "terminator" function - this function will be put at the end of the 942 * instance chain and will contain the necessary logica to call / process 943 * the extension for the appropriate ICDs that are available. 944 * There is no generic mechanism for including these functions, the references 945 * must be placed into the appropriate loader entry points. 946 * GetInstanceProcAddr: call extension GetInstanceProcAddr to check for GetProcAddr requests 947 * loader_coalesce_extensions(void) - add extension records to the list of global 948 * extension available to the app. 
949 * instance_disp - add function pointer for terminator function to this array. 950 * The extension itself should be in a separate file that will be 951 * linked directly with the loader. 952 */ 953 954void loader_get_icd_loader_instance_extensions( 955 const struct loader_instance *inst, 956 struct loader_icd_libs *icd_libs, 957 struct loader_extension_list *inst_exts) 958{ 959 struct loader_extension_list icd_exts; 960 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, "Build ICD instance extension list"); 961 // traverse scanned icd list adding non-duplicate extensions to the list 962 for (uint32_t i = 0; i < icd_libs->count; i++) { 963 loader_init_ext_list(inst, &icd_exts); 964 loader_add_global_extensions(inst, icd_libs->list[i].EnumerateInstanceExtensionProperties, 965 icd_libs->list[i].lib_name, 966 &icd_exts); 967 loader_add_to_ext_list(inst, inst_exts, 968 icd_exts.count, 969 icd_exts.list); 970 loader_destroy_ext_list(inst, &icd_exts); 971 }; 972 973 // Traverse loader's extensions, adding non-duplicate extensions to the list 974 wsi_swapchain_add_instance_extensions(inst, inst_exts); 975 debug_report_add_instance_extensions(inst, inst_exts); 976} 977 978struct loader_icd *loader_get_icd_and_device(const VkDevice device, 979 struct loader_device **found_dev) 980{ 981 *found_dev = NULL; 982 for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) { 983 for (struct loader_icd *icd = inst->icds; icd; icd = icd->next) { 984 for (struct loader_device *dev = icd->logical_device_list; dev; dev = dev->next) 985 /* Value comparison of device prevents object wrapping by layers */ 986 if (loader_get_dispatch(dev->device) == loader_get_dispatch(device)) { 987 *found_dev = dev; 988 return icd; 989 } 990 } 991 } 992 return NULL; 993} 994 995static void loader_destroy_logical_device(const struct loader_instance *inst, 996 struct loader_device *dev) 997{ 998 loader_heap_free(inst, dev->app_extension_props); 999 if (dev->activated_layer_list.count) 1000 
loader_destroy_layer_list(inst, &dev->activated_layer_list); 1001 loader_heap_free(inst, dev); 1002} 1003 1004static struct loader_device *loader_add_logical_device( 1005 const struct loader_instance *inst, 1006 const VkDevice dev, 1007 struct loader_device **device_list) 1008{ 1009 struct loader_device *new_dev; 1010 1011 new_dev = loader_heap_alloc(inst, sizeof(struct loader_device), VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); 1012 if (!new_dev) { 1013 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc struct laoder-device"); 1014 return NULL; 1015 } 1016 1017 memset(new_dev, 0, sizeof(struct loader_device)); 1018 1019 new_dev->next = *device_list; 1020 new_dev->device = dev; 1021 *device_list = new_dev; 1022 return new_dev; 1023} 1024 1025void loader_remove_logical_device( 1026 const struct loader_instance *inst, 1027 VkDevice device) 1028{ 1029 struct loader_device *found_dev, *dev, *prev_dev; 1030 struct loader_icd *icd; 1031 icd = loader_get_icd_and_device(device, &found_dev); 1032 1033 if (!icd || !found_dev) 1034 return; 1035 1036 prev_dev = NULL; 1037 dev = icd->logical_device_list; 1038 while (dev && dev != found_dev) { 1039 prev_dev = dev; 1040 dev = dev->next; 1041 } 1042 1043 if (prev_dev) 1044 prev_dev->next = found_dev->next; 1045 else 1046 icd->logical_device_list = found_dev->next; 1047 loader_destroy_logical_device(inst, found_dev); 1048} 1049 1050 1051static void loader_icd_destroy( 1052 struct loader_instance *ptr_inst, 1053 struct loader_icd *icd) 1054{ 1055 ptr_inst->total_icd_count--; 1056 for (struct loader_device *dev = icd->logical_device_list; dev; ) { 1057 struct loader_device *next_dev = dev->next; 1058 loader_destroy_logical_device(ptr_inst, dev); 1059 dev = next_dev; 1060 } 1061 1062 loader_heap_free(ptr_inst, icd); 1063} 1064 1065static struct loader_icd * loader_icd_create(const struct loader_instance *inst) 1066{ 1067 struct loader_icd *icd; 1068 1069 icd = loader_heap_alloc(inst, sizeof(*icd), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 
1070 if (!icd) 1071 return NULL; 1072 1073 memset(icd, 0, sizeof(*icd)); 1074 1075 return icd; 1076} 1077 1078static struct loader_icd *loader_icd_add( 1079 struct loader_instance *ptr_inst, 1080 const struct loader_scanned_icds *icd_lib) 1081{ 1082 struct loader_icd *icd; 1083 1084 icd = loader_icd_create(ptr_inst); 1085 if (!icd) 1086 return NULL; 1087 1088 icd->this_icd_lib = icd_lib; 1089 icd->this_instance = ptr_inst; 1090 1091 /* prepend to the list */ 1092 icd->next = ptr_inst->icds; 1093 ptr_inst->icds = icd; 1094 ptr_inst->total_icd_count++; 1095 1096 return icd; 1097} 1098 1099void loader_scanned_icd_clear( 1100 const struct loader_instance *inst, 1101 struct loader_icd_libs *icd_libs) 1102{ 1103 if (icd_libs->capacity == 0) 1104 return; 1105 for (uint32_t i = 0; i < icd_libs->count; i++) { 1106 loader_platform_close_library(icd_libs->list[i].handle); 1107 loader_heap_free(inst, icd_libs->list[i].lib_name); 1108 } 1109 loader_heap_free(inst, icd_libs->list); 1110 icd_libs->capacity = 0; 1111 icd_libs->count = 0; 1112 icd_libs->list = NULL; 1113} 1114 1115static void loader_scanned_icd_init(const struct loader_instance *inst, 1116 struct loader_icd_libs *icd_libs) 1117{ 1118 loader_scanned_icd_clear(inst, icd_libs); 1119 icd_libs->capacity = 8 * sizeof(struct loader_scanned_icds); 1120 icd_libs->list = loader_heap_alloc(inst, icd_libs->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1121 1122} 1123 1124static void loader_scanned_icd_add( 1125 const struct loader_instance *inst, 1126 struct loader_icd_libs *icd_libs, 1127 const char *filename, 1128 uint32_t api_version) 1129{ 1130 loader_platform_dl_handle handle; 1131 PFN_vkCreateInstance fp_create_inst; 1132 PFN_vkEnumerateInstanceExtensionProperties fp_get_global_ext_props; 1133 PFN_vkGetInstanceProcAddr fp_get_proc_addr; 1134 struct loader_scanned_icds *new_node; 1135 1136 /* TODO implement ref counting of libraries, for now this function leaves 1137 libraries open and the scanned_icd_clear closes them 
*/ 1138 // Used to call: dlopen(filename, RTLD_LAZY); 1139 handle = loader_platform_open_library(filename); 1140 if (!handle) { 1141 loader_log(VK_DBG_REPORT_WARN_BIT, 0, loader_platform_open_library_error(filename)); 1142 return; 1143 } 1144 1145#define LOOKUP_LD(func_ptr, func) do { \ 1146 func_ptr = (PFN_vk ##func) loader_platform_get_proc_address(handle, "vk" #func); \ 1147 if (!func_ptr) { \ 1148 loader_log(VK_DBG_REPORT_WARN_BIT, 0, loader_platform_get_proc_address_error("vk" #func)); \ 1149 return; \ 1150 } \ 1151} while (0) 1152 1153 LOOKUP_LD(fp_get_proc_addr, GetInstanceProcAddr); 1154 LOOKUP_LD(fp_create_inst, CreateInstance); 1155 LOOKUP_LD(fp_get_global_ext_props, EnumerateInstanceExtensionProperties); 1156 1157#undef LOOKUP_LD 1158 1159 // check for enough capacity 1160 if ((icd_libs->count * sizeof(struct loader_scanned_icds)) >= icd_libs->capacity) { 1161 1162 icd_libs->list = loader_heap_realloc(inst, 1163 icd_libs->list, 1164 icd_libs->capacity, 1165 icd_libs->capacity * 2, 1166 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1167 // double capacity 1168 icd_libs->capacity *= 2; 1169 } 1170 new_node = &(icd_libs->list[icd_libs->count]); 1171 1172 new_node->handle = handle; 1173 new_node->api_version = api_version; 1174 new_node->GetInstanceProcAddr = fp_get_proc_addr; 1175 new_node->CreateInstance = fp_create_inst; 1176 new_node->EnumerateInstanceExtensionProperties = fp_get_global_ext_props; 1177 1178 new_node->lib_name = (char *) loader_heap_alloc(inst, 1179 strlen(filename) + 1, 1180 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1181 if (!new_node->lib_name) { 1182 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Out of memory can't add icd"); 1183 return; 1184 } 1185 strcpy(new_node->lib_name, filename); 1186 icd_libs->count++; 1187} 1188 1189static bool loader_icd_init_entrys(struct loader_icd *icd, 1190 VkInstance inst, 1191 const PFN_vkGetInstanceProcAddr fp_gipa) 1192{ 1193 /* initialize entrypoint function pointers */ 1194 1195 #define LOOKUP_GIPA(func, required) do 
{ \ 1196 icd->func = (PFN_vk ##func) fp_gipa(inst, "vk" #func); \ 1197 if (!icd->func && required) { \ 1198 loader_log(VK_DBG_REPORT_WARN_BIT, 0, \ 1199 loader_platform_get_proc_address_error("vk" #func)); \ 1200 return false; \ 1201 } \ 1202 } while (0) 1203 1204 LOOKUP_GIPA(GetDeviceProcAddr, true); 1205 LOOKUP_GIPA(DestroyInstance, true); 1206 LOOKUP_GIPA(EnumeratePhysicalDevices, true); 1207 LOOKUP_GIPA(GetPhysicalDeviceFeatures, true); 1208 LOOKUP_GIPA(GetPhysicalDeviceFormatProperties, true); 1209 LOOKUP_GIPA(GetPhysicalDeviceImageFormatProperties, true); 1210 LOOKUP_GIPA(CreateDevice, true); 1211 LOOKUP_GIPA(GetPhysicalDeviceProperties, true); 1212 LOOKUP_GIPA(GetPhysicalDeviceMemoryProperties, true); 1213 LOOKUP_GIPA(GetPhysicalDeviceQueueFamilyProperties, true); 1214 LOOKUP_GIPA(EnumerateDeviceExtensionProperties, true); 1215 LOOKUP_GIPA(GetPhysicalDeviceSparseImageFormatProperties, true); 1216 LOOKUP_GIPA(DbgCreateMsgCallback, false); 1217 LOOKUP_GIPA(DbgDestroyMsgCallback, false); 1218 LOOKUP_GIPA(GetPhysicalDeviceSurfaceSupportKHR, false); 1219 1220#undef LOOKUP_GIPA 1221 1222 return true; 1223} 1224 1225static void loader_debug_init(void) 1226{ 1227 const char *env; 1228 1229 if (g_loader_debug > 0) 1230 return; 1231 1232 g_loader_debug = 0; 1233 1234 /* parse comma-separated debug options */ 1235 env = getenv("VK_LOADER_DEBUG"); 1236 while (env) { 1237 const char *p = strchr(env, ','); 1238 size_t len; 1239 1240 if (p) 1241 len = p - env; 1242 else 1243 len = strlen(env); 1244 1245 if (len > 0) { 1246 if (strncmp(env, "warn", len) == 0) { 1247 g_loader_debug |= LOADER_WARN_BIT; 1248 g_loader_log_msgs |= VK_DBG_REPORT_WARN_BIT; 1249 } else if (strncmp(env, "info", len) == 0) { 1250 g_loader_debug |= LOADER_INFO_BIT; 1251 g_loader_log_msgs |= VK_DBG_REPORT_INFO_BIT; 1252 } else if (strncmp(env, "perf", len) == 0) { 1253 g_loader_debug |= LOADER_PERF_BIT; 1254 g_loader_log_msgs |= VK_DBG_REPORT_PERF_WARN_BIT; 1255 } else if (strncmp(env, "error", len) == 
0) { 1256 g_loader_debug |= LOADER_ERROR_BIT; 1257 g_loader_log_msgs |= VK_DBG_REPORT_ERROR_BIT; 1258 } else if (strncmp(env, "debug", len) == 0) { 1259 g_loader_debug |= LOADER_DEBUG_BIT; 1260 g_loader_log_msgs |= VK_DBG_REPORT_DEBUG_BIT; 1261 } 1262 } 1263 1264 if (!p) 1265 break; 1266 1267 env = p + 1; 1268 } 1269} 1270 1271void loader_initialize(void) 1272{ 1273 // initialize mutexs 1274 loader_platform_thread_create_mutex(&loader_lock); 1275 loader_platform_thread_create_mutex(&loader_json_lock); 1276 1277 // initialize logging 1278 loader_debug_init(); 1279 1280 // initial cJSON to use alloc callbacks 1281 cJSON_Hooks alloc_fns = { 1282 .malloc_fn = loader_tls_heap_alloc, 1283 .free_fn = loader_tls_heap_free, 1284 }; 1285 cJSON_InitHooks(&alloc_fns); 1286} 1287 1288struct loader_manifest_files { 1289 uint32_t count; 1290 char **filename_list; 1291}; 1292 1293/** 1294 * Get next file or dirname given a string list or registry key path 1295 * 1296 * \returns 1297 * A pointer to first char in the next path. 1298 * The next path (or NULL) in the list is returned in next_path. 1299 * Note: input string is modified in some cases. PASS IN A COPY! 1300 */ 1301static char *loader_get_next_path(char *path) 1302{ 1303 uint32_t len; 1304 char *next; 1305 1306 if (path == NULL) 1307 return NULL; 1308 next = strchr(path, PATH_SEPERATOR); 1309 if (next == NULL) { 1310 len = (uint32_t) strlen(path); 1311 next = path + len; 1312 } 1313 else { 1314 *next = '\0'; 1315 next++; 1316 } 1317 1318 return next; 1319} 1320 1321/** 1322 * Given a path which is absolute or relative, expand the path if relative or 1323 * leave the path unmodified if absolute. The base path to prepend to relative 1324 * paths is given in rel_base. 
1325 * 1326 * \returns 1327 * A string in out_fullpath of the full absolute path 1328 */ 1329static void loader_expand_path(const char *path, 1330 const char *rel_base, 1331 size_t out_size, 1332 char *out_fullpath) 1333{ 1334 if (loader_platform_is_path_absolute(path)) { 1335 // do not prepend a base to an absolute path 1336 rel_base = ""; 1337 } 1338 1339 loader_platform_combine_path(out_fullpath, out_size, rel_base, path, NULL); 1340} 1341 1342/** 1343 * Given a filename (file) and a list of paths (dir), try to find an existing 1344 * file in the paths. If filename already is a path then no 1345 * searching in the given paths. 1346 * 1347 * \returns 1348 * A string in out_fullpath of either the full path or file. 1349 */ 1350static void loader_get_fullpath(const char *file, 1351 const char *dirs, 1352 size_t out_size, 1353 char *out_fullpath) 1354{ 1355 if (!loader_platform_is_path(file) && *dirs) { 1356 char *dirs_copy, *dir, *next_dir; 1357 1358 dirs_copy = loader_stack_alloc(strlen(dirs) + 1); 1359 strcpy(dirs_copy, dirs); 1360 1361 //find if file exists after prepending paths in given list 1362 for (dir = dirs_copy; 1363 *dir && (next_dir = loader_get_next_path(dir)); 1364 dir = next_dir) { 1365 loader_platform_combine_path(out_fullpath, out_size, dir, file, NULL); 1366 if (loader_platform_file_exists(out_fullpath)) { 1367 return; 1368 } 1369 } 1370 } 1371 1372 snprintf(out_fullpath, out_size, "%s", file); 1373} 1374 1375/** 1376 * Read a JSON file into a buffer. 1377 * 1378 * \returns 1379 * A pointer to a cJSON object representing the JSON parse tree. 1380 * This returned buffer should be freed by caller. 
1381 */ 1382static cJSON *loader_get_json(const char *filename) 1383{ 1384 FILE *file; 1385 char *json_buf; 1386 cJSON *json; 1387 uint64_t len; 1388 file = fopen(filename,"rb"); 1389 if (!file) { 1390 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Couldn't open JSON file %s", filename); 1391 return NULL; 1392 } 1393 fseek(file, 0, SEEK_END); 1394 len = ftell(file); 1395 fseek(file, 0, SEEK_SET); 1396 json_buf = (char*) loader_stack_alloc(len+1); 1397 if (json_buf == NULL) { 1398 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get JSON file"); 1399 fclose(file); 1400 return NULL; 1401 } 1402 if (fread(json_buf, sizeof(char), len, file) != len) { 1403 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "fread failed can't get JSON file"); 1404 fclose(file); 1405 return NULL; 1406 } 1407 fclose(file); 1408 json_buf[len] = '\0'; 1409 1410 //parse text from file 1411 json = cJSON_Parse(json_buf); 1412 if (json == NULL) 1413 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Can't parse JSON file %s", filename); 1414 return json; 1415} 1416 1417/** 1418 * Do a deep copy of the loader_layer_properties structure. 
1419 */ 1420static void loader_copy_layer_properties( 1421 const struct loader_instance *inst, 1422 struct loader_layer_properties *dst, 1423 struct loader_layer_properties *src) 1424{ 1425 memcpy(dst, src, sizeof (*src)); 1426 dst->instance_extension_list.list = loader_heap_alloc( 1427 inst, 1428 sizeof(VkExtensionProperties) * 1429 src->instance_extension_list.count, 1430 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1431 dst->instance_extension_list.capacity = sizeof(VkExtensionProperties) * 1432 src->instance_extension_list.count; 1433 memcpy(dst->instance_extension_list.list, src->instance_extension_list.list, 1434 dst->instance_extension_list.capacity); 1435 dst->device_extension_list.list = loader_heap_alloc( 1436 inst, 1437 sizeof(VkExtensionProperties) * 1438 src->device_extension_list.count, 1439 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1440 dst->device_extension_list.capacity = sizeof(VkExtensionProperties) * 1441 src->device_extension_list.count; 1442 memcpy(dst->device_extension_list.list, src->device_extension_list.list, 1443 dst->device_extension_list.capacity); 1444} 1445 1446/** 1447 * Given a cJSON struct (json) of the top level JSON object from layer manifest 1448 * file, add entry to the layer_list. 1449 * Fill out the layer_properties in this list entry from the input cJSON object. 1450 * 1451 * \returns 1452 * void 1453 * layer_list has a new entry and initialized accordingly. 1454 * If the json input object does not have all the required fields no entry 1455 * is added to the list. 
 */
// Parse one layer manifest's JSON tree and append the described layer(s) to
// the instance and/or device layer lists. Entries missing required fields
// are skipped; a "GLOBAL" layer is added to both lists.
static void loader_add_layer_properties(const struct loader_instance *inst,
                                        struct loader_layer_list *layer_instance_list,
                                        struct loader_layer_list *layer_device_list,
                                        cJSON *json,
                                        bool is_implicit,
                                        char *filename)
{
    /* Fields in layer manifest file that are required:
     * (required) "file_format_version"
     * following are required in the "layer" object:
     * (required) "name"
     * (required) "type"
     * (required) "library_path"
     * (required) "api_version"
     * (required) "implementation_version"
     * (required) "description"
     * (required for implicit layers) "disable_environment"
     *
     * First get all required items and if any missing abort
     */

    cJSON *item, *layer_node, *ext_item;
    char *temp;
    char *name, *type, *library_path, *api_version;
    char *implementation_version, *description;
    cJSON *disable_environment;
    int i;
    VkExtensionProperties ext_prop;
    item = cJSON_GetObjectItem(json, "file_format_version");
    if (item == NULL) {
        // manifest without a version field is rejected outright
        return;
    }
    char *file_vers = cJSON_PrintUnformatted(item);
    loader_log(VK_DBG_REPORT_INFO_BIT, 0, "Found manifest file %s, version %s",
               filename, file_vers);
    // cJSON prints the value with surrounding quotes, hence the quoted literal
    if (strcmp(file_vers, "\"1.0.0\"") != 0)
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Unexpected manifest file version (expected 1.0.0), may cause errors");
    loader_tls_heap_free(file_vers);

    layer_node = cJSON_GetObjectItem(json, "layer");
    if (layer_node == NULL) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"layer\" object in manifest JSON file, skipping");
        return;
    }

    // loop through all "layer" objects in the file
    do {
        // Fetch a required child object; when absent, skip to the next
        // "layer" node (note: 'continue' targets the enclosing do/while).
#define GET_JSON_OBJECT(node, var) {                \
        var = cJSON_GetObjectItem(node, #var);      \
        if (var == NULL) {                          \
            layer_node = layer_node->next;          \
            continue;                               \
        }                                           \
        }
        // Fetch a required string item. cJSON_Print() yields the value
        // wrapped in quotes; both quotes are stripped before the stack copy.
#define GET_JSON_ITEM(node, var) {                  \
        item = cJSON_GetObjectItem(node, #var);     \
        if (item == NULL) {                         \
            layer_node = layer_node->next;          \
            continue;                               \
        }                                           \
        temp = cJSON_Print(item);                   \
        temp[strlen(temp) - 1] = '\0';              \
        var = loader_stack_alloc(strlen(temp) + 1); \
        strcpy(var, &temp[1]);                      \
        loader_tls_heap_free(temp);                 \
        }
        GET_JSON_ITEM(layer_node, name)
        GET_JSON_ITEM(layer_node, type)
        GET_JSON_ITEM(layer_node, library_path)
        GET_JSON_ITEM(layer_node, api_version)
        GET_JSON_ITEM(layer_node, implementation_version)
        GET_JSON_ITEM(layer_node, description)
        if (is_implicit) {
            GET_JSON_OBJECT(layer_node, disable_environment)
        }
#undef GET_JSON_ITEM
#undef GET_JSON_OBJECT

        // add list entry -- the "type" string decides which list(s) get it
        struct loader_layer_properties *props = NULL;
        if (!strcmp(type, "DEVICE")) {
            if (layer_device_list == NULL) {
                layer_node = layer_node->next;
                continue;
            }
            props = loader_get_next_layer_property(inst, layer_device_list);
            props->type = (is_implicit) ? VK_LAYER_TYPE_DEVICE_IMPLICIT : VK_LAYER_TYPE_DEVICE_EXPLICIT;
        }
        if (!strcmp(type, "INSTANCE")) {
            if (layer_instance_list == NULL) {
                layer_node = layer_node->next;
                continue;
            }
            props = loader_get_next_layer_property(inst, layer_instance_list);
            props->type = (is_implicit) ? VK_LAYER_TYPE_INSTANCE_IMPLICIT : VK_LAYER_TYPE_INSTANCE_EXPLICIT;
        }
        if (!strcmp(type, "GLOBAL")) {
            // GLOBAL prefers the instance list; the device-list copy is made
            // at the bottom of this loop via loader_copy_layer_properties().
            if (layer_instance_list != NULL)
                props = loader_get_next_layer_property(inst, layer_instance_list);
            else if (layer_device_list != NULL)
                props = loader_get_next_layer_property(inst, layer_device_list);
            else {
                layer_node = layer_node->next;
                continue;
            }
            props->type = (is_implicit) ? VK_LAYER_TYPE_GLOBAL_IMPLICIT : VK_LAYER_TYPE_GLOBAL_EXPLICIT;
        }

        if (props == NULL) {
            // unrecognized "type" value: skip this layer node
            layer_node = layer_node->next;
            continue;
        }

        strncpy(props->info.layerName, name, sizeof (props->info.layerName));
        props->info.layerName[sizeof (props->info.layerName) - 1] = '\0';

        // Resolve library_path into props->lib_name: relative to the manifest
        // when the manifest was given as a path, otherwise searched in the
        // default layer directory.
        char *fullpath = props->lib_name;
        char *rel_base;
        if (loader_platform_is_path(filename)) {
            // a relative or absolute path
            char *name_copy = loader_stack_alloc(strlen(filename) + 1);
            strcpy(name_copy, filename);
            rel_base = loader_platform_dirname(name_copy);
            loader_expand_path(library_path, rel_base, MAX_STRING_SIZE, fullpath);
        } else {
            // a filename which is assumed in a system directory
            loader_get_fullpath(library_path, DEFAULT_VK_LAYERS_PATH, MAX_STRING_SIZE, fullpath);
        }
        props->info.specVersion = loader_make_version(api_version);
        props->info.implementationVersion = atoi(implementation_version);
        strncpy((char *) props->info.description, description, sizeof (props->info.description));
        props->info.description[sizeof (props->info.description) - 1] = '\0';
        if (is_implicit) {
            // NOTE(review): disable_environment->child is used unchecked --
            // an empty object here would dereference NULL. TODO confirm.
            strncpy(props->disable_env_var.name, disable_environment->child->string, sizeof (props->disable_env_var.name));
            props->disable_env_var.name[sizeof (props->disable_env_var.name) - 1] = '\0';
            strncpy(props->disable_env_var.value, disable_environment->child->valuestring, sizeof (props->disable_env_var.value));
            props->disable_env_var.value[sizeof (props->disable_env_var.value) - 1] = '\0';
        }

        /**
         * Now get all optional items and objects and put in list:
         * functions
         * instance_extensions
         * device_extensions
         * enable_environment (implicit layers only)
         */
        // Optional variants: absence leaves 'var'/'item' untouched instead of
        // skipping the layer node.
#define GET_JSON_OBJECT(node, var) {                \
        var = cJSON_GetObjectItem(node, #var);      \
        }
#define GET_JSON_ITEM(node, var) {                      \
        item = cJSON_GetObjectItem(node, #var);         \
        if (item != NULL) {                             \
            temp = cJSON_Print(item);                   \
            temp[strlen(temp) - 1] = '\0';              \
            var = loader_stack_alloc(strlen(temp) + 1); \
            strcpy(var, &temp[1]);                      \
            loader_tls_heap_free(temp);                 \
        }                                               \
        }

        cJSON *instance_extensions, *device_extensions, *functions, *enable_environment;
        char *vkGetInstanceProcAddr = NULL, *vkGetDeviceProcAddr = NULL, *spec_version = NULL;
        GET_JSON_OBJECT(layer_node, functions)
        if (functions != NULL) {
            // optional override names for the layer's GIPA/GDPA entrypoints
            GET_JSON_ITEM(functions, vkGetInstanceProcAddr)
            GET_JSON_ITEM(functions, vkGetDeviceProcAddr)
            if (vkGetInstanceProcAddr != NULL)
                strncpy(props->functions.str_gipa, vkGetInstanceProcAddr, sizeof (props->functions.str_gipa));
            props->functions.str_gipa[sizeof (props->functions.str_gipa) - 1] = '\0';
            if (vkGetDeviceProcAddr != NULL)
                strncpy(props->functions.str_gdpa, vkGetDeviceProcAddr, sizeof (props->functions.str_gdpa));
            props->functions.str_gdpa[sizeof (props->functions.str_gdpa) - 1] = '\0';
        }
        GET_JSON_OBJECT(layer_node, instance_extensions)
        if (instance_extensions != NULL) {
            int count = cJSON_GetArraySize(instance_extensions);
            for (i = 0; i < count; i++) {
                ext_item = cJSON_GetArrayItem(instance_extensions, i);
                GET_JSON_ITEM(ext_item, name)
                GET_JSON_ITEM(ext_item, spec_version)
                strncpy(ext_prop.extensionName, name, sizeof (ext_prop.extensionName));
                ext_prop.extensionName[sizeof (ext_prop.extensionName) - 1] = '\0';
                ext_prop.specVersion = atoi(spec_version);
                loader_add_to_ext_list(inst, &props->instance_extension_list, 1, &ext_prop);
            }
        }
        GET_JSON_OBJECT(layer_node, device_extensions)
        if (device_extensions != NULL) {
            int count = cJSON_GetArraySize(device_extensions);
            for (i = 0; i < count; i++) {
                ext_item = cJSON_GetArrayItem(device_extensions, i);
                GET_JSON_ITEM(ext_item, name);
                GET_JSON_ITEM(ext_item, spec_version);
                strncpy(ext_prop.extensionName, name, sizeof (ext_prop.extensionName));
                ext_prop.extensionName[sizeof (ext_prop.extensionName) - 1] = '\0';
                ext_prop.specVersion = atoi(spec_version);
                loader_add_to_ext_list(inst, &props->device_extension_list, 1, &ext_prop);
            }
        }
        if (is_implicit) {
            // NOTE(review): enable_environment and its ->child are used
            // unchecked here, unlike disable_environment above. TODO confirm.
            GET_JSON_OBJECT(layer_node, enable_environment)
            strncpy(props->enable_env_var.name, enable_environment->child->string, sizeof (props->enable_env_var.name));
            props->enable_env_var.name[sizeof (props->enable_env_var.name) - 1] = '\0';
            strncpy(props->enable_env_var.value, enable_environment->child->valuestring, sizeof (props->enable_env_var.value));
            props->enable_env_var.value[sizeof (props->enable_env_var.value) - 1] = '\0';
        }
#undef GET_JSON_ITEM
#undef GET_JSON_OBJECT
        // for global layers need to add them to both device and instance list
        if (!strcmp(type, "GLOBAL")) {
            struct loader_layer_properties *dev_props;
            if (layer_instance_list == NULL || layer_device_list == NULL) {
                layer_node = layer_node->next;
                continue;
            }
            dev_props = loader_get_next_layer_property(inst, layer_device_list);
            //copy into device layer list
            loader_copy_layer_properties(inst, dev_props, props);
        }
        layer_node = layer_node->next;
    } while (layer_node != NULL);
    return;
}

/**
 * Find the Vulkan library manifest files.
 *
 * This function scans the location or env_override directories/files
 * for a list of JSON manifest files. If env_override is non-NULL
 * and has a valid value. Then the location is ignored. Otherwise
 * location is used to look for manifest files. The location
 * is interpreted as Registry path on Windows and a directory path(s)
 * on Linux.
 *
 * \returns
 * A string list of manifest files to be opened in out_files param.
 * List has a pointer to string for each manifest filename.
1694 * When done using the list in out_files, pointers should be freed. 1695 * Location or override string lists can be either files or directories as follows: 1696 * | location | override 1697 * -------------------------------- 1698 * Win ICD | files | files 1699 * Win Layer | files | dirs 1700 * Linux ICD | dirs | files 1701 * Linux Layer| dirs | dirs 1702 */ 1703static void loader_get_manifest_files(const struct loader_instance *inst, 1704 const char *env_override, 1705 bool is_layer, 1706 const char *location, 1707 struct loader_manifest_files *out_files) 1708{ 1709 char *override = NULL; 1710 char *loc; 1711 char *file, *next_file, *name; 1712 size_t alloced_count = 64; 1713 char full_path[2048]; 1714 DIR *sysdir = NULL; 1715 bool list_is_dirs = false; 1716 struct dirent *dent; 1717 1718 out_files->count = 0; 1719 out_files->filename_list = NULL; 1720 1721 if (env_override != NULL && (override = getenv(env_override))) { 1722#if !defined(_WIN32) 1723 if (geteuid() != getuid()) { 1724 /* Don't allow setuid apps to use the env var: */ 1725 override = NULL; 1726 } 1727#endif 1728 } 1729 1730 if (location == NULL) { 1731 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 1732 "Can't get manifest files with NULL location, env_override=%s", 1733 env_override); 1734 return; 1735 } 1736 1737#if defined(_WIN32) 1738 list_is_dirs = (is_layer && override != NULL) ? true : false; 1739#else 1740 list_is_dirs = (override == NULL || is_layer) ? 
true : false; 1741#endif 1742 // Make a copy of the input we are using so it is not modified 1743 // Also handle getting the location(s) from registry on Windows 1744 if (override == NULL) { 1745 loc = loader_stack_alloc(strlen(location) + 1); 1746 if (loc == NULL) { 1747 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files"); 1748 return; 1749 } 1750 strcpy(loc, location); 1751#if defined(_WIN32) 1752 loc = loader_get_registry_files(inst, loc); 1753 if (loc == NULL) { 1754 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Registry lookup failed can't get manifest files"); 1755 return; 1756 } 1757#endif 1758 } 1759 else { 1760 loc = loader_stack_alloc(strlen(override) + 1); 1761 if (loc == NULL) { 1762 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files"); 1763 return; 1764 } 1765 strcpy(loc, override); 1766 } 1767 1768 // Print out the paths being searched if debugging is enabled 1769 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, "Searching the following paths for manifest files: %s\n", loc); 1770 1771 file = loc; 1772 while (*file) { 1773 next_file = loader_get_next_path(file); 1774 if (list_is_dirs) { 1775 sysdir = opendir(file); 1776 name = NULL; 1777 if (sysdir) { 1778 dent = readdir(sysdir); 1779 if (dent == NULL) 1780 break; 1781 name = &(dent->d_name[0]); 1782 loader_get_fullpath(name, file, sizeof(full_path), full_path); 1783 name = full_path; 1784 } 1785 } 1786 else { 1787#if defined(_WIN32) 1788 name = file; 1789#else 1790 // only Linux has relative paths 1791 char *dir; 1792 // make a copy of location so it isn't modified 1793 dir = loader_stack_alloc(strlen(loc) + 1); 1794 if (dir == NULL) { 1795 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files"); 1796 return; 1797 } 1798 strcpy(dir, loc); 1799 1800 loader_get_fullpath(file, dir, sizeof(full_path), full_path); 1801 1802 name = full_path; 1803#endif 1804 } 1805 while (name) { 1806 /* Look for files ending with ".json" suffix */ 1807 
uint32_t nlen = (uint32_t) strlen(name); 1808 const char *suf = name + nlen - 5; 1809 if ((nlen > 5) && !strncmp(suf, ".json", 5)) { 1810 if (out_files->count == 0) { 1811 out_files->filename_list = loader_heap_alloc(inst, 1812 alloced_count * sizeof(char *), 1813 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 1814 } 1815 else if (out_files->count == alloced_count) { 1816 out_files->filename_list = loader_heap_realloc(inst, 1817 out_files->filename_list, 1818 alloced_count * sizeof(char *), 1819 alloced_count * sizeof(char *) * 2, 1820 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 1821 alloced_count *= 2; 1822 } 1823 if (out_files->filename_list == NULL) { 1824 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't alloc manifest file list"); 1825 return; 1826 } 1827 out_files->filename_list[out_files->count] = loader_heap_alloc( 1828 inst, 1829 strlen(name) + 1, 1830 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 1831 if (out_files->filename_list[out_files->count] == NULL) { 1832 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files"); 1833 return; 1834 } 1835 strcpy(out_files->filename_list[out_files->count], name); 1836 out_files->count++; 1837 } else if (!list_is_dirs) { 1838 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Skipping manifest file %s, file name must end in .json", name); 1839 } 1840 if (list_is_dirs) { 1841 dent = readdir(sysdir); 1842 if (dent == NULL) 1843 break; 1844 name = &(dent->d_name[0]); 1845 loader_get_fullpath(name, file, sizeof(full_path), full_path); 1846 name = full_path; 1847 } 1848 else { 1849 break; 1850 } 1851 } 1852 if (sysdir) 1853 closedir(sysdir); 1854 file = next_file; 1855 } 1856 return; 1857} 1858 1859void loader_init_icd_lib_list() 1860{ 1861 1862} 1863 1864void loader_destroy_icd_lib_list() 1865{ 1866 1867} 1868/** 1869 * Try to find the Vulkan ICD driver(s). 
1870 * 1871 * This function scans the default system loader path(s) or path 1872 * specified by the \c VK_ICD_FILENAMES environment variable in 1873 * order to find loadable VK ICDs manifest files. From these 1874 * manifest files it finds the ICD libraries. 1875 * 1876 * \returns 1877 * a list of icds that were discovered 1878 */ 1879void loader_icd_scan( 1880 const struct loader_instance *inst, 1881 struct loader_icd_libs *icds) 1882{ 1883 char *file_str; 1884 struct loader_manifest_files manifest_files; 1885 1886 loader_scanned_icd_init(inst, icds); 1887 // Get a list of manifest files for ICDs 1888 loader_get_manifest_files(inst, "VK_ICD_FILENAMES", false, 1889 DEFAULT_VK_DRIVERS_INFO, &manifest_files); 1890 if (manifest_files.count == 0) 1891 return; 1892 loader_platform_thread_lock_mutex(&loader_json_lock); 1893 for (uint32_t i = 0; i < manifest_files.count; i++) { 1894 file_str = manifest_files.filename_list[i]; 1895 if (file_str == NULL) 1896 continue; 1897 1898 cJSON *json; 1899 json = loader_get_json(file_str); 1900 if (!json) 1901 continue; 1902 cJSON *item, *itemICD; 1903 item = cJSON_GetObjectItem(json, "file_format_version"); 1904 if (item == NULL) { 1905 loader_platform_thread_unlock_mutex(&loader_json_lock); 1906 return; 1907 } 1908 char *file_vers = cJSON_Print(item); 1909 loader_log(VK_DBG_REPORT_INFO_BIT, 0, "Found manifest file %s, version %s", 1910 file_str, file_vers); 1911 if (strcmp(file_vers, "\"1.0.0\"") != 0) 1912 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Unexpected manifest file version (expected 1.0.0), may cause errors"); 1913 loader_tls_heap_free(file_vers); 1914 itemICD = cJSON_GetObjectItem(json, "ICD"); 1915 if (itemICD != NULL) { 1916 item = cJSON_GetObjectItem(itemICD, "library_path"); 1917 if (item != NULL) { 1918 char *temp= cJSON_Print(item); 1919 if (!temp || strlen(temp) == 0) { 1920 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"library_path\" in ICD JSON file %s, skipping", file_str); 1921 loader_tls_heap_free(temp); 
            loader_heap_free(inst, file_str);
            cJSON_Delete(json);
            continue;
        }
        // Strip the surrounding quotes cJSON_Print leaves on the string value.
        temp[strlen(temp) - 1] = '\0';
        char *library_path = loader_stack_alloc(strlen(temp) + 1);
        strcpy(library_path, &temp[1]);
        loader_tls_heap_free(temp);
        if (!library_path || strlen(library_path) == 0) {
            loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"library_path\" in ICD JSON file %s, skipping", file_str);
            loader_heap_free(inst, file_str);
            cJSON_Delete(json);
            continue;
        }
        char fullpath[MAX_STRING_SIZE];
        // Print out the paths being searched if debugging is enabled
        loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, "Searching for ICD drivers named %s default dir %s\n", library_path, DEFAULT_VK_DRIVERS_PATH);
        if (loader_platform_is_path(library_path)) {
            // a relative or absolute path: expand it relative to the manifest's directory
            char *name_copy = loader_stack_alloc(strlen(file_str) + 1);
            char *rel_base;
            strcpy(name_copy, file_str);
            rel_base = loader_platform_dirname(name_copy);
            loader_expand_path(library_path, rel_base, sizeof(fullpath), fullpath);
        } else {
            // a filename which is assumed in a system directory
            loader_get_fullpath(library_path, DEFAULT_VK_DRIVERS_PATH, sizeof(fullpath), fullpath);
        }

        // Optional "api_version" property; defaults to 0 when absent.
        uint32_t vers = 0;
        item = cJSON_GetObjectItem(itemICD, "api_version");
        if (item != NULL) {
            temp = cJSON_Print(item);
            vers = loader_make_version(temp);
            loader_tls_heap_free(temp);
        }
        loader_scanned_icd_add(inst, icds, fullpath, vers);
        }
        else
            loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"library_path\" object in ICD JSON file %s, skipping", file_str);
        }
        else
            loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"ICD\" object in ICD JSON file %s, skipping", file_str);

        loader_heap_free(inst, file_str);
        cJSON_Delete(json);
    }
    loader_heap_free(inst, manifest_files.filename_list);
    loader_platform_thread_unlock_mutex(&loader_json_lock);
}


/**
 * Scan the layer manifest (JSON) files and populate the given instance and
 * device layer lists with the layer properties found in them. Any previously
 * scanned layer properties in the two lists are discarded first.
 */
void loader_layer_scan(
                       const struct loader_instance *inst,
                       struct loader_layer_list *instance_layers,
                       struct loader_layer_list *device_layers)
{
    char *file_str;
    struct loader_manifest_files manifest_files;
    cJSON *json;
    uint32_t i;

    // Get a list of manifest files for layers
    loader_get_manifest_files(inst, LAYERS_PATH_ENV, true, DEFAULT_VK_LAYERS_INFO,
                              &manifest_files);
    if (manifest_files.count == 0)
        return;

#if 0 //TODO
    /**
     * We need a list of the layer libraries, not just a list of
     * the layer properties (a layer library could expose more than
     * one layer property). This list of scanned layers would be
     * used to check for global and physicaldevice layer properties.
     */
    if (!loader_init_layer_library_list(&loader.scanned_layer_libraries)) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0,
                   "Alloc for layer list failed: %s line: %d", __FILE__, __LINE__);
        return;
    }
#endif

    /* cleanup any previously scanned libraries */
    loader_delete_layer_properties(inst, instance_layers);
    loader_delete_layer_properties(inst, device_layers);

    // All JSON parsing in the loader is serialized by loader_json_lock.
    loader_platform_thread_lock_mutex(&loader_json_lock);
    for (i = 0; i < manifest_files.count; i++) {
        file_str = manifest_files.filename_list[i];
        if (file_str == NULL)
            continue;

        // parse file into JSON struct
        json = loader_get_json(file_str);
        if (!json) {
            continue;
        }

        //TODO pass in implicit versus explicit bool
        //TODO error if device layers expose instance_extensions
        //TODO error if instance layers expose device extensions
        loader_add_layer_properties(inst,
                                    instance_layers,
                                    device_layers,
                                    json,
                                    false,
                                    file_str);

        loader_heap_free(inst, file_str);
        cJSON_Delete(json);
    }
    loader_heap_free(inst, manifest_files.filename_list);
    loader_platform_thread_unlock_mutex(&loader_json_lock);
}

/**
 * Internal GetInstanceProcAddr used at the start/terminator of the instance
 * chain. inst is an unwrapped VkInstance. Returns NULL for a NULL instance,
 * a NULL dispatch table, or when neither the dispatch table lookup nor the
 * downstream GetInstanceProcAddr can resolve pName.
 */
static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_instance_internal(VkInstance inst, const char * pName)
{
    // inst is not wrapped
    if (inst == VK_NULL_HANDLE) {
        return NULL;
    }
    VkLayerInstanceDispatchTable* disp_table = * (VkLayerInstanceDispatchTable **) inst;
    void *addr;

    if (!strcmp(pName, "vkGetInstanceProcAddr"))
        return (void *) loader_gpa_instance_internal;

    if (disp_table == NULL)
        return NULL;

    addr = loader_lookup_instance_dispatch_table(disp_table, pName);
    if (addr) {
        return addr;
    }

    // Not a known entrypoint: defer to the next GetInstanceProcAddr, if any.
    if (disp_table->GetInstanceProcAddr == NULL) {
        return NULL;
    }
    return disp_table->GetInstanceProcAddr(inst, pName);
}

/**
 * Initialize device_ext dispatch table entry as follows:
 * If dev == NULL find all logical devices created within this instance and
 * init the entry (given by idx) in the ext dispatch table.
 * If dev != NULL only initialize the entry in the given dev's dispatch table.
 * The initialization value is gotten by calling down the device chain with GDPA.
 * If GDPA returns NULL then don't initialize the dispatch table entry.
 */
static void loader_init_dispatch_dev_ext_entry(struct loader_instance *inst,
                                               struct loader_device *dev,
                                               uint32_t idx,
                                               const char *funcName)

{
    void *gdpa_value;
    if (dev != NULL) {
        // Single device: resolve funcName down just this device's chain.
        gdpa_value = dev->loader_dispatch.core_dispatch.GetDeviceProcAddr(
            dev->device, funcName);
        if (gdpa_value != NULL)
            dev->loader_dispatch.ext_dispatch.DevExt[idx] = (PFN_vkDevExt) gdpa_value;
    } else {
        // Walk every logical device of every ICD in this instance.
        for (uint32_t i = 0; i < inst->total_icd_count; i++) {
            struct loader_icd *icd = &inst->icds[i];
            struct loader_device *dev = icd->logical_device_list;
            while (dev) {
                gdpa_value = dev->loader_dispatch.core_dispatch.GetDeviceProcAddr(
                    dev->device, funcName);
                if (gdpa_value != NULL)
                    dev->loader_dispatch.ext_dispatch.DevExt[idx] =
                        (PFN_vkDevExt) gdpa_value;
                dev = dev->next;
            }
        }
    }

}

/**
 * Find all dev extension in the hash table and initialize the dispatch table
 * for dev for each of those extension entrypoints found in hash table.
 */
static void loader_init_dispatch_dev_ext(struct loader_instance *inst,
                                         struct loader_device *dev)
{
    for (uint32_t i = 0; i < MAX_NUM_DEV_EXTS; i++) {
        if (inst->disp_hash[i].func_name != NULL)
            loader_init_dispatch_dev_ext_entry(inst, dev, i,
                                               inst->disp_hash[i].func_name);
    }
}

/**
 * Return true when at least one ICD of this instance resolves funcName via
 * its GetInstanceProcAddr (i.e. some ICD supports the entrypoint).
 */
static bool loader_check_icds_for_address(struct loader_instance *inst,
                                          const char *funcName)
{
    struct loader_icd *icd;
    icd = inst->icds;
    while (icd) {
        if (icd->this_icd_lib->GetInstanceProcAddr(icd->instance, funcName))
            // this icd supports funcName
            return true;
        icd = icd->next;
    }

    return false;
}

/**
 * Free every allocated entry of the instance's device-extension dispatch
 * hash table and reset the whole table to its empty state.
 */
static void loader_free_dev_ext_table(struct loader_instance *inst)
{
    for (uint32_t i = 0; i < MAX_NUM_DEV_EXTS; i++) {
        loader_heap_free(inst, inst->disp_hash[i].func_name);
        loader_heap_free(inst, inst->disp_hash[i].list.index);

    }
    memset(inst->disp_hash, 0, sizeof(inst->disp_hash));
}

/**
 * Insert funcName into the dispatch hash table at *ptr_idx, or — on a hash
 * collision — at the next free slot, recording the alternate slot in the
 * primary slot's collision list. On success *ptr_idx holds the slot that was
 * actually used and true is returned; false is returned on allocation
 * failure or when the table is full.
 */
static bool loader_add_dev_ext_table(struct loader_instance *inst,
                                     uint32_t *ptr_idx,
                                     const char *funcName)
{
    uint32_t i;
    uint32_t idx = *ptr_idx;
    struct loader_dispatch_hash_list *list = &inst->disp_hash[idx].list;

    if (!inst->disp_hash[idx].func_name) {
        // no entry here at this idx, so use it
        assert(list->capacity == 0);
        inst->disp_hash[idx].func_name = (char *) loader_heap_alloc(inst,
                                                                    strlen(funcName) + 1,
                                                                    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (inst->disp_hash[idx].func_name == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0,
                       "loader_add_dev_ext_table() can't allocate memory for func_name");
            return false;
        }
        strncpy(inst->disp_hash[idx].func_name, funcName, strlen(funcName) + 1);
        return true;
    }

    // check for enough capacity in the collision list (capacity is in bytes)
    if (list->capacity == 0) {
        list->index = loader_heap_alloc(inst, 8 * sizeof(*(list->index)),
2167 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2168 if (list->index == NULL) { 2169 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2170 "loader_add_dev_ext_table() can't allocate list memory"); 2171 return false; 2172 } 2173 list->capacity = 8 * sizeof(*(list->index)); 2174 } else if (list->capacity < (list->count + 1) * sizeof(*(list->index))) { 2175 list->index = loader_heap_realloc(inst, list->index, list->capacity, 2176 list->capacity * 2, 2177 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2178 if (list->index == NULL) { 2179 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2180 "loader_add_dev_ext_table() can't reallocate list memory"); 2181 return false; 2182 } 2183 list->capacity *= 2; 2184 } 2185 2186 //find an unused index in the hash table and use it 2187 i = (idx + 1) % MAX_NUM_DEV_EXTS; 2188 do { 2189 if (!inst->disp_hash[i].func_name) { 2190 assert(inst->disp_hash[i].list.capacity == 0); 2191 inst->disp_hash[i].func_name = (char *) loader_heap_alloc(inst, 2192 strlen(funcName) + 1, 2193 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2194 if (inst->disp_hash[i].func_name == NULL) { 2195 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2196 "loader_add_dev_ext_table() can't rallocate func_name memory"); 2197 return false; 2198 } 2199 strncpy(inst->disp_hash[i].func_name, funcName, strlen(funcName) + 1); 2200 list->index[list->count] = i; 2201 list->count++; 2202 *ptr_idx = i; 2203 return true; 2204 } 2205 i = (i + 1) % MAX_NUM_DEV_EXTS; 2206 } while (i != idx); 2207 2208 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2209 "loader_add_dev_ext_table() couldn't insert into hash table; is it full?"); 2210 return false; 2211} 2212 2213static bool loader_name_in_dev_ext_table(struct loader_instance *inst, 2214 uint32_t *idx, 2215 const char *funcName) 2216{ 2217 uint32_t alt_idx; 2218 if (inst->disp_hash[*idx].func_name && !strcmp( 2219 inst->disp_hash[*idx].func_name, 2220 funcName)) 2221 return true; 2222 2223 // funcName wasn't at the primary spot in the hash table 2224 // search the list of secondary locations 
(shallow search, not deep search) 2225 for (uint32_t i = 0; i < inst->disp_hash[*idx].list.count; i++) { 2226 alt_idx = inst->disp_hash[*idx].list.index[i]; 2227 if (!strcmp(inst->disp_hash[*idx].func_name, funcName)) { 2228 *idx = alt_idx; 2229 return true; 2230 } 2231 } 2232 2233 return false; 2234} 2235 2236/** 2237 * This function returns generic trampoline code address for unknown entry points. 2238 * Presumably, these unknown entry points (as given by funcName) are device 2239 * extension entrypoints. A hash table is used to keep a list of unknown entry 2240 * points and their mapping to the device extension dispatch table 2241 * (struct loader_dev_ext_dispatch_table). 2242 * \returns 2243 * For a given entry point string (funcName), if an existing mapping is found the 2244 * trampoline address for that mapping is returned. Otherwise, this unknown entry point 2245 * has not been seen yet. Next check if a layer or ICD supports it. If so then a 2246 * new entry in the hash table is initialized and that trampoline address for 2247 * the new entry is returned. Null is returned if the hash table is full or 2248 * if no discovered layer or ICD returns a non-NULL GetProcAddr for it. 
 */
void *loader_dev_ext_gpa(struct loader_instance *inst,
                         const char *funcName)
{
    uint32_t idx;
    uint32_t seed = 0;

    // Hash the entrypoint name into a primary slot of the dispatch table.
    idx = murmurhash(funcName, strlen(funcName), seed) % MAX_NUM_DEV_EXTS;

    if (loader_name_in_dev_ext_table(inst, &idx, funcName))
        // found funcName already in hash
        return loader_get_dev_ext_trampoline(idx);

    // Check if funcName is supported in either ICDs or a layer library
    if (!loader_check_icds_for_address(inst, funcName)) {
        // TODO Add check in layer libraries for support of address
        // if support found in layers continue on
        return NULL;
    }

    if (loader_add_dev_ext_table(inst, &idx, funcName)) {
        // successfully added new table entry
        // init any dev dispatch table entrys as needed
        loader_init_dispatch_dev_ext_entry(inst, NULL, idx, funcName);
        return loader_get_dev_ext_trampoline(idx);
    }

    return NULL;
}

/**
 * Map a (possibly layer-wrapped) VkInstance back to the loader's own
 * loader_instance, or NULL when the dispatch table matches no known instance.
 */
struct loader_instance *loader_get_instance(const VkInstance instance)
{
    /* look up the loader_instance in our list by comparing dispatch tables, as
     * there is no guarantee the instance is still a loader_instance* after any
     * layers which wrap the instance object.
     */
    const VkLayerInstanceDispatchTable *disp;
    struct loader_instance *ptr_instance = NULL;
    disp = loader_get_instance_dispatch(instance);
    for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
        if (inst->disp == disp) {
            ptr_instance = inst;
            break;
        }
    }
    return ptr_instance;
}

/**
 * Load (or re-reference) the shared library backing layer_prop and return
 * its handle; the global loader.loaded_layer_lib_list tracks a ref count per
 * library so each is opened at most once.
 */
static loader_platform_dl_handle loader_add_layer_lib(
        const struct loader_instance *inst,
        const char *chain_type,
        struct loader_layer_properties *layer_prop)
{
    struct loader_lib_info *new_layer_lib_list, *my_lib;
    size_t new_alloc_size;
    /*
     * TODO: We can now track this information in the
     * scanned_layer_libraries list.
     */
    for (uint32_t i = 0; i < loader.loaded_layer_lib_count; i++) {
        if (strcmp(loader.loaded_layer_lib_list[i].lib_name, layer_prop->lib_name) == 0) {
            /* Have already loaded this library, just increment ref count */
            loader.loaded_layer_lib_list[i].ref_count++;
            loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
                       "%s Chain: Increment layer reference count for layer library %s",
                       chain_type, layer_prop->lib_name);
            return loader.loaded_layer_lib_list[i].lib_handle;
        }
    }

    /* Haven't seen this library so load it */
    // Grow the (byte-sized) capacity of the list when needed.
    new_alloc_size = 0;
    if (loader.loaded_layer_lib_capacity == 0)
        new_alloc_size = 8 * sizeof(struct loader_lib_info);
    else if (loader.loaded_layer_lib_capacity <= loader.loaded_layer_lib_count *
             sizeof(struct loader_lib_info))
        new_alloc_size = loader.loaded_layer_lib_capacity * 2;

    if (new_alloc_size) {
        new_layer_lib_list = loader_heap_realloc(
            inst, loader.loaded_layer_lib_list,
            loader.loaded_layer_lib_capacity,
            new_alloc_size,
            VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (!new_layer_lib_list) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "loader: realloc failed in loader_add_layer_lib");
            return NULL;
        }
        loader.loaded_layer_lib_capacity = new_alloc_size;
    } else
        new_layer_lib_list = loader.loaded_layer_lib_list;
    my_lib = &new_layer_lib_list[loader.loaded_layer_lib_count];

    strncpy(my_lib->lib_name, layer_prop->lib_name, sizeof(my_lib->lib_name));
    my_lib->lib_name[sizeof(my_lib->lib_name) - 1] = '\0';
    my_lib->ref_count = 0;
    my_lib->lib_handle = NULL;

    if ((my_lib->lib_handle = loader_platform_open_library(my_lib->lib_name)) == NULL) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0,
                   loader_platform_open_library_error(my_lib->lib_name));
        return NULL;
    } else {
        loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
                   "Chain: %s: Loading layer library %s",
                   chain_type,
layer_prop->lib_name); 2355 } 2356 loader.loaded_layer_lib_count++; 2357 loader.loaded_layer_lib_list = new_layer_lib_list; 2358 my_lib->ref_count++; 2359 2360 return my_lib->lib_handle; 2361} 2362 2363static void loader_remove_layer_lib( 2364 struct loader_instance *inst, 2365 struct loader_layer_properties *layer_prop) 2366{ 2367 uint32_t idx; 2368 struct loader_lib_info *new_layer_lib_list, *my_lib = NULL; 2369 2370 for (uint32_t i = 0; i < loader.loaded_layer_lib_count; i++) { 2371 if (strcmp(loader.loaded_layer_lib_list[i].lib_name, layer_prop->lib_name) == 0) { 2372 /* found matching library */ 2373 idx = i; 2374 my_lib = &loader.loaded_layer_lib_list[i]; 2375 break; 2376 } 2377 } 2378 2379 if (my_lib) { 2380 my_lib->ref_count--; 2381 if (my_lib->ref_count > 0) { 2382 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, 2383 "Decrement reference count for layer library %s", layer_prop->lib_name); 2384 return; 2385 } 2386 } 2387 loader_platform_close_library(my_lib->lib_handle); 2388 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, 2389 "Unloading layer library %s", layer_prop->lib_name); 2390 2391 /* Need to remove unused library from list */ 2392 new_layer_lib_list = loader_heap_alloc(inst, 2393 loader.loaded_layer_lib_capacity, 2394 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2395 if (!new_layer_lib_list) { 2396 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "loader: heap alloc failed loader_remove_layer_library"); 2397 return; 2398 } 2399 2400 if (idx > 0) { 2401 /* Copy records before idx */ 2402 memcpy(new_layer_lib_list, &loader.loaded_layer_lib_list[0], 2403 sizeof(struct loader_lib_info) * idx); 2404 } 2405 if (idx < (loader.loaded_layer_lib_count - 1)) { 2406 /* Copy records after idx */ 2407 memcpy(&new_layer_lib_list[idx], &loader.loaded_layer_lib_list[idx+1], 2408 sizeof(struct loader_lib_info) * (loader.loaded_layer_lib_count - idx - 1)); 2409 } 2410 2411 loader_heap_free(inst, loader.loaded_layer_lib_list); 2412 loader.loaded_layer_lib_count--; 2413 loader.loaded_layer_lib_list = 
new_layer_lib_list; 2414} 2415 2416 2417/** 2418 * Go through the search_list and find any layers which match type. If layer 2419 * type match is found in then add it to ext_list. 2420 */ 2421//TODO need to handle implict layer enable env var and disable env var 2422static void loader_add_layer_implicit( 2423 const struct loader_instance *inst, 2424 const enum layer_type type, 2425 struct loader_layer_list *list, 2426 const struct loader_layer_list *search_list) 2427{ 2428 uint32_t i; 2429 for (i = 0; i < search_list->count; i++) { 2430 const struct loader_layer_properties *prop = &search_list->list[i]; 2431 if (prop->type & type) { 2432 /* Found an layer with the same type, add to layer_list */ 2433 loader_add_to_layer_list(inst, list, 1, prop); 2434 } 2435 } 2436 2437} 2438 2439/** 2440 * Get the layer name(s) from the env_name environment variable. If layer 2441 * is found in search_list then add it to layer_list. But only add it to 2442 * layer_list if type matches. 2443 */ 2444static void loader_add_layer_env( 2445 const struct loader_instance *inst, 2446 const enum layer_type type, 2447 const char *env_name, 2448 struct loader_layer_list *layer_list, 2449 const struct loader_layer_list *search_list) 2450{ 2451 char *layerEnv; 2452 char *next, *name; 2453 2454 layerEnv = getenv(env_name); 2455 if (layerEnv == NULL) { 2456 return; 2457 } 2458 name = loader_stack_alloc(strlen(layerEnv) + 1); 2459 if (name == NULL) { 2460 return; 2461 } 2462 strcpy(name, layerEnv); 2463 2464 while (name && *name ) { 2465 next = loader_get_next_path(name); 2466 loader_find_layer_name_add_list(inst, name, type, search_list, layer_list); 2467 name = next; 2468 } 2469 2470 return; 2471} 2472 2473void loader_deactivate_instance_layers(struct loader_instance *instance) 2474{ 2475 if (!instance->activated_layer_list.count) { 2476 return; 2477 } 2478 2479 /* Create instance chain of enabled layers */ 2480 for (uint32_t i = 0; i < instance->activated_layer_list.count; i++) { 2481 struct 
loader_layer_properties *layer_prop = &instance->activated_layer_list.list[i];

        loader_remove_layer_lib(instance, layer_prop);
    }
    loader_destroy_layer_list(instance, &instance->activated_layer_list);
}

/**
 * Build the list of layers to activate for an instance: implicit layers
 * first, then layers named in VK_INSTANCE_LAYERS, then layers requested by
 * the application in pCreateInfo.
 */
VkResult loader_enable_instance_layers(
        struct loader_instance *inst,
        const VkInstanceCreateInfo *pCreateInfo,
        const struct loader_layer_list *instance_layers)
{
    VkResult err;

    assert(inst && "Cannot have null instance");

    if (!loader_init_layer_list(inst, &inst->activated_layer_list)) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc Instance activated layer list");
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    /* Add any implicit layers first */
    loader_add_layer_implicit(
        inst,
        VK_LAYER_TYPE_INSTANCE_IMPLICIT,
        &inst->activated_layer_list,
        instance_layers);

    /* Add any layers specified via environment variable next */
    loader_add_layer_env(
        inst,
        VK_LAYER_TYPE_INSTANCE_EXPLICIT,
        "VK_INSTANCE_LAYERS",
        &inst->activated_layer_list,
        instance_layers);

    /* Add layers specified by the application */
    err = loader_add_layer_names_to_list(
        inst,
        &inst->activated_layer_list,
        pCreateInfo->enabledLayerNameCount,
        pCreateInfo->ppEnabledLayerNames,
        instance_layers);

    return err;
}

/**
 * Load every activated instance layer, wire them into a call chain
 * (built bottom-up from the loader's internal GIPA), then initialize the
 * instance core dispatch table with the top of the chain.
 * Returns the number of activated layers.
 */
uint32_t loader_activate_instance_layers(struct loader_instance *inst)
{
    uint32_t layer_idx;
    VkBaseLayerObject *wrappedInstance;

    if (inst == NULL) {
        return 0;
    }

    // NOTE inst is unwrapped at this point in time
    void* baseObj = (void*) inst;
    void* nextObj = (void*) inst;
    VkBaseLayerObject *nextInstObj;
    PFN_vkGetInstanceProcAddr nextGPA = loader_gpa_instance_internal;

    if (!inst->activated_layer_list.count) {
        // No layers: terminate the chain with the loader's internal GIPA.
        loader_init_instance_core_dispatch_table(inst->disp, nextGPA, (VkInstance) nextObj, (VkInstance) baseObj);
        return 0;
    }

    wrappedInstance = loader_stack_alloc(sizeof(VkBaseLayerObject)
                                         * inst->activated_layer_list.count);
    if (!wrappedInstance) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc Instance objects for layer");
        return 0;
    }

    /* Create instance chain of enabled layers */
    layer_idx = inst->activated_layer_list.count - 1;
    for (int32_t i = inst->activated_layer_list.count - 1; i >= 0; i--) {
        struct loader_layer_properties *layer_prop = &inst->activated_layer_list.list[i];
        loader_platform_dl_handle lib_handle;

        /*
         * Note: An extension's Get*ProcAddr should not return a function pointer for
         * any extension entry points until the extension has been enabled.
         * To do this requires a different behavior from Get*ProcAddr functions implemented
         * in layers.
         * The very first call to a layer will be it's Get*ProcAddr function requesting
         * the layer's vkGet*ProcAddr. The layer should initialize its internal dispatch table
         * with the wrapped object given (either Instance or Device) and return the layer's
         * Get*ProcAddr function. The layer should also use this opportunity to record the
         * baseObject so that it can find the correct local dispatch table on future calls.
         * Subsequent calls to Get*ProcAddr, CreateInstance, CreateDevice
         * will not use a wrapped object and must look up their local dispatch table from
         * the given baseObject.
         */
        nextInstObj = (wrappedInstance + layer_idx);
        nextInstObj->pGPA = (PFN_vkGPA) nextGPA;
        nextInstObj->baseObject = baseObj;
        nextInstObj->nextObject = nextObj;
        nextObj = (void*) nextInstObj;

        lib_handle = loader_add_layer_lib(inst, "instance", layer_prop);
        if ((nextGPA = layer_prop->functions.get_instance_proc_addr) == NULL) {
            if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) {
                // Default entrypoint name; cache the resolved pointer.
                nextGPA = (PFN_vkGetInstanceProcAddr) loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
                layer_prop->functions.get_instance_proc_addr = nextGPA;
            } else
                nextGPA = (PFN_vkGetInstanceProcAddr) loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gipa);
            if (!nextGPA) {
                loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to find vkGetInstanceProcAddr in layer %s", layer_prop->lib_name);

                /* TODO: Should we return nextObj, nextGPA to previous? or decrement layer_list count*/
                continue;
            }
        }

        loader_log(VK_DBG_REPORT_INFO_BIT, 0,
                   "Insert instance layer %s (%s)",
                   layer_prop->info.layerName,
                   layer_prop->lib_name);

        layer_idx--;
    }

    loader_init_instance_core_dispatch_table(inst->disp, nextGPA, (VkInstance) nextObj, (VkInstance) baseObj);

    return inst->activated_layer_list.count;
}

/**
 * Initialize the instance extension dispatch entries once layers are active.
 */
void loader_activate_instance_layer_extensions(struct loader_instance *inst)
{

    loader_init_instance_extension_dispatch_table(inst->disp,
                                                  inst->disp->GetInstanceProcAddr,
                                                  (VkInstance) inst);
}

/**
 * Build the list of layers to activate for a device: implicit layers first,
 * then layers named in VK_DEVICE_LAYERS, then layers requested by the
 * application in pCreateInfo.
 */
static VkResult loader_enable_device_layers(
        const struct loader_instance *inst,
        struct loader_icd *icd,
        struct loader_device *dev,
        const VkDeviceCreateInfo *pCreateInfo,
        const struct loader_layer_list *device_layers)

{
    VkResult err;

    assert(dev && "Cannot have null device");

    if (dev->activated_layer_list.list == NULL || dev->activated_layer_list.capacity == 0) {
        loader_init_layer_list(inst, &dev->activated_layer_list);
    }

    if (dev->activated_layer_list.list == NULL) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc device activated layer list");
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    /* Add any implicit layers first */
    loader_add_layer_implicit(
        inst,
        VK_LAYER_TYPE_DEVICE_IMPLICIT,
        &dev->activated_layer_list,
        device_layers);

    /* Add any layers specified via environment variable next */
    loader_add_layer_env(
        inst,
        VK_LAYER_TYPE_DEVICE_EXPLICIT,
        "VK_DEVICE_LAYERS",
        &dev->activated_layer_list,
        device_layers);

    /* Add layers specified by the application */
    err = loader_add_layer_names_to_list(
        inst,
        &dev->activated_layer_list,
        pCreateInfo->enabledLayerNameCount,
        pCreateInfo->ppEnabledLayerNames,
        device_layers);

    return err;
}

/*
 * This function terminates the device chain for CreateDevice.
 * CreateDevice is a special case and so the loader call's
 * the ICD's CreateDevice before creating the chain. Since
 * we can't call CreateDevice twice we must terminate the
 * device chain with something else.
 */
static VKAPI_ATTR VkResult VKAPI_CALL scratch_vkCreateDevice(
    VkPhysicalDevice physicalDevice,
    const VkDeviceCreateInfo *pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDevice *pDevice)
{
    return VK_SUCCESS;
}

/**
 * Terminator GetDeviceProcAddr for the device chain; resolves names through
 * the ICD that owns the given device (except the two chain specials above).
 */
static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_GetDeviceChainProcAddr(VkDevice device, const char * name)
{
    if (!strcmp(name, "vkGetDeviceProcAddr"))
        return (PFN_vkVoidFunction) loader_GetDeviceChainProcAddr;
    if (!strcmp(name, "vkCreateDevice"))
        return (PFN_vkVoidFunction) scratch_vkCreateDevice;

    struct loader_device *found_dev;
    // NOTE(review): icd is dereferenced without a NULL check — presumably
    // every VkDevice reaching here is owned by some ICD; confirm.
    struct loader_icd *icd = loader_get_icd_and_device(device, &found_dev);
    return icd->GetDeviceProcAddr(device, name);
}

/**
 * Load every activated device layer, wire them into a call chain terminated
 * by loader_GetDeviceChainProcAddr, then initialize the device dispatch
 * table with the top of the chain. Returns the number of activated layers.
 */
static uint32_t loader_activate_device_layers(
        const struct loader_instance *inst,
        struct loader_device *dev,
        VkDevice device)
{
    if (!dev) {
        return 0;
    }

    /* activate any layer libraries */
    void* nextObj = (void*) device;
    void* baseObj = nextObj;
    VkBaseLayerObject *nextGpuObj;
    PFN_vkGetDeviceProcAddr nextGPA = loader_GetDeviceChainProcAddr;
    VkBaseLayerObject *wrappedGpus;

    if (!dev->activated_layer_list.count) {
        // No layers: terminate the chain directly.
        loader_init_device_dispatch_table(&dev->loader_dispatch, nextGPA,
                                          (VkDevice) nextObj, (VkDevice) baseObj);
        return 0;
    }

    wrappedGpus = loader_heap_alloc(inst,
                                    sizeof (VkBaseLayerObject) * dev->activated_layer_list.count,
                                    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (!wrappedGpus) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc Gpu objects for layer");
        return 0;
    }

    // Build the chain bottom-up, last activated layer first.
    for (int32_t i = dev->activated_layer_list.count - 1; i >= 0; i--) {

        struct loader_layer_properties *layer_prop = &dev->activated_layer_list.list[i];
        loader_platform_dl_handle lib_handle;

        nextGpuObj = (wrappedGpus + i);
        nextGpuObj->pGPA =
(PFN_vkGPA)nextGPA; 2729 nextGpuObj->baseObject = baseObj; 2730 nextGpuObj->nextObject = nextObj; 2731 nextObj = (void*) nextGpuObj; 2732 2733 lib_handle = loader_add_layer_lib(inst, "device", layer_prop); 2734 if ((nextGPA = layer_prop->functions.get_device_proc_addr) == NULL) { 2735 if (layer_prop->functions.str_gdpa == NULL || strlen(layer_prop->functions.str_gdpa) == 0) { 2736 nextGPA = (PFN_vkGetDeviceProcAddr) loader_platform_get_proc_address(lib_handle, "vkGetDeviceProcAddr"); 2737 layer_prop->functions.get_device_proc_addr = nextGPA; 2738 } else 2739 nextGPA = (PFN_vkGetDeviceProcAddr) loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gdpa); 2740 if (!nextGPA) { 2741 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to find vkGetDeviceProcAddr in layer %s", layer_prop->lib_name); 2742 continue; 2743 } 2744 } 2745 2746 loader_log(VK_DBG_REPORT_INFO_BIT, 0, 2747 "Insert device layer library %s (%s)", 2748 layer_prop->info.layerName, 2749 layer_prop->lib_name); 2750 2751 } 2752 2753 loader_init_device_dispatch_table(&dev->loader_dispatch, nextGPA, 2754 (VkDevice) nextObj, (VkDevice) baseObj); 2755 loader_heap_free(inst, wrappedGpus); 2756 2757 return dev->activated_layer_list.count; 2758} 2759 2760VkResult loader_validate_layers( 2761 const uint32_t layer_count, 2762 const char * const *ppEnabledLayerNames, 2763 const struct loader_layer_list *list) 2764{ 2765 struct loader_layer_properties *prop; 2766 2767 for (uint32_t i = 0; i < layer_count; i++) { 2768 prop = loader_get_layer_property(ppEnabledLayerNames[i], 2769 list); 2770 if (!prop) { 2771 return VK_ERROR_LAYER_NOT_PRESENT; 2772 } 2773 } 2774 2775 return VK_SUCCESS; 2776} 2777 2778VkResult loader_validate_instance_extensions( 2779 const struct loader_extension_list *icd_exts, 2780 const struct loader_layer_list *instance_layer, 2781 const VkInstanceCreateInfo *pCreateInfo) 2782{ 2783 VkExtensionProperties *extension_prop; 2784 struct loader_layer_properties *layer_prop; 2785 2786 for 
(uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) { 2787 extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], 2788 icd_exts); 2789 2790 if (extension_prop) { 2791 continue; 2792 } 2793 2794 extension_prop = NULL; 2795 2796 /* Not in global list, search layer extension lists */ 2797 for (uint32_t j = 0; j < pCreateInfo->enabledLayerNameCount; j++) { 2798 layer_prop = loader_get_layer_property(pCreateInfo->ppEnabledLayerNames[i], 2799 instance_layer); 2800 if (!layer_prop) { 2801 /* Should NOT get here, loader_validate_layers 2802 * should have already filtered this case out. 2803 */ 2804 continue; 2805 } 2806 2807 extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], 2808 &layer_prop->instance_extension_list); 2809 if (extension_prop) { 2810 /* Found the extension in one of the layers enabled by the app. */ 2811 break; 2812 } 2813 } 2814 2815 if (!extension_prop) { 2816 /* Didn't find extension name in any of the global layers, error out */ 2817 return VK_ERROR_EXTENSION_NOT_PRESENT; 2818 } 2819 } 2820 return VK_SUCCESS; 2821} 2822 2823VkResult loader_validate_device_extensions( 2824 struct loader_physical_device *phys_dev, 2825 const struct loader_layer_list *device_layer, 2826 const VkDeviceCreateInfo *pCreateInfo) 2827{ 2828 VkExtensionProperties *extension_prop; 2829 struct loader_layer_properties *layer_prop; 2830 2831 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) { 2832 const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i]; 2833 extension_prop = get_extension_property(extension_name, 2834 &phys_dev->device_extension_cache); 2835 2836 if (extension_prop) { 2837 continue; 2838 } 2839 2840 /* Not in global list, search layer extension lists */ 2841 for (uint32_t j = 0; j < pCreateInfo->enabledLayerNameCount; j++) { 2842 const char *layer_name = pCreateInfo->ppEnabledLayerNames[j]; 2843 layer_prop = loader_get_layer_property(layer_name, 2844 device_layer); 
2845 2846 if (!layer_prop) { 2847 /* Should NOT get here, loader_validate_instance_layers 2848 * should have already filtered this case out. 2849 */ 2850 continue; 2851 } 2852 2853 extension_prop = get_extension_property(extension_name, 2854 &layer_prop->device_extension_list); 2855 if (extension_prop) { 2856 /* Found the extension in one of the layers enabled by the app. */ 2857 break; 2858 } 2859 } 2860 2861 if (!extension_prop) { 2862 /* Didn't find extension name in any of the device layers, error out */ 2863 return VK_ERROR_EXTENSION_NOT_PRESENT; 2864 } 2865 } 2866 return VK_SUCCESS; 2867} 2868 2869VKAPI_ATTR VkResult VKAPI_CALL loader_CreateInstance( 2870 const VkInstanceCreateInfo* pCreateInfo, 2871 const VkAllocationCallbacks* pAllocator, 2872 VkInstance* pInstance) 2873{ 2874 struct loader_instance *ptr_instance = *(struct loader_instance **) pInstance; 2875 struct loader_icd *icd; 2876 VkExtensionProperties *prop; 2877 char **filtered_extension_names = NULL; 2878 VkInstanceCreateInfo icd_create_info; 2879 VkResult res = VK_SUCCESS; 2880 bool success; 2881 2882 icd_create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; 2883 icd_create_info.enabledLayerNameCount = 0; 2884 icd_create_info.ppEnabledLayerNames = NULL; 2885 icd_create_info.pApplicationInfo = pCreateInfo->pApplicationInfo; 2886 icd_create_info.pNext = pCreateInfo->pNext; 2887 2888 /* 2889 * NOTE: Need to filter the extensions to only those 2890 * supported by the ICD. 2891 * No ICD will advertise support for layers. An ICD 2892 * library could support a layer, but it would be 2893 * independent of the actual ICD, just in the same library. 
2894 */ 2895 filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionNameCount * sizeof(char *)); 2896 if (!filtered_extension_names) { 2897 return VK_ERROR_OUT_OF_HOST_MEMORY; 2898 } 2899 icd_create_info.ppEnabledExtensionNames = (const char * const *) filtered_extension_names; 2900 2901 for (uint32_t i = 0; i < ptr_instance->icd_libs.count; i++) { 2902 icd = loader_icd_add(ptr_instance, &ptr_instance->icd_libs.list[i]); 2903 if (icd) { 2904 icd_create_info.enabledExtensionNameCount = 0; 2905 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) { 2906 prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], 2907 &ptr_instance->ext_list); 2908 if (prop) { 2909 filtered_extension_names[icd_create_info.enabledExtensionNameCount] = (char *) pCreateInfo->ppEnabledExtensionNames[i]; 2910 icd_create_info.enabledExtensionNameCount++; 2911 } 2912 } 2913 2914 res = ptr_instance->icd_libs.list[i].CreateInstance(&icd_create_info, 2915 pAllocator, 2916 &(icd->instance)); 2917 success = loader_icd_init_entrys( 2918 icd, 2919 icd->instance, 2920 ptr_instance->icd_libs.list[i].GetInstanceProcAddr); 2921 2922 if (res != VK_SUCCESS || !success) 2923 { 2924 ptr_instance->icds = ptr_instance->icds->next; 2925 loader_icd_destroy(ptr_instance, icd); 2926 icd->instance = VK_NULL_HANDLE; 2927 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2928 "ICD ignored: failed to CreateInstance and find entrypoints with ICD"); 2929 } 2930 } 2931 } 2932 2933 /* 2934 * If no ICDs were added to instance list and res is unchanged 2935 * from it's initial value, the loader was unable to find 2936 * a suitable ICD. 
 */
    /* NOTE(review): this is the tail of loader_CreateInstance — the head of
     * the function is above this chunk.  If no ICD successfully created an
     * instance, fail: prefer the ICD's own error code when one was recorded,
     * otherwise report that no compatible driver was found. */
    if (ptr_instance->icds == NULL) {
        if (res == VK_SUCCESS) {
            return VK_ERROR_INCOMPATIBLE_DRIVER;
        } else {
            return res;
        }
    }

    return VK_SUCCESS;
}

/**
 * Terminator for vkDestroyInstance.
 * Unlinks the instance from the loader's global instance list, calls
 * DestroyInstance into every ICD that holds a live instance, then frees all
 * per-instance loader state (ICD objects, layer lists, extension lists,
 * cached physical devices, device-extension dispatch table).
 */
VKAPI_ATTR void VKAPI_CALL loader_DestroyInstance(
        VkInstance instance,
        const VkAllocationCallbacks* pAllocator)
{
    struct loader_instance *ptr_instance = loader_instance(instance);
    struct loader_icd *icds = ptr_instance->icds;
    struct loader_icd *next_icd;

    // Remove this instance from the list of instances:
    struct loader_instance *prev = NULL;
    struct loader_instance *next = loader.instances;
    while (next != NULL) {
        if (next == ptr_instance) {
            // Remove this instance from the list:
            if (prev)
                prev->next = next->next;
            else
                loader.instances = next->next;
            break;
        }
        prev = next;
        next = next->next;
    }

    // Tear down each ICD's instance, then free the loader_icd wrapper.
    // next_icd is saved first because loader_icd_destroy frees icds.
    while (icds) {
        if (icds->instance) {
            icds->DestroyInstance(icds->instance, pAllocator);
        }
        next_icd = icds->next;
        icds->instance = VK_NULL_HANDLE;
        loader_icd_destroy(ptr_instance, icds);

        icds = next_icd;
    }
    loader_delete_layer_properties(ptr_instance, &ptr_instance->device_layer_list);
    loader_delete_layer_properties(ptr_instance, &ptr_instance->instance_layer_list);
    loader_scanned_icd_clear(ptr_instance, &ptr_instance->icd_libs);
    loader_destroy_ext_list(ptr_instance, &ptr_instance->ext_list);
    // Each cached physical device owns a device-extension list; free them
    // before freeing the phys_devs array itself.
    for (uint32_t i = 0; i < ptr_instance->total_gpu_count; i++)
        loader_destroy_ext_list(ptr_instance, &ptr_instance->phys_devs[i].device_extension_cache);
    loader_heap_free(ptr_instance, ptr_instance->phys_devs);
    loader_free_dev_ext_table(ptr_instance);
}

/**
 * Enumerate physical devices from every activated ICD and build the
 * loader's wrapped physical-device array (ptr_instance->phys_devs).
 *
 * Two-pass scheme: first query each ICD for its device count to size the
 * combined allocation, then query again for the handles and wrap each one
 * in a loader_physical_device that records the owning ICD and instance.
 *
 * @return VK_SUCCESS, VK_ERROR_OUT_OF_HOST_MEMORY, or the first ICD error.
 */
VkResult loader_init_physical_device_info(struct loader_instance *ptr_instance)
{
    struct loader_icd *icd;
    uint32_t i, j, idx, count = 0;
    VkResult res;
    struct loader_phys_dev_per_icd *phys_devs;

    ptr_instance->total_gpu_count = 0;
    // Per-ICD scratch array lives on the stack; freed automatically.
    phys_devs = (struct loader_phys_dev_per_icd *) loader_stack_alloc(
                        sizeof(struct loader_phys_dev_per_icd) *
                        ptr_instance->total_icd_count);
    if (!phys_devs)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    // Pass 1: count devices per ICD and accumulate the grand total.
    icd = ptr_instance->icds;
    for (i = 0; i < ptr_instance->total_icd_count; i++) {
        assert(icd);
        res = icd->EnumeratePhysicalDevices(icd->instance, &phys_devs[i].count, NULL);
        if (res != VK_SUCCESS)
            return res;
        count += phys_devs[i].count;
        icd = icd->next;
    }

    // One heap allocation for all wrapped physical devices; freed in
    // loader_DestroyInstance.
    ptr_instance->phys_devs = (struct loader_physical_device *) loader_heap_alloc(
                ptr_instance,
                count * sizeof(struct loader_physical_device),
                VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (!ptr_instance->phys_devs)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    icd = ptr_instance->icds;

    struct loader_physical_device *inst_phys_devs = ptr_instance->phys_devs;
    idx = 0;
    // Pass 2: fetch the actual handles and initialize the wrappers.
    for (i = 0; i < ptr_instance->total_icd_count; i++) {
        assert(icd);

        phys_devs[i].phys_devs = (VkPhysicalDevice *) loader_stack_alloc(
                        phys_devs[i].count * sizeof(VkPhysicalDevice));
        if (!phys_devs[i].phys_devs) {
            // Roll back the heap allocation on any mid-loop failure.
            loader_heap_free(ptr_instance, ptr_instance->phys_devs);
            ptr_instance->phys_devs = NULL;
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
        res = icd->EnumeratePhysicalDevices(
                        icd->instance,
                        &(phys_devs[i].count),
                        phys_devs[i].phys_devs);
        if ((res == VK_SUCCESS)) {
            ptr_instance->total_gpu_count += phys_devs[i].count;
            for (j = 0; j < phys_devs[i].count; j++) {

                // initialize the loader's physicalDevice object
                loader_set_dispatch((void *) &inst_phys_devs[idx], ptr_instance->disp);
                inst_phys_devs[idx].this_instance = ptr_instance;
                inst_phys_devs[idx].this_icd = icd;
                inst_phys_devs[idx].phys_dev = phys_devs[i].phys_devs[j];
                // Extension cache is filled lazily on first CreateDevice /
                // EnumerateDeviceExtensionProperties.
                memset(&inst_phys_devs[idx].device_extension_cache, 0, sizeof(struct loader_extension_list));

                idx++;
            }
        } else {
            loader_heap_free(ptr_instance, ptr_instance->phys_devs);
            ptr_instance->phys_devs = NULL;
            return res;
        }

        icd = icd->next;
    }

    return VK_SUCCESS;
}

/**
 * Terminator for vkEnumeratePhysicalDevices.
 * Lazily initializes the wrapped physical-device array on first call, then
 * hands back loader_physical_device pointers cast to VkPhysicalDevice so the
 * loader can recover the owning ICD on later per-device calls.
 *
 * NOTE(review): the standard "pPhysicalDeviceCount is capacity on input"
 * clamping is not applied here — all total_gpu_count entries are written.
 * Presumably the trampoline above this terminator handles clamping; verify.
 */
VKAPI_ATTR VkResult VKAPI_CALL loader_EnumeratePhysicalDevices(
        VkInstance instance,
        uint32_t* pPhysicalDeviceCount,
        VkPhysicalDevice* pPhysicalDevices)
{
    uint32_t i;
    struct loader_instance *ptr_instance = (struct loader_instance *) instance;
    VkResult res = VK_SUCCESS;

    if (ptr_instance->total_gpu_count == 0) {
        res = loader_init_physical_device_info(ptr_instance);
    }

    *pPhysicalDeviceCount = ptr_instance->total_gpu_count;
    if (!pPhysicalDevices) {
        // Count-query only.
        return res;
    }

    for (i = 0; i < ptr_instance->total_gpu_count; i++) {
        pPhysicalDevices[i] = (VkPhysicalDevice) &ptr_instance->phys_devs[i];
    }

    return res;
}

/**
 * Terminator for vkGetPhysicalDeviceProperties: unwraps the loader's
 * physical-device object and forwards to the owning ICD, if it exposes
 * the entry point.
 */
VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceProperties(
        VkPhysicalDevice physicalDevice,
        VkPhysicalDeviceProperties* pProperties)
{
    struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice;
    struct loader_icd *icd = phys_dev->this_icd;

    if (icd->GetPhysicalDeviceProperties)
        icd->GetPhysicalDeviceProperties(phys_dev->phys_dev, pProperties);
}

/**
 * Terminator for vkGetPhysicalDeviceQueueFamilyProperties: unwrap and
 * forward to the owning ICD.
 */
VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceQueueFamilyProperties (
        VkPhysicalDevice physicalDevice,
        uint32_t* pQueueFamilyPropertyCount,
        VkQueueFamilyProperties* pProperties)
{
    struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice;
    struct loader_icd *icd = phys_dev->this_icd;

    if (icd->GetPhysicalDeviceQueueFamilyProperties)
        icd->GetPhysicalDeviceQueueFamilyProperties(phys_dev->phys_dev, pQueueFamilyPropertyCount, pProperties);
}

/**
 * Terminator for vkGetPhysicalDeviceMemoryProperties: unwrap and forward
 * to the owning ICD.
 */
VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceMemoryProperties (
        VkPhysicalDevice physicalDevice,
        VkPhysicalDeviceMemoryProperties* pProperties)
{
    struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice;
    struct loader_icd *icd = phys_dev->this_icd;

    if (icd->GetPhysicalDeviceMemoryProperties)
        icd->GetPhysicalDeviceMemoryProperties(phys_dev->phys_dev, pProperties);
}

/**
 * Terminator for vkGetPhysicalDeviceFeatures: unwrap and forward to the
 * owning ICD.
 */
VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceFeatures(
        VkPhysicalDevice physicalDevice,
        VkPhysicalDeviceFeatures* pFeatures)
{
    struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice;
    struct loader_icd *icd = phys_dev->this_icd;

    if (icd->GetPhysicalDeviceFeatures)
        icd->GetPhysicalDeviceFeatures(phys_dev->phys_dev, pFeatures);
}

/**
 * Terminator for vkGetPhysicalDeviceFormatProperties: unwrap and forward
 * to the owning ICD.
 */
VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceFormatProperties(
        VkPhysicalDevice physicalDevice,
        VkFormat format,
        VkFormatProperties* pFormatInfo)
{
    struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice;
    struct loader_icd *icd = phys_dev->this_icd;

    if (icd->GetPhysicalDeviceFormatProperties)
        icd->GetPhysicalDeviceFormatProperties(phys_dev->phys_dev, format, pFormatInfo);
}

/**
 * Terminator for vkGetPhysicalDeviceImageFormatProperties: unwrap and
 * forward to the owning ICD.
 *
 * @return VK_ERROR_INITIALIZATION_FAILED if the ICD does not expose the
 *         entry point, otherwise the ICD's result.
 */
VKAPI_ATTR VkResult VKAPI_CALL loader_GetPhysicalDeviceImageFormatProperties(
        VkPhysicalDevice physicalDevice,
        VkFormat format,
        VkImageType type,
        VkImageTiling tiling,
        VkImageUsageFlags usage,
        VkImageCreateFlags flags,
        VkImageFormatProperties* pImageFormatProperties)
{
    struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice;
    struct loader_icd *icd = phys_dev->this_icd;

    if (!icd->GetPhysicalDeviceImageFormatProperties)
        return VK_ERROR_INITIALIZATION_FAILED;

    return icd->GetPhysicalDeviceImageFormatProperties(phys_dev->phys_dev, format,
                                type, tiling, usage, flags, pImageFormatProperties);
}

/**
 * Terminator for vkGetPhysicalDeviceSparseImageFormatProperties: unwrap
 * and forward to the owning ICD.
 */
VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceSparseImageFormatProperties(
        VkPhysicalDevice physicalDevice,
        VkFormat format,
        VkImageType type,
        VkSampleCountFlagBits samples,
        VkImageUsageFlags usage,
        VkImageTiling tiling,
        uint32_t* pNumProperties,
        VkSparseImageFormatProperties* pProperties)
{
    struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice;
    struct loader_icd *icd = phys_dev->this_icd;

    if (icd->GetPhysicalDeviceSparseImageFormatProperties)
        icd->GetPhysicalDeviceSparseImageFormatProperties(phys_dev->phys_dev, format, type, samples, usage, tiling, pNumProperties, pProperties);
}

/**
 * Terminator for vkCreateDevice.
 * Validates requested layers/extensions against what the loader discovered,
 * creates the ICD device, registers it as a loader logical device, activates
 * any device layers, and finally dispatches CreateDevice down the newly
 * built device chain.
 *
 * @return VK_SUCCESS on success; VK_ERROR_INITIALIZATION_FAILED when the ICD
 *         is missing or lacks CreateDevice; VK_ERROR_OUT_OF_HOST_MEMORY on
 *         allocation failure; otherwise the first validation/ICD error.
 */
VKAPI_ATTR VkResult VKAPI_CALL loader_CreateDevice(
        VkPhysicalDevice physicalDevice,
        const VkDeviceCreateInfo* pCreateInfo,
        const VkAllocationCallbacks* pAllocator,
        VkDevice* pDevice)
{
    struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice;
    struct loader_icd *icd = phys_dev->this_icd;
    struct loader_device *dev;
    struct loader_instance *inst;
    VkDeviceCreateInfo device_create_info;
    char **filtered_extension_names = NULL;
    VkResult res;

    assert(pCreateInfo->queueCreateInfoCount >= 1);

    if (!icd)
        return VK_ERROR_INITIALIZATION_FAILED;

    inst = phys_dev->this_instance;

    if (!icd->CreateDevice) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    /* validate any app enabled layers are available */
    if (pCreateInfo->enabledLayerNameCount > 0) {
        res = loader_validate_layers(pCreateInfo->enabledLayerNameCount,
                                     pCreateInfo->ppEnabledLayerNames,
                                     &inst->device_layer_list);
        if (res != VK_SUCCESS) {
            return res;
        }
    }

    /* Get the physical device extensions if they haven't been retrieved yet */
    if (phys_dev->device_extension_cache.capacity == 0) {
        if (!loader_init_ext_list(inst, &phys_dev->device_extension_cache)) {
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
        res = loader_add_physical_device_extensions(
                    inst, physicalDevice,
                    phys_dev->this_icd->this_icd_lib->lib_name,
                    &phys_dev->device_extension_cache);
        if (res != VK_SUCCESS) {
            return res;
        }
    }
    /* make sure requested extensions to be enabled are supported */
    res = loader_validate_device_extensions(phys_dev, &inst->device_layer_list, pCreateInfo);
    if (res != VK_SUCCESS) {
        return res;
    }

    /*
     * NOTE: Need to filter the extensions to only those
     * supported by the ICD.
     * No ICD will advertise support for layers. An ICD
     * library could support a layer, but it would be
     * independent of the actual ICD, just in the same library.
     */
    filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionNameCount * sizeof(char *));
    if (!filtered_extension_names) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    /* Copy user's data */
    memcpy(&device_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo));

    /* ICD's do not use layers */
    device_create_info.enabledLayerNameCount = 0;
    device_create_info.ppEnabledLayerNames = NULL;

    device_create_info.enabledExtensionNameCount = 0;
    device_create_info.ppEnabledExtensionNames = (const char * const *) filtered_extension_names;

    // Keep only the extensions the ICD actually advertises.
    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) {
        const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
        VkExtensionProperties *prop = get_extension_property(extension_name,
                                      &phys_dev->device_extension_cache);
        if (prop) {
            filtered_extension_names[device_create_info.enabledExtensionNameCount] = (char *) extension_name;
            device_create_info.enabledExtensionNameCount++;
        }
    }

    // since physicalDevice object maybe wrapped by a layer need to get unwrapped version
    // we haven't yet called down the chain for the layer to unwrap the object
    /* NOTE(review): the filtered device_create_info built above is not passed
     * here — the ICD receives the application's unmodified pCreateInfo.
     * Looks like the filtered copy is dead; confirm against the intended
     * extension-filtering behavior before changing. */
    res = icd->CreateDevice(phys_dev->phys_dev, pCreateInfo, pAllocator, pDevice);
    if (res != VK_SUCCESS) {
        return res;
    }

    dev = loader_add_logical_device(inst, *pDevice, &icd->logical_device_list);
    if (dev == NULL) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    loader_init_dispatch(*pDevice, &dev->loader_dispatch);

    /* activate any layers on device chain which terminates with device*/
    res = loader_enable_device_layers(inst, icd, dev, pCreateInfo, &inst->device_layer_list);
    if (res != VK_SUCCESS) {
        loader_destroy_logical_device(inst, dev);
        return res;
    }
    loader_activate_device_layers(inst, dev, *pDevice);

    /* initialize any device extension dispatch entry's from the instance list*/
    loader_init_dispatch_dev_ext(inst, dev);

    /* finally can call down the chain */
    res = dev->loader_dispatch.core_dispatch.CreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);

    // Restore the terminator so later dispatches go straight to the ICD.
    dev->loader_dispatch.core_dispatch.CreateDevice = icd->CreateDevice;

    return res;
}

/**
 * Get an instance level or global level entry point address.
 * @param instance
 * @param pName
 * @return
 *    If instance == NULL returns a global level functions only
 *    If instance is valid returns a trampoline entry point for all dispatchable Vulkan
 *    functions both core and extensions.
 */
LOADER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char * pName)
{

    void *addr;

    addr = globalGetProcAddr(pName);
    if (instance == VK_NULL_HANDLE) {
        // get entrypoint addresses that are global (no dispatchable object)

        return addr;
    } else {
        // if a global entrypoint return NULL
        if (addr)
            return NULL;
    }

    struct loader_instance *ptr_instance = loader_get_instance(instance);
    if (ptr_instance == NULL)
        return NULL;
    // Return trampoline code for non-global entrypoints including any extensions.
    // Device extensions are returned if a layer or ICD supports the extension.
    // Instance extensions are returned if the extension is enabled and the loader
    // or someone else supports the extension
    return trampolineGetProcAddr(ptr_instance, pName);

}

/**
 * Get a device level or global level entry point address.
 * @param device
 * @param pName
 * @return
 *    If device is valid, returns a device relative entry point for device level
 *    entry points both core and extensions.
 *    Device relative means call down the device chain.
 */
LOADER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char * pName)
{
    void *addr;

    /* for entrypoints that loader must handle (ie non-dispatchable or create object)
       make sure the loader entrypoint is returned */
    addr = loader_non_passthrough_gdpa(pName);
    if (addr) {
        return addr;
    }

    /* Although CreateDevice is on device chain it's dispatchable object isn't
     * a VkDevice or child of VkDevice so return NULL.
     */
    if (!strcmp(pName, "CreateDevice"))
        return NULL;

    /* return the dispatch table entrypoint for the fastest case */
    // The first word of any dispatchable object is its dispatch table pointer.
    const VkLayerDispatchTable *disp_table = * (VkLayerDispatchTable **) device;
    if (disp_table == NULL)
        return NULL;

    addr = loader_lookup_device_dispatch_table(disp_table, pName);
    if (addr)
        return addr;

    // Unknown name: let the chain's own GetDeviceProcAddr resolve it
    // (e.g. device extensions the loader does not know about).
    if (disp_table->GetDeviceProcAddr == NULL)
        return NULL;
    return disp_table->GetDeviceProcAddr(device, pName);
}

/**
 * Exported vkEnumerateInstanceExtensionProperties.
 * With a layer name: scans layer manifests and reports that layer's instance
 * extensions (VK_ERROR_LAYER_NOT_PRESENT if not found).  Without one: scans
 * all ICDs and reports the merged, de-duplicated ICD/loader extension set.
 * Standard two-call idiom: pProperties == NULL queries the count.
 *
 * NOTE(review): when global_ext_list aliases icd_extensions (the no-layer
 * path), icd_extensions is destroyed before the trailing
 * copy_size < global_ext_list->count check reads it — so VK_INCOMPLETE may
 * never be reported on that path if destroy zeroes the count; verify.
 */
LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(
        const char* pLayerName,
        uint32_t* pPropertyCount,
        VkExtensionProperties* pProperties)
{
    struct loader_extension_list *global_ext_list=NULL;
    struct loader_layer_list instance_layers;
    struct loader_extension_list icd_extensions;
    struct loader_icd_libs icd_libs;
    uint32_t copy_size;

    // Called without an instance, so no TLS instance for alloc callbacks.
    tls_instance = NULL;
    memset(&icd_extensions, 0, sizeof(icd_extensions));
    memset(&instance_layers, 0, sizeof(instance_layers));
    loader_platform_thread_once(&once_init, loader_initialize);

    /* get layer libraries if needed */
    if (pLayerName && strlen(pLayerName) != 0) {
        loader_layer_scan(NULL, &instance_layers, NULL);
        for (uint32_t i = 0; i < instance_layers.count; i++) {
            struct loader_layer_properties *props = &instance_layers.list[i];
            if (strcmp(props->info.layerName, pLayerName) == 0) {
                global_ext_list = &props->instance_extension_list;
            }
        }
    }
    else {
        /* Scan/discover all ICD libraries */
        memset(&icd_libs, 0 , sizeof(struct loader_icd_libs));
        loader_icd_scan(NULL, &icd_libs);
        /* get extensions from all ICD's, merge so no duplicates */
        loader_get_icd_loader_instance_extensions(NULL, &icd_libs, &icd_extensions);
        loader_scanned_icd_clear(NULL, &icd_libs);
        global_ext_list = &icd_extensions;
    }

    if (global_ext_list == NULL) {
        // Requested layer was not found.
        loader_destroy_layer_list(NULL, &instance_layers);
        return VK_ERROR_LAYER_NOT_PRESENT;
    }

    if (pProperties == NULL) {
        // Count-query only.
        *pPropertyCount = global_ext_list->count;
        loader_destroy_layer_list(NULL, &instance_layers);
        loader_destroy_ext_list(NULL, &icd_extensions);
        return VK_SUCCESS;
    }

    copy_size = *pPropertyCount < global_ext_list->count ? *pPropertyCount : global_ext_list->count;
    for (uint32_t i = 0; i < copy_size; i++) {
        memcpy(&pProperties[i],
               &global_ext_list->list[i],
               sizeof(VkExtensionProperties));
    }
    *pPropertyCount = copy_size;
    loader_destroy_ext_list(NULL, &icd_extensions);

    if (copy_size < global_ext_list->count) {
        loader_destroy_layer_list(NULL, &instance_layers);
        return VK_INCOMPLETE;
    }

    loader_destroy_layer_list(NULL, &instance_layers);
    return VK_SUCCESS;
}

/**
 * Exported vkEnumerateInstanceLayerProperties.
 * Re-scans layer manifests on every call and reports the discovered
 * instance layers via the standard two-call idiom.
 */
LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(
        uint32_t* pPropertyCount,
        VkLayerProperties* pProperties)
{

    struct loader_layer_list instance_layer_list;
    tls_instance = NULL;

    loader_platform_thread_once(&once_init, loader_initialize);

    uint32_t copy_size;

    /* get layer libraries */
    memset(&instance_layer_list, 0, sizeof(instance_layer_list));
    loader_layer_scan(NULL, &instance_layer_list, NULL);

    if (pProperties == NULL) {
        // Count-query only.
        *pPropertyCount = instance_layer_list.count;
        loader_destroy_layer_list(NULL, &instance_layer_list);
        return VK_SUCCESS;
    }

    copy_size = (*pPropertyCount < instance_layer_list.count) ? *pPropertyCount : instance_layer_list.count;
    for (uint32_t i = 0; i < copy_size; i++) {
        memcpy(&pProperties[i], &instance_layer_list.list[i].info, sizeof(VkLayerProperties));
    }
    *pPropertyCount = copy_size;
    loader_destroy_layer_list(NULL, &instance_layer_list);

    if (copy_size < instance_layer_list.count) {
        return VK_INCOMPLETE;
    }

    return VK_SUCCESS;
}

/**
 * Terminator for vkEnumerateDeviceExtensionProperties.
 * With a layer name: reports that layer's device extensions from the
 * instance's device-layer list.  Without one: forwards to the ICD and, when
 * properties are returned, seeds the physical device's extension cache.
 */
VKAPI_ATTR VkResult VKAPI_CALL loader_EnumerateDeviceExtensionProperties(
        VkPhysicalDevice physicalDevice,
        const char* pLayerName,
        uint32_t* pPropertyCount,
        VkExtensionProperties* pProperties)
{
    struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice;
    uint32_t copy_size;

    uint32_t count;
    struct loader_extension_list *dev_ext_list=NULL;

    /* get layer libraries if needed */
    if (pLayerName && strlen(pLayerName) != 0) {
        for (uint32_t i = 0; i < phys_dev->this_instance->device_layer_list.count; i++) {
            struct loader_layer_properties *props = &phys_dev->this_instance->device_layer_list.list[i];
            if (strcmp(props->info.layerName, pLayerName) == 0) {
                dev_ext_list = &props->device_extension_list;
            }
        }
    }
    else {
        /* this case is during the call down the instance chain */
        struct loader_icd *icd = phys_dev->this_icd;
        VkResult res;
        res = icd->EnumerateDeviceExtensionProperties(phys_dev->phys_dev, NULL, pPropertyCount, pProperties);
        if (pProperties != NULL && res == VK_SUCCESS) {
            /* initialize dev_extension list within the physicalDevice object */
            res = loader_init_physical_device_extensions(phys_dev->this_instance,
                               phys_dev, *pPropertyCount, pProperties,
                               &phys_dev->device_extension_cache);
        }
        return res;
    }

    // Layer path: copy from the matched layer's extension list (count is 0
    // when the layer was not found).
    count = (dev_ext_list == NULL) ? 0: dev_ext_list->count;
    if (pProperties == NULL) {
        *pPropertyCount = count;
        return VK_SUCCESS;
    }

    copy_size = *pPropertyCount < count ? *pPropertyCount : count;
    for (uint32_t i = 0; i < copy_size; i++) {
        memcpy(&pProperties[i],
               &dev_ext_list->list[i],
               sizeof(VkExtensionProperties));
    }
    *pPropertyCount = copy_size;

    if (copy_size < count) {
        return VK_INCOMPLETE;
    }

    return VK_SUCCESS;
}

/**
 * Terminator for vkEnumerateDeviceLayerProperties.
 * Reports the instance's discovered device layers via the standard
 * two-call idiom.
 */
VKAPI_ATTR VkResult VKAPI_CALL loader_EnumerateDeviceLayerProperties(
        VkPhysicalDevice physicalDevice,
        uint32_t* pPropertyCount,
        VkLayerProperties* pProperties)
{
    uint32_t copy_size;
    struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice;

    uint32_t count = phys_dev->this_instance->device_layer_list.count;

    if (pProperties == NULL) {
        // Count-query only.
        *pPropertyCount = count;
        return VK_SUCCESS;
    }

    copy_size = (*pPropertyCount < count) ? *pPropertyCount : count;
    for (uint32_t i = 0; i < copy_size; i++) {
        memcpy(&pProperties[i], &(phys_dev->this_instance->device_layer_list.list[i].info), sizeof(VkLayerProperties));
    }
    *pPropertyCount = copy_size;

    if (copy_size < count) {
        return VK_INCOMPLETE;
    }

    return VK_SUCCESS;
}