/* loader.c, revision 42d509c61555720631361c4f3c2c3ad7e1daca18 */
/*
 *
 * Copyright (C) 2015 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Chia-I Wu <olvaffe@gmail.com>
 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
 * Author: Ian Elliott <ian@LunarG.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 *
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <string.h>

#include <sys/types.h>
#if defined(_WIN32)
#include "dirent_on_windows.h"
#else // _WIN32
#include <dirent.h>
#endif // _WIN32
#include "vk_loader_platform.h"
#include "loader.h"
#include "gpa_helper.h"
#include "table_ops.h"
#include "debug_report.h"
#include "wsi_swapchain.h"
#include "vulkan/vk_icd.h"
#include "cJSON.h"

static loader_platform_dl_handle loader_add_layer_lib(
        const struct loader_instance *inst,
        const char *chain_type,
        struct loader_layer_properties *layer_prop);

static void loader_remove_layer_lib(
        struct loader_instance *inst,
        struct loader_layer_properties *layer_prop);

struct loader_struct loader = {0};
// TLS for instance for alloc/free callbacks
THREAD_LOCAL_DECL struct loader_instance *tls_instance;

static bool loader_init_ext_list(
        const struct loader_instance *inst,
        struct loader_extension_list *ext_info);

static int loader_platform_combine_path(char *dest, int len, ...);

struct loader_phys_dev_per_icd {
    uint32_t count;
    VkPhysicalDevice *phys_devs;
};

enum loader_debug {
    LOADER_INFO_BIT  = 0x01,
    LOADER_WARN_BIT  = 0x02,
    LOADER_PERF_BIT  = 0x04,
    LOADER_ERROR_BIT = 0x08,
    LOADER_DEBUG_BIT = 0x10,
};

uint32_t g_loader_debug = 0;
uint32_t g_loader_log_msgs = 0;

// Thread-safety lock for accessing global data structures such as "loader".
// All entry points on the instance chain need to be locked except GPA;
// additionally, CreateDevice and DestroyDevice need to be locked.
loader_platform_thread_mutex loader_lock;
loader_platform_thread_mutex loader_json_lock;

// This table contains the loader's instance dispatch table, which contains
// default functions if no instance layers are activated. This contains
// pointers to "terminator functions".
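// For example, with no instance layers enabled, a call entering the loader's
// vkEnumeratePhysicalDevices trampoline is dispatched through this table to
// loader_EnumeratePhysicalDevices, the terminator entry registered below.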
const VkLayerInstanceDispatchTable instance_disp = {
    .GetInstanceProcAddr = vkGetInstanceProcAddr,
    .CreateInstance = loader_CreateInstance,
    .DestroyInstance = loader_DestroyInstance,
    .EnumeratePhysicalDevices = loader_EnumeratePhysicalDevices,
    .GetPhysicalDeviceFeatures = loader_GetPhysicalDeviceFeatures,
    .GetPhysicalDeviceFormatProperties = loader_GetPhysicalDeviceFormatProperties,
    .GetPhysicalDeviceImageFormatProperties = loader_GetPhysicalDeviceImageFormatProperties,
    .GetPhysicalDeviceProperties = loader_GetPhysicalDeviceProperties,
    .GetPhysicalDeviceQueueFamilyProperties = loader_GetPhysicalDeviceQueueFamilyProperties,
    .GetPhysicalDeviceMemoryProperties = loader_GetPhysicalDeviceMemoryProperties,
    .EnumerateDeviceExtensionProperties = loader_EnumerateDeviceExtensionProperties,
    .EnumerateDeviceLayerProperties = loader_EnumerateDeviceLayerProperties,
    .GetPhysicalDeviceSparseImageFormatProperties = loader_GetPhysicalDeviceSparseImageFormatProperties,
    .GetPhysicalDeviceSurfaceSupportKHR = loader_GetPhysicalDeviceSurfaceSupportKHR,
    .DbgCreateMsgCallback = loader_DbgCreateMsgCallback,
    .DbgDestroyMsgCallback = loader_DbgDestroyMsgCallback,
};

LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init);

void* loader_heap_alloc(
    const struct loader_instance *instance,
    size_t size,
    VkSystemAllocationScope alloc_scope)
{
    if (instance && instance->alloc_callbacks.pfnAllocation) {
        /* TODO: What should default alignment be? 1, 4, 8, other? */
        return instance->alloc_callbacks.pfnAllocation(instance->alloc_callbacks.pUserData, size, 4, alloc_scope);
    }
    return malloc(size);
}

void loader_heap_free(
    const struct loader_instance *instance,
    void *pMemory)
{
    if (pMemory == NULL) return;
    if (instance && instance->alloc_callbacks.pfnFree) {
        instance->alloc_callbacks.pfnFree(instance->alloc_callbacks.pUserData, pMemory);
        return;
    }
    free(pMemory);
}

void* loader_heap_realloc(
    const struct loader_instance *instance,
    void *pMemory,
    size_t orig_size,
    size_t size,
    VkSystemAllocationScope alloc_scope)
{
    if (pMemory == NULL || orig_size == 0)
        return loader_heap_alloc(instance, size, alloc_scope);
    if (size == 0) {
        loader_heap_free(instance, pMemory);
        return NULL;
    }
    if (instance && instance->alloc_callbacks.pfnAllocation) {
        if (size <= orig_size) {
            memset(((uint8_t *)pMemory) + size, 0, orig_size - size);
            return pMemory;
        }
        void *new_ptr = instance->alloc_callbacks.pfnAllocation(instance->alloc_callbacks.pUserData, size, 4, alloc_scope);
        if (!new_ptr)
            return NULL;
        memcpy(new_ptr, pMemory, orig_size);
        instance->alloc_callbacks.pfnFree(instance->alloc_callbacks.pUserData, pMemory);
        return new_ptr;
    }
    return realloc(pMemory, size);
}

void *loader_tls_heap_alloc(size_t size)
{
    return loader_heap_alloc(tls_instance, size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
}

void loader_tls_heap_free(void *pMemory)
{
    loader_heap_free(tls_instance, pMemory);
}

static void loader_log(VkFlags msg_type, int32_t msg_code,
                       const char *format, ...)
{
    char msg[512];
    va_list ap;
    int ret;

    if (!(msg_type & g_loader_log_msgs)) {
        return;
    }

    va_start(ap, format);
    ret = vsnprintf(msg, sizeof(msg), format, ap);
    if ((ret >= (int) sizeof(msg)) || ret < 0) {
        msg[sizeof(msg)-1] = '\0';
    }
    va_end(ap);

#if defined(WIN32)
    OutputDebugString(msg);
    OutputDebugString("\n");
#endif
    fputs(msg, stderr);
    fputc('\n', stderr);
}

#if defined(WIN32)
static char *loader_get_next_path(char *path);
/**
* Find the list of registry files (names within a key) in key "location".
*
* This function looks in the registry (hive = DEFAULT_VK_REGISTRY_HIVE) key as given in "location"
* for a list of name/value pairs which are added to a returned list (function return value).
* The DWORD values within the key must be 0 or they are skipped.
* Function return is a string with a ';' separated list of filenames.
* Function return is NULL if no valid name/value pairs are found in the key,
* or the key is not found.
*
* \returns
* A string list of filenames as pointer.
* When done using the returned string list, the pointer should be freed.
*/
static char *loader_get_registry_files(const struct loader_instance *inst, char *location)
{
    LONG rtn_value;
    HKEY hive, key;
    DWORD access_flags = KEY_QUERY_VALUE;
    char name[2048];
    char *out = NULL;
    char *loc = location;
    char *next;
    DWORD idx = 0;
    DWORD name_size = sizeof(name);
    DWORD value;
    DWORD total_size = 4096;
    DWORD value_size = sizeof(value);

    while (*loc)
    {
        next = loader_get_next_path(loc);
        hive = DEFAULT_VK_REGISTRY_HIVE;
        rtn_value = RegOpenKeyEx(hive, loc, 0, access_flags, &key);
        if (rtn_value != ERROR_SUCCESS) {
            // We didn't find the key. Try the 32-bit hive (where we've seen the
            // key end up on some people's systems):
            access_flags |= KEY_WOW64_32KEY;
            rtn_value = RegOpenKeyEx(hive, loc, 0, access_flags, &key);
            if (rtn_value != ERROR_SUCCESS) {
                // We still couldn't find the key, so give up:
                loc = next;
                continue;
            }
        }

        while ((rtn_value = RegEnumValue(key, idx++, name, &name_size, NULL, NULL, (LPBYTE) &value, &value_size)) == ERROR_SUCCESS) {
            if (value_size == sizeof(value) && value == 0) {
                if (out == NULL) {
                    out = loader_heap_alloc(inst, total_size, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
                    out[0] = '\0';
                }
                else if (strlen(out) + name_size + 1 > total_size) {
                    out = loader_heap_realloc(inst, out, total_size, total_size * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
                    total_size *= 2;
                }
                if (out == NULL) {
                    loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory, failed loader_get_registry_files");
                    return NULL;
                }
                if (strlen(out) == 0)
                    snprintf(out, name_size + 1, "%s", name);
                else
                    snprintf(out + strlen(out), name_size + 2, "%c%s", PATH_SEPERATOR, name);
            }
            name_size = 2048;
        }
        loc = next;
    }

    return out;
}

#endif // WIN32

/**
 * Combine path elements, separating each element with the platform-specific
 * directory separator, and save the combined string to a destination buffer,
 * not exceeding the given length. Path elements are given as variadic args,
 * with a NULL element terminating the list.
 *
 * \returns the total length of the combined string, not including an ASCII
 * NUL termination character.
 * This length may exceed the available storage: in this case, the written
 * string will be truncated to avoid a buffer overrun, and the return value
 * will be greater than or equal to the storage size. A NULL argument may be
 * provided as the destination buffer in order to determine the required
 * string length without actually writing a string.
 */

static int loader_platform_combine_path(char *dest, int len, ...)
{
    int required_len = 0;
    va_list ap;
    const char *component;

    va_start(ap, len);

    while ((component = va_arg(ap, const char *))) {
        if (required_len > 0) {
            // This path element is not the first non-empty element; prepend
            // a directory separator if space allows
            if (dest && required_len + 1 < len) {
                snprintf(dest + required_len, len - required_len, "%c",
                         DIRECTORY_SYMBOL);
            }
            required_len++;
        }

        if (dest && required_len < len) {
            strncpy(dest + required_len, component, len - required_len);
        }
        required_len += strlen(component);
    }

    va_end(ap);

    // strncpy(3) won't add a NUL terminating byte in the event of truncation.
    if (dest && required_len >= len) {
        dest[len - 1] = '\0';
    }

    return required_len;
}


/**
 * Given a string of the three-part form "maj.min.pat", convert it to a Vulkan
 * version number.
 */
static uint32_t loader_make_version(const char *vers_str)
{
    uint32_t vers = 0, major = 0, minor = 0, patch = 0;
    char *minor_str = NULL;
    char *patch_str = NULL;
    char *cstr;
    char *str;

    if (!vers_str)
        return vers;
    cstr = loader_stack_alloc(strlen(vers_str) + 1);
    strcpy(cstr, vers_str);
    while ((str = strchr(cstr, '.')) != NULL) {
        if (minor_str == NULL) {
            minor_str = str + 1;
            *str = '\0';
            major = atoi(cstr);
        }
        else if (patch_str == NULL) {
            patch_str = str + 1;
            *str = '\0';
            minor = atoi(minor_str);
        }
        else {
            return vers;
        }
        cstr = str + 1;
    }
    // Guard against version strings with fewer than two '.' separators so
    // atoi() is never handed a NULL pointer.
    if (minor_str != NULL && patch_str == NULL)
        minor = atoi(minor_str);
    if (patch_str != NULL)
        patch = atoi(patch_str);

    return VK_MAKE_VERSION(major, minor, patch);
}

bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2)
{
    return strcmp(op1->extensionName, op2->extensionName) == 0 ?
        true : false;
}

/**
 * Search the given ext_array for an extension
 * matching the given vk_ext_prop
 */
bool has_vk_extension_property_array(
    const VkExtensionProperties *vk_ext_prop,
    const uint32_t count,
    const VkExtensionProperties *ext_array)
{
    for (uint32_t i = 0; i < count; i++) {
        if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i]))
            return true;
    }
    return false;
}

/**
 * Search the given ext_list for an extension
 * matching the given vk_ext_prop
 */
bool has_vk_extension_property(
    const VkExtensionProperties *vk_ext_prop,
    const struct loader_extension_list *ext_list)
{
    for (uint32_t i = 0; i < ext_list->count; i++) {
        if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop))
            return true;
    }
    return false;
}

static inline bool loader_is_layer_type_device(const enum layer_type type) {
    if ((type & VK_LAYER_TYPE_DEVICE_EXPLICIT) ||
        (type & VK_LAYER_TYPE_DEVICE_IMPLICIT))
        return true;
    return false;
}

/*
 * Search the given layer list for a layer matching the given layer name
 */
static struct loader_layer_properties *loader_get_layer_property(
    const char *name,
    const struct loader_layer_list *layer_list)
{
    for (uint32_t i = 0; i < layer_list->count; i++) {
        const VkLayerProperties *item = &layer_list->list[i].info;
        if (strcmp(name, item->layerName) == 0)
            return &layer_list->list[i];
    }
    return NULL;
}

/**
 * Get the next unused layer property in the list. Init the property to zero.
 */
static struct loader_layer_properties *loader_get_next_layer_property(
    const struct loader_instance *inst,
    struct loader_layer_list *layer_list)
{
    if (layer_list->capacity == 0) {
        layer_list->list = loader_heap_alloc(inst,
                                             sizeof(struct loader_layer_properties) * 64,
                                             VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (layer_list->list == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't add any layer properties to list");
            return NULL;
        }
        memset(layer_list->list, 0, sizeof(struct loader_layer_properties) * 64);
        layer_list->capacity = sizeof(struct loader_layer_properties) * 64;
    }

    // ensure enough room to add an entry
    if ((layer_list->count + 1) * sizeof (struct loader_layer_properties)
            > layer_list->capacity) {
        layer_list->list = loader_heap_realloc(inst, layer_list->list,
                                               layer_list->capacity,
                                               layer_list->capacity * 2,
                                               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (layer_list->list == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0,
                       "realloc failed for layer list");
        }
        layer_list->capacity *= 2;
    }

    layer_list->count++;
    return &(layer_list->list[layer_list->count - 1]);
}

/**
 * Remove all layer property entries from the list
 */
void loader_delete_layer_properties(
    const struct loader_instance *inst,
    struct loader_layer_list *layer_list)
{
    uint32_t i;

    if (!layer_list)
        return;

    for (i = 0; i < layer_list->count; i++) {
        loader_destroy_ext_list(inst, &layer_list->list[i].instance_extension_list);
        loader_destroy_ext_list(inst, &layer_list->list[i].device_extension_list);
    }
    layer_list->count = 0;

    if (layer_list->capacity > 0) {
        layer_list->capacity = 0;
        loader_heap_free(inst, layer_list->list);
    }

}

static void loader_add_global_extensions(
    const struct
        loader_instance *inst,
    const PFN_vkEnumerateInstanceExtensionProperties fp_get_props,
    const char *lib_name,
    struct loader_extension_list *ext_list)
{
    uint32_t i, count;
    VkExtensionProperties *ext_props;
    VkResult res;

    if (!fp_get_props) {
        /* No EnumerateInstanceExtensionProperties defined */
        return;
    }

    res = fp_get_props(NULL, &count, NULL);
    if (res != VK_SUCCESS) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Error getting global extension count from %s", lib_name);
        return;
    }

    if (count == 0) {
        /* No ExtensionProperties to report */
        return;
    }

    ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));

    res = fp_get_props(NULL, &count, ext_props);
    if (res != VK_SUCCESS) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Error getting global extensions from %s", lib_name);
        return;
    }

    for (i = 0; i < count; i++) {
        char spec_version[64];

        snprintf(spec_version, sizeof(spec_version), "%d.%d.%d",
                 VK_MAJOR(ext_props[i].specVersion),
                 VK_MINOR(ext_props[i].specVersion),
                 VK_PATCH(ext_props[i].specVersion));
        loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
                   "Global Extension: %s (%s) version %s",
                   ext_props[i].extensionName, lib_name, spec_version);
        loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
    }

    return;
}

/*
 * Initialize ext_list with the physical device extensions.
 * The extension properties are passed as inputs in count and ext_props.
 */
static VkResult loader_init_physical_device_extensions(
    const struct loader_instance *inst,
    struct loader_physical_device *phys_dev,
    uint32_t count,
    VkExtensionProperties *ext_props,
    struct loader_extension_list *ext_list)
{
    VkResult res;
    uint32_t i;

    if (!loader_init_ext_list(inst, ext_list)) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    for (i = 0; i < count; i++) {
        char spec_version[64];

        snprintf(spec_version, sizeof (spec_version), "%d.%d.%d",
                 VK_MAJOR(ext_props[i].specVersion),
                 VK_MINOR(ext_props[i].specVersion),
                 VK_PATCH(ext_props[i].specVersion));
        loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
                   "PhysicalDevice Extension: %s (%s) version %s",
                   ext_props[i].extensionName, phys_dev->this_icd->this_icd_lib->lib_name, spec_version);
        res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
        if (res != VK_SUCCESS)
            return res;
    }

    return VK_SUCCESS;
}

static VkResult loader_add_physical_device_extensions(
    const struct loader_instance *inst,
    VkPhysicalDevice physical_device,
    const char *lib_name,
    struct loader_extension_list *ext_list)
{
    uint32_t i, count;
    VkResult res;
    VkExtensionProperties *ext_props;

    res = loader_EnumerateDeviceExtensionProperties(physical_device, NULL, &count, NULL);
    if (res == VK_SUCCESS && count > 0) {
        ext_props = loader_stack_alloc(count * sizeof (VkExtensionProperties));
        if (!ext_props)
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        res = loader_EnumerateDeviceExtensionProperties(physical_device, NULL, &count, ext_props);
        if (res != VK_SUCCESS)
            return res;
        for (i = 0; i < count; i++) {
            char spec_version[64];

            snprintf(spec_version, sizeof (spec_version), "%d.%d.%d",
                     VK_MAJOR(ext_props[i].specVersion),
                     VK_MINOR(ext_props[i].specVersion),
                     VK_PATCH(ext_props[i].specVersion));
            loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
                       "PhysicalDevice Extension: %s (%s)"
                       " version %s",
                       ext_props[i].extensionName, lib_name, spec_version);
            res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
            if (res != VK_SUCCESS)
                return res;
        }
    } else {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Error getting physical device extension info count from library %s", lib_name);
        return res;
    }

    return VK_SUCCESS;
}

static bool loader_init_ext_list(const struct loader_instance *inst,
                                 struct loader_extension_list *ext_info)
{
    ext_info->capacity = 32 * sizeof(VkExtensionProperties);
    ext_info->list = loader_heap_alloc(inst, ext_info->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (ext_info->list == NULL) {
        return false;
    }
    memset(ext_info->list, 0, ext_info->capacity);
    ext_info->count = 0;
    return true;
}

void loader_destroy_ext_list(const struct loader_instance *inst,
                             struct loader_extension_list *ext_info)
{
    loader_heap_free(inst, ext_info->list);
    ext_info->count = 0;
    ext_info->capacity = 0;
}

/*
 * Append non-duplicate extension properties defined in props
 * to the given ext_list.
 * Return
 *  VK_SUCCESS on success
 */
VkResult loader_add_to_ext_list(
        const struct loader_instance *inst,
        struct loader_extension_list *ext_list,
        uint32_t prop_list_count,
        const VkExtensionProperties *props)
{
    uint32_t i;
    const VkExtensionProperties *cur_ext;

    if (ext_list->list == NULL || ext_list->capacity == 0) {
        loader_init_ext_list(inst, ext_list);
    }

    if (ext_list->list == NULL)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    for (i = 0; i < prop_list_count; i++) {
        cur_ext = &props[i];

        // look for duplicates
        if (has_vk_extension_property(cur_ext, ext_list)) {
            continue;
        }

        // add to list at end
        // check for enough capacity
        if (ext_list->count * sizeof(VkExtensionProperties)
                >= ext_list->capacity) {

            ext_list->list = loader_heap_realloc(inst,
                                                 ext_list->list,
                                                 ext_list->capacity,
                                                 ext_list->capacity * 2,
                                                 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);

            if (ext_list->list == NULL)
                return VK_ERROR_OUT_OF_HOST_MEMORY;

            // double capacity
            ext_list->capacity *= 2;
        }

        memcpy(&ext_list->list[ext_list->count], cur_ext, sizeof(VkExtensionProperties));
        ext_list->count++;
    }
    return VK_SUCCESS;
}

/**
 * Search the given search_list for any layers in the props list.
 * Add these to the output layer_list. Don't add duplicates to the output layer_list.
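 * Returns VK_ERROR_LAYER_NOT_PRESENT if any of the given names cannot be
 * found in search_list; the remaining names are still searched and added.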
 */
static VkResult loader_add_layer_names_to_list(
        const struct loader_instance *inst,
        struct loader_layer_list *output_list,
        uint32_t name_count,
        const char * const *names,
        const struct loader_layer_list *search_list)
{
    struct loader_layer_properties *layer_prop;
    VkResult err = VK_SUCCESS;

    for (uint32_t i = 0; i < name_count; i++) {
        const char *search_target = names[i];
        layer_prop = loader_get_layer_property(search_target, search_list);
        if (!layer_prop) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Unable to find layer %s", search_target);
            err = VK_ERROR_LAYER_NOT_PRESENT;
            continue;
        }

        loader_add_to_layer_list(inst, output_list, 1, layer_prop);
    }

    return err;
}


/*
 * Manage lists of VkLayerProperties
 */
static bool loader_init_layer_list(const struct loader_instance *inst,
                                   struct loader_layer_list *list)
{
    list->capacity = 32 * sizeof(struct loader_layer_properties);
    list->list = loader_heap_alloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (list->list == NULL) {
        return false;
    }
    memset(list->list, 0, list->capacity);
    list->count = 0;
    return true;
}

void loader_destroy_layer_list(const struct loader_instance *inst,
                               struct loader_layer_list *layer_list)
{
    loader_heap_free(inst, layer_list->list);
    layer_list->count = 0;
    layer_list->capacity = 0;
}

/*
 * Manage list of layer libraries (loader_lib_info)
 */
static bool loader_init_layer_library_list(const struct loader_instance *inst,
                                           struct loader_layer_library_list *list)
{
    list->capacity = 32 * sizeof(struct loader_lib_info);
    list->list = loader_heap_alloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (list->list == NULL) {
        return false;
    }
    memset(list->list, 0, list->capacity);
    list->count = 0;
    return true;
}

void loader_destroy_layer_library_list(const struct loader_instance *inst,
                                       struct loader_layer_library_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        loader_heap_free(inst, list->list[i].lib_name);
    }
    loader_heap_free(inst, list->list);
    list->count = 0;
    list->capacity = 0;
}

void loader_add_to_layer_library_list(
        const struct loader_instance *inst,
        struct loader_layer_library_list *list,
        uint32_t item_count,
        const struct loader_lib_info *new_items)
{
    uint32_t i;
    struct loader_lib_info *item;

    if (list->list == NULL || list->capacity == 0) {
        loader_init_layer_library_list(inst, list);
    }

    if (list->list == NULL)
        return;

    for (i = 0; i < item_count; i++) {
        item = (struct loader_lib_info *) &new_items[i];

        // look for duplicates
        bool duplicate = false;
        for (uint32_t j = 0; j < list->count; j++) {
            if (strcmp(list->list[j].lib_name, item->lib_name) == 0) {
                duplicate = true;
                break;
            }
        }
        if (duplicate)
            continue;

        // add to list at end
        // check for enough capacity
        if (list->count * sizeof(struct loader_lib_info)
                >= list->capacity) {

            list->list = loader_heap_realloc(inst,
                                             list->list,
                                             list->capacity,
                                             list->capacity * 2,
                                             VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
            // double capacity
            list->capacity *= 2;
        }

        memcpy(&list->list[list->count], item, sizeof(struct loader_lib_info));
        list->count++;
    }
}


/*
 * Search the given layer list for a layer
 * matching the given VkLayerProperties
 */
bool
has_vk_layer_property(
        const VkLayerProperties *vk_layer_prop,
        const struct loader_layer_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        if (strcmp(vk_layer_prop->layerName, list->list[i].info.layerName) == 0)
            return true;
    }
    return false;
}

/*
 * Search the given layer list for a layer
 * matching the given name
 */
bool has_layer_name(
        const char *name,
        const struct loader_layer_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        if (strcmp(name, list->list[i].info.layerName) == 0)
            return true;
    }
    return false;
}

/*
 * Append non-duplicate layer properties defined in prop_list
 * to the given layer_info list
 */
void loader_add_to_layer_list(
        const struct loader_instance *inst,
        struct loader_layer_list *list,
        uint32_t prop_list_count,
        const struct loader_layer_properties *props)
{
    uint32_t i;
    struct loader_layer_properties *layer;

    if (list->list == NULL || list->capacity == 0) {
        loader_init_layer_list(inst, list);
    }

    if (list->list == NULL)
        return;

    for (i = 0; i < prop_list_count; i++) {
        layer = (struct loader_layer_properties *) &props[i];

        // look for duplicates
        if (has_vk_layer_property(&layer->info, list)) {
            continue;
        }

        // add to list at end
        // check for enough capacity
        if (list->count * sizeof(struct loader_layer_properties)
                >= list->capacity) {

            list->list = loader_heap_realloc(inst,
                                             list->list,
                                             list->capacity,
                                             list->capacity * 2,
                                             VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
            // double capacity
            list->capacity *= 2;
        }

        memcpy(&list->list[list->count], layer, sizeof(struct loader_layer_properties));
        list->count++;
    }
}

/**
 * Search the search_list for any layer with a name
 * that matches the given name and a type that matches the given type.
 * Add all matching layers to the found_list.
 * Do not add if the found loader_layer_properties is already
 * on the found_list.
 */
static void loader_find_layer_name_add_list(
        const struct loader_instance *inst,
        const char *name,
        const enum layer_type type,
        const struct loader_layer_list *search_list,
        struct loader_layer_list *found_list)
{
    bool found = false;
    for (uint32_t i = 0; i < search_list->count; i++) {
        struct loader_layer_properties *layer_prop = &search_list->list[i];
        if (0 == strcmp(layer_prop->info.layerName, name) &&
                (layer_prop->type & type)) {
            /* Found a layer with the same name, add to found_list */
            loader_add_to_layer_list(inst, found_list, 1, layer_prop);
            found = true;
        }
    }
    if (!found) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Warning, couldn't find layer name %s to activate", name);
    }
}

static VkExtensionProperties *get_extension_property(
        const char *name,
        const struct loader_extension_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        if (strcmp(name, list->list[i].extensionName) == 0)
            return &list->list[i];
    }
    return NULL;
}

/*
 * For global extensions implemented within the loader (i.e. DEBUG_REPORT),
 * the extension must provide two entry points for the loader to use:
 * - "trampoline" entry point - this is the address returned by GetProcAddr
 *   and will always do what's necessary to support a global call.
 * - "terminator" function - this function will be put at the end of the
 *   instance chain and will contain the necessary logic to call / process
 *   the extension for the appropriate ICDs that are available.
 * There is no generic mechanism for including these functions; the references
 * must be placed into the appropriate loader entry points.
 * GetInstanceProcAddr: call extension GetInstanceProcAddr to check for GetProcAddr requests
 * loader_coalesce_extensions(void) - add extension records to the list of global
 * extensions available to the app.
 * instance_disp - add function pointer for terminator function to this array.
 * The extension itself should be in a separate file that will be
 * linked directly with the loader.
 */

void loader_get_icd_loader_instance_extensions(
        const struct loader_instance *inst,
        struct loader_icd_libs *icd_libs,
        struct loader_extension_list *inst_exts)
{
    struct loader_extension_list icd_exts;
    loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, "Build ICD instance extension list");
    // traverse scanned icd list adding non-duplicate extensions to the list
    for (uint32_t i = 0; i < icd_libs->count; i++) {
        loader_init_ext_list(inst, &icd_exts);
        loader_add_global_extensions(inst, icd_libs->list[i].EnumerateInstanceExtensionProperties,
                                     icd_libs->list[i].lib_name,
                                     &icd_exts);
        loader_add_to_ext_list(inst, inst_exts,
                               icd_exts.count,
                               icd_exts.list);
        loader_destroy_ext_list(inst, &icd_exts);
    }

    // Traverse loader's extensions, adding non-duplicate extensions to the list
    wsi_swapchain_add_instance_extensions(inst, inst_exts);
    debug_report_add_instance_extensions(inst, inst_exts);
}

struct loader_icd *loader_get_icd_and_device(const VkDevice device,
                                             struct loader_device **found_dev)
{
    *found_dev = NULL;
    for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
        for (struct loader_icd *icd = inst->icds; icd; icd = icd->next) {
            for (struct loader_device *dev = icd->logical_device_list; dev; dev = dev->next)
                /* Value comparison of device prevents object wrapping by layers */
                if (loader_get_dispatch(dev->device) == loader_get_dispatch(device)) {
                    *found_dev = dev;
                    return icd;
                }
        }
    }
    return NULL;
}

static void loader_destroy_logical_device(const struct loader_instance *inst,
                                          struct loader_device *dev)
{
    loader_heap_free(inst, dev->app_extension_props);
    if (dev->activated_layer_list.count)
        loader_destroy_layer_list(inst, &dev->activated_layer_list);
    loader_heap_free(inst, dev);
}

static struct loader_device *loader_add_logical_device(
        const struct loader_instance *inst,
        const VkDevice dev,
        struct loader_device **device_list)
{
    struct loader_device *new_dev;

    new_dev = loader_heap_alloc(inst, sizeof(struct loader_device), VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
    if (!new_dev) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc struct loader_device");
        return NULL;
    }

    memset(new_dev, 0, sizeof(struct loader_device));

    new_dev->next = *device_list;
    new_dev->device = dev;
    *device_list = new_dev;
    return new_dev;
}

void loader_remove_logical_device(
        const struct loader_instance *inst,
        VkDevice device)
{
    struct loader_device *found_dev, *dev, *prev_dev;
    struct loader_icd *icd;
    icd = loader_get_icd_and_device(device,
                                    &found_dev);

    if (!icd || !found_dev)
        return;

    prev_dev = NULL;
    dev = icd->logical_device_list;
    while (dev && dev != found_dev) {
        prev_dev = dev;
        dev = dev->next;
    }

    if (prev_dev)
        prev_dev->next = found_dev->next;
    else
        icd->logical_device_list = found_dev->next;
    loader_destroy_logical_device(inst, found_dev);
}


static void loader_icd_destroy(
        struct loader_instance *ptr_inst,
        struct loader_icd *icd)
{
    ptr_inst->total_icd_count--;
    for (struct loader_device *dev = icd->logical_device_list; dev; ) {
        struct loader_device *next_dev = dev->next;
        loader_destroy_logical_device(ptr_inst, dev);
        dev = next_dev;
    }

    loader_heap_free(ptr_inst, icd);
}

static struct loader_icd * loader_icd_create(const struct loader_instance *inst)
{
    struct loader_icd *icd;

    icd = loader_heap_alloc(inst, sizeof(*icd), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (!icd)
        return NULL;

    memset(icd, 0, sizeof(*icd));

    return icd;
}

static struct loader_icd *loader_icd_add(
        struct loader_instance *ptr_inst,
        const struct loader_scanned_icds *icd_lib)
{
    struct loader_icd *icd;

    icd = loader_icd_create(ptr_inst);
    if (!icd)
        return NULL;

    icd->this_icd_lib = icd_lib;
    icd->this_instance = ptr_inst;

    /* prepend to the list */
    icd->next = ptr_inst->icds;
    ptr_inst->icds = icd;
    ptr_inst->total_icd_count++;

    return icd;
}

void loader_scanned_icd_clear(
        const struct loader_instance *inst,
        struct loader_icd_libs *icd_libs)
{
    if (icd_libs->capacity == 0)
        return;
    for (uint32_t i = 0; i < icd_libs->count; i++) {
        loader_platform_close_library(icd_libs->list[i].handle);
        loader_heap_free(inst, icd_libs->list[i].lib_name);
    }
    loader_heap_free(inst, icd_libs->list);
    icd_libs->capacity = 0;
    icd_libs->count = 0;
    icd_libs->list = NULL;
}

static void loader_scanned_icd_init(const struct loader_instance *inst,
                                    struct loader_icd_libs *icd_libs)
{
    loader_scanned_icd_clear(inst, icd_libs);
    icd_libs->capacity = 8 * sizeof(struct loader_scanned_icds);
    icd_libs->list = loader_heap_alloc(inst, icd_libs->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
}

static void loader_scanned_icd_add(
        const struct loader_instance *inst,
        struct loader_icd_libs *icd_libs,
        const char *filename)
{
    loader_platform_dl_handle handle;
    PFN_vkCreateInstance fp_create_inst;
    PFN_vkEnumerateInstanceExtensionProperties fp_get_global_ext_props;
    PFN_vkGetInstanceProcAddr fp_get_proc_addr;
    struct loader_scanned_icds *new_node;

    /* TODO implement ref counting of libraries, for now this function leaves
       libraries open and the scanned_icd_clear closes them */
    // Used to call: dlopen(filename, RTLD_LAZY);
    handle = loader_platform_open_library(filename);
    if (!handle) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, loader_platform_open_library_error(filename));
        return;
    }

#define LOOKUP_LD(func_ptr, func) do {                                                \
    func_ptr = (PFN_vk ##func) loader_platform_get_proc_address(handle, "vk" #func); \
    if (!func_ptr) {                                                                  \
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, loader_platform_get_proc_address_error("vk" #func)); \
        return;                                                                       \
    }                                                                                 \
} while (0)

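    // Every ICD library must expose these three entry points; if any lookup
    // fails, LOOKUP_LD logs a warning and this ICD is skipped via the early
    // return in the macro above.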
    LOOKUP_LD(fp_get_proc_addr, GetInstanceProcAddr);
    LOOKUP_LD(fp_create_inst, CreateInstance);
    LOOKUP_LD(fp_get_global_ext_props, EnumerateInstanceExtensionProperties);

#undef LOOKUP_LD

    // check for enough capacity
    if ((icd_libs->count * sizeof(struct loader_scanned_icds)) >= icd_libs->capacity) {

        icd_libs->list = loader_heap_realloc(inst,
                                             icd_libs->list,
                                             icd_libs->capacity,
                                             icd_libs->capacity * 2,
                                             VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        // double capacity
        icd_libs->capacity *= 2;
    }
    new_node = &(icd_libs->list[icd_libs->count]);

    new_node->handle = handle;
    new_node->GetInstanceProcAddr = fp_get_proc_addr;
    new_node->CreateInstance = fp_create_inst;
    new_node->EnumerateInstanceExtensionProperties = fp_get_global_ext_props;

    new_node->lib_name = (char *) loader_heap_alloc(inst,
                                                    strlen(filename) + 1,
                                                    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (!new_node->lib_name) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Out of memory can't add icd");
        return;
    }
    strcpy(new_node->lib_name, filename);
    icd_libs->count++;
}

static bool loader_icd_init_entrys(struct loader_icd *icd,
                                   VkInstance inst,
                                   const PFN_vkGetInstanceProcAddr fp_gipa)
{
    /* initialize entrypoint function pointers */

    #define LOOKUP_GIPA(func, required) do {                            \
    icd->func = (PFN_vk ##func) fp_gipa(inst, "vk" #func);              \
    if (!icd->func && required) {                                       \
        loader_log(VK_DBG_REPORT_WARN_BIT, 0,                           \
                   loader_platform_get_proc_address_error("vk" #func)); \
        return false;                                                   \
    }                                                                   \
    } while (0)

    LOOKUP_GIPA(GetDeviceProcAddr, true);
    LOOKUP_GIPA(DestroyInstance, true);
    LOOKUP_GIPA(EnumeratePhysicalDevices, true);
    LOOKUP_GIPA(GetPhysicalDeviceFeatures, true);
    LOOKUP_GIPA(GetPhysicalDeviceFormatProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceImageFormatProperties, true);
    LOOKUP_GIPA(CreateDevice, true);
    LOOKUP_GIPA(GetPhysicalDeviceProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceMemoryProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceQueueFamilyProperties, true);
    LOOKUP_GIPA(EnumerateDeviceExtensionProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceSparseImageFormatProperties, true);
    LOOKUP_GIPA(DbgCreateMsgCallback, false);
    LOOKUP_GIPA(DbgDestroyMsgCallback, false);
    LOOKUP_GIPA(GetPhysicalDeviceSurfaceSupportKHR, false);

#undef LOOKUP_GIPA

    return true;
}

static void loader_debug_init(void)
{
    const char *env;

    if (g_loader_debug > 0)
        return;

    g_loader_debug = 0;

    /* parse comma-separated debug options */
    env = getenv("VK_LOADER_DEBUG");
    while (env) {
        const char *p = strchr(env, ',');
        size_t len;

        if (p)
            len = p - env;
        else
            len = strlen(env);

        if (len > 0) {
            if (strncmp(env, "warn", len) == 0) {
                g_loader_debug |= LOADER_WARN_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_WARN_BIT;
            } else if (strncmp(env, "info", len) == 0) {
                g_loader_debug |= LOADER_INFO_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_INFO_BIT;
            } else if (strncmp(env, "perf", len) == 0) {
                g_loader_debug |= LOADER_PERF_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_PERF_WARN_BIT;
            } else if (strncmp(env, "error", len) == 0) {
                g_loader_debug |= LOADER_ERROR_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_ERROR_BIT;
            } else if (strncmp(env, "debug", len) ==
                       0) {
                g_loader_debug |= LOADER_DEBUG_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_DEBUG_BIT;
            }
        }

        if (!p)
            break;

        env = p + 1;
    }
}

void loader_initialize(void)
{
    // initialize mutexes
    loader_platform_thread_create_mutex(&loader_lock);
    loader_platform_thread_create_mutex(&loader_json_lock);

    // initialize logging
    loader_debug_init();

    // initialize cJSON to use the loader's allocation callbacks
    cJSON_Hooks alloc_fns = {
        .malloc_fn = loader_tls_heap_alloc,
        .free_fn = loader_tls_heap_free,
    };
    cJSON_InitHooks(&alloc_fns);
}

struct loader_manifest_files {
    uint32_t count;
    char **filename_list;
};

/**
 * Get next file or dirname given a string list or registry key path
 *
 * \returns
 * A pointer to the first char in the next path. The given path is
 * NUL-terminated at the separator; the return value points at the remainder
 * of the list (or at the terminating NUL when no entries remain).
 * Note: input string is modified in some cases. PASS IN A COPY!
 */
static char *loader_get_next_path(char *path)
{
    uint32_t len;
    char *next;

    if (path == NULL)
        return NULL;
    next = strchr(path, PATH_SEPERATOR);
    if (next == NULL) {
        len = (uint32_t) strlen(path);
        next = path + len;
    }
    else {
        *next = '\0';
        next++;
    }

    return next;
}

/**
 * Given a path which is absolute or relative, expand the path if relative or
 * leave the path unmodified if absolute. The base path to prepend to relative
 * paths is given in rel_base.
 *
 * \returns
 * A string in out_fullpath of the full absolute path
 */
static void loader_expand_path(const char *path,
                               const char *rel_base,
                               size_t out_size,
                               char *out_fullpath)
{
    if (loader_platform_is_path_absolute(path)) {
        // do not prepend a base to an absolute path
        rel_base = "";
    }

    loader_platform_combine_path(out_fullpath, out_size, rel_base, path, NULL);
}

/**
 * Given a filename (file) and a list of paths (dir), try to find an existing
 * file in the paths. If filename is already a path, no searching in the given
 * paths is done.
 *
 * \returns
 * A string in out_fullpath of either the full path or file.
 */
static void loader_get_fullpath(const char *file,
                                const char *dirs,
                                size_t out_size,
                                char *out_fullpath)
{
    if (!loader_platform_is_path(file) && *dirs) {
        char *dirs_copy, *dir, *next_dir;

        dirs_copy = loader_stack_alloc(strlen(dirs) + 1);
        strcpy(dirs_copy, dirs);

        // find if file exists after prepending paths in given list
        for (dir = dirs_copy;
             *dir && (next_dir = loader_get_next_path(dir));
             dir = next_dir) {
            loader_platform_combine_path(out_fullpath, out_size, dir, file, NULL);
            if (loader_platform_file_exists(out_fullpath)) {
                return;
            }
        }
    }

    snprintf(out_fullpath, out_size, "%s", file);
}

/**
 * Read a JSON file into a buffer.
 *
 * \returns
 * A pointer to a cJSON object representing the JSON parse tree.
 * This returned buffer should be freed by the caller.
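 * (typically released with cJSON_Delete); NULL is returned if the file cannot
 * be opened, read, or parsed.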
 */
static cJSON *loader_get_json(const char *filename)
{
    FILE *file;
    char *json_buf;
    cJSON *json;
    uint64_t len;
    file = fopen(filename, "rb");
    if (!file) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Couldn't open JSON file %s", filename);
        return NULL;
    }
    fseek(file, 0, SEEK_END);
    len = ftell(file);
    fseek(file, 0, SEEK_SET);
    json_buf = (char*) loader_stack_alloc(len + 1);
    if (json_buf == NULL) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get JSON file");
        fclose(file);
        return NULL;
    }
    if (fread(json_buf, sizeof(char), len, file) != len) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "fread failed can't get JSON file");
        fclose(file);
        return NULL;
    }
    fclose(file);
    json_buf[len] = '\0';

    // parse text from file
    json = cJSON_Parse(json_buf);
    if (json == NULL)
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Can't parse JSON file %s", filename);
    return json;
}

/**
 * Do a deep copy of the loader_layer_properties structure.
 */
static void loader_copy_layer_properties(
        const struct loader_instance *inst,
        struct loader_layer_properties *dst,
        struct loader_layer_properties *src)
{
    memcpy(dst, src, sizeof (*src));
    dst->instance_extension_list.list = loader_heap_alloc(
        inst,
        sizeof(VkExtensionProperties) *
        src->instance_extension_list.count,
        VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    dst->instance_extension_list.capacity = sizeof(VkExtensionProperties) *
        src->instance_extension_list.count;
    memcpy(dst->instance_extension_list.list, src->instance_extension_list.list,
           dst->instance_extension_list.capacity);
    dst->device_extension_list.list = loader_heap_alloc(
        inst,
        sizeof(VkExtensionProperties) *
        src->device_extension_list.count,
        VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    dst->device_extension_list.capacity = sizeof(VkExtensionProperties) *
        src->device_extension_list.count;
    memcpy(dst->device_extension_list.list, src->device_extension_list.list,
           dst->device_extension_list.capacity);
}

/**
 * Given a cJSON struct (json) of the top level JSON object from a layer
 * manifest file, add an entry to the layer_list.
 * Fill out the layer_properties in this list entry from the input cJSON object.
 *
 * \returns
 * void
 * layer_list has a new entry and is initialized accordingly.
 * If the json input object does not have all the required fields, no entry
 * is added to the list.
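 * A layer of type "GLOBAL" is added to both the instance and the device list
 * when both lists are supplied.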
 */
static void loader_add_layer_properties(const struct loader_instance *inst,
                                        struct loader_layer_list *layer_instance_list,
                                        struct loader_layer_list *layer_device_list,
                                        cJSON *json,
                                        bool is_implicit,
                                        char *filename)
{
    /* Fields in layer manifest file that are required:
     * (required) "file_format_version"
     * following are required in the "layer" object:
     * (required) "name"
     * (required) "type"
     * (required) "library_path"
     * (required) "abi_versions"
     * (required) "implementation_version"
     * (required) "description"
     * (required for implicit layers) "disable_environment"
     *
     * First get all required items and if any missing abort
     */

    cJSON *item, *layer_node, *ext_item;
    char *temp;
    char *name, *type, *library_path, *abi_versions;
    char *implementation_version, *description;
    cJSON *disable_environment;
    int i;
    VkExtensionProperties ext_prop;
    item = cJSON_GetObjectItem(json, "file_format_version");
    if (item == NULL) {
        return;
    }
    char *file_vers = cJSON_PrintUnformatted(item);
    loader_log(VK_DBG_REPORT_INFO_BIT, 0, "Found manifest file %s, version %s",
               filename, file_vers);
    if (strcmp(file_vers, "\"0.9.0\"") != 0)
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Unexpected manifest file version (expected 0.9.0), may cause errors");
    loader_tls_heap_free(file_vers);

    layer_node = cJSON_GetObjectItem(json, "layer");
    if (layer_node == NULL) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"layer\" object in manifest JSON file, skipping");
        return;
    }

    // loop through all "layer" objects in the file
    do {
#define GET_JSON_OBJECT(node, var) {                  \
        var = cJSON_GetObjectItem(node, #var);        \
        if (var == NULL) {                            \
            layer_node = layer_node->next;            \
            continue;                                 \
        }                                             \
        }
#define GET_JSON_ITEM(node, var) {                    \
        item = cJSON_GetObjectItem(node, #var);       \
        if (item == NULL) {                           \
            layer_node = layer_node->next;            \
            continue;                                 \
        }                                             \
        temp = cJSON_Print(item);                     \
        temp[strlen(temp) - 1] = '\0';                \
        var = loader_stack_alloc(strlen(temp) + 1);   \
        strcpy(var, &temp[1]);                        \
        loader_tls_heap_free(temp);                   \
        }
        GET_JSON_ITEM(layer_node, name)
        GET_JSON_ITEM(layer_node, type)
        GET_JSON_ITEM(layer_node, library_path)
        GET_JSON_ITEM(layer_node, abi_versions)
        GET_JSON_ITEM(layer_node, implementation_version)
        GET_JSON_ITEM(layer_node, description)
        if (is_implicit) {
            GET_JSON_OBJECT(layer_node, disable_environment)
        }
#undef GET_JSON_ITEM
#undef GET_JSON_OBJECT

        // add list entry
        struct loader_layer_properties *props = NULL;
        if (!strcmp(type, "DEVICE")) {
            if (layer_device_list == NULL) {
                layer_node = layer_node->next;
                continue;
            }
            props = loader_get_next_layer_property(inst, layer_device_list);
            props->type = (is_implicit) ? VK_LAYER_TYPE_DEVICE_IMPLICIT : VK_LAYER_TYPE_DEVICE_EXPLICIT;
        }
        if (!strcmp(type, "INSTANCE")) {
            if (layer_instance_list == NULL) {
                layer_node = layer_node->next;
                continue;
            }
            props = loader_get_next_layer_property(inst, layer_instance_list);
            props->type = (is_implicit) ?
                    VK_LAYER_TYPE_INSTANCE_IMPLICIT : VK_LAYER_TYPE_INSTANCE_EXPLICIT;
        }
        if (!strcmp(type, "GLOBAL")) {
            if (layer_instance_list != NULL)
                props = loader_get_next_layer_property(inst, layer_instance_list);
            else if (layer_device_list != NULL)
                props = loader_get_next_layer_property(inst, layer_device_list);
            else {
                layer_node = layer_node->next;
                continue;
            }
            props->type = (is_implicit) ? VK_LAYER_TYPE_GLOBAL_IMPLICIT : VK_LAYER_TYPE_GLOBAL_EXPLICIT;
        }

        if (props == NULL) {
            layer_node = layer_node->next;
            continue;
        }

        strncpy(props->info.layerName, name, sizeof (props->info.layerName));
        props->info.layerName[sizeof (props->info.layerName) - 1] = '\0';

        char *fullpath = props->lib_name;
        char *rel_base;
        if (loader_platform_is_path(filename)) {
            // a relative or absolute path
            char *name_copy = loader_stack_alloc(strlen(filename) + 1);
            strcpy(name_copy, filename);
            rel_base = loader_platform_dirname(name_copy);
            loader_expand_path(library_path, rel_base, MAX_STRING_SIZE, fullpath);
        } else {
            // a filename which is assumed to be in a system directory
            loader_get_fullpath(library_path, DEFAULT_VK_LAYERS_PATH, MAX_STRING_SIZE, fullpath);
        }
        props->info.specVersion = loader_make_version(abi_versions);
        props->info.implementationVersion = loader_make_version(implementation_version);
        strncpy((char *) props->info.description, description, sizeof (props->info.description));
        props->info.description[sizeof (props->info.description) - 1] = '\0';
        if (is_implicit) {
            strncpy(props->disable_env_var.name, disable_environment->child->string, sizeof (props->disable_env_var.name));
            props->disable_env_var.name[sizeof (props->disable_env_var.name) - 1] = '\0';
            strncpy(props->disable_env_var.value, disable_environment->child->valuestring, sizeof (props->disable_env_var.value));
            props->disable_env_var.value[sizeof (props->disable_env_var.value) - 1] = '\0';
        }

        /**
         * Now get all optional items and objects and put in list:
         * functions
         * instance_extensions
         * device_extensions
         * enable_environment (implicit layers only)
         */
#define GET_JSON_OBJECT(node, var) {                    \
        var = cJSON_GetObjectItem(node, #var);          \
        }
#define GET_JSON_ITEM(node, var) {                      \
        item = cJSON_GetObjectItem(node, #var);         \
        if (item != NULL) {                             \
            temp = cJSON_Print(item);                   \
            temp[strlen(temp) - 1] = '\0';              \
            var = loader_stack_alloc(strlen(temp) + 1); \
            strcpy(var, &temp[1]);                      \
            loader_tls_heap_free(temp);                 \
        }                                               \
        }

        cJSON *instance_extensions, *device_extensions, *functions, *enable_environment;
        char *vkGetInstanceProcAddr = NULL, *vkGetDeviceProcAddr = NULL, *version = NULL;
        GET_JSON_OBJECT(layer_node, functions)
        if (functions != NULL) {
            GET_JSON_ITEM(functions, vkGetInstanceProcAddr)
            GET_JSON_ITEM(functions, vkGetDeviceProcAddr)
            if (vkGetInstanceProcAddr != NULL)
                strncpy(props->functions.str_gipa, vkGetInstanceProcAddr, sizeof (props->functions.str_gipa));
            props->functions.str_gipa[sizeof (props->functions.str_gipa) - 1] = '\0';
            if (vkGetDeviceProcAddr != NULL)
                strncpy(props->functions.str_gdpa, vkGetDeviceProcAddr, sizeof (props->functions.str_gdpa));
            props->functions.str_gdpa[sizeof (props->functions.str_gdpa) - 1] = '\0';
        }
        GET_JSON_OBJECT(layer_node, instance_extensions)
        if (instance_extensions != NULL)
        {
            int count = cJSON_GetArraySize(instance_extensions);
            for (i = 0; i < count; i++) {
                ext_item = cJSON_GetArrayItem(instance_extensions, i);
                GET_JSON_ITEM(ext_item, name)
                GET_JSON_ITEM(ext_item, version)
                strncpy(ext_prop.extensionName, name, sizeof (ext_prop.extensionName));
                ext_prop.extensionName[sizeof (ext_prop.extensionName) - 1] = '\0';
                ext_prop.specVersion = loader_make_version(version);
                loader_add_to_ext_list(inst, &props->instance_extension_list, 1, &ext_prop);
            }
        }
        GET_JSON_OBJECT(layer_node, device_extensions)
        if (device_extensions != NULL) {
            int count = cJSON_GetArraySize(device_extensions);
            for (i = 0; i < count; i++) {
                ext_item = cJSON_GetArrayItem(device_extensions, i);
                GET_JSON_ITEM(ext_item, name);
                GET_JSON_ITEM(ext_item, version);
                strncpy(ext_prop.extensionName, name, sizeof (ext_prop.extensionName));
                ext_prop.extensionName[sizeof (ext_prop.extensionName) - 1] = '\0';
                ext_prop.specVersion = loader_make_version(version);
                loader_add_to_ext_list(inst, &props->device_extension_list, 1, &ext_prop);
            }
        }
        if (is_implicit) {
            GET_JSON_OBJECT(layer_node, enable_environment)
            strncpy(props->enable_env_var.name, enable_environment->child->string, sizeof (props->enable_env_var.name));
            props->enable_env_var.name[sizeof (props->enable_env_var.name) - 1] = '\0';
            strncpy(props->enable_env_var.value, enable_environment->child->valuestring, sizeof (props->enable_env_var.value));
            props->enable_env_var.value[sizeof (props->enable_env_var.value) - 1] = '\0';
        }
#undef GET_JSON_ITEM
#undef GET_JSON_OBJECT
        // for global layers need to add them to both device and instance list
        if (!strcmp(type, "GLOBAL")) {
            struct loader_layer_properties *dev_props;
            if (layer_instance_list == NULL || layer_device_list == NULL) {
                layer_node = layer_node->next;
                continue;
            }
            dev_props = loader_get_next_layer_property(inst, layer_device_list);
            // copy into device layer list
            loader_copy_layer_properties(inst, dev_props, props);
        }
        layer_node = layer_node->next;
    } while (layer_node != NULL);
    return;
}

/**
 * Find the Vulkan library manifest files.
 *
 * This function scans the location or env_override directories/files
 * for a list of JSON manifest files. If env_override is non-NULL and has a
 * valid value, then the location is ignored; otherwise location is used to
 * look for manifest files. The location is interpreted as a Registry path on
 * Windows and as directory path(s) on Linux.
 *
 * \returns
 * A string list of manifest files to be opened in the out_files param.
 * The list has a pointer to a string for each manifest filename.
 * When done using the list in out_files, the pointers should be freed.
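 * out_files->count is 0 and out_files->filename_list is NULL if no manifest
 * files are found.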
 * Location or override string lists can be either files or directories as follows:
 *             | location | override
 * --------------------------------
 * Win ICD     | files    | files
 * Win Layer   | files    | dirs
 * Linux ICD   | dirs     | files
 * Linux Layer | dirs     | dirs
 */
static void loader_get_manifest_files(const struct loader_instance *inst,
                                      const char *env_override,
                                      bool is_layer,
                                      const char *location,
                                      struct loader_manifest_files *out_files)
{
    char *override = NULL;
    char *loc;
    char *file, *next_file, *name;
    size_t alloced_count = 64;
    char full_path[2048];
    DIR *sysdir = NULL;
    bool list_is_dirs = false;
    struct dirent *dent;

    out_files->count = 0;
    out_files->filename_list = NULL;

    if (env_override != NULL && (override = getenv(env_override))) {
#if !defined(_WIN32)
        if (geteuid() != getuid()) {
            /* Don't allow setuid apps to use the env var: */
            override = NULL;
        }
#endif
    }

    if (location == NULL) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0,
                   "Can't get manifest files with NULL location, env_override=%s",
                   env_override);
        return;
    }

#if defined(_WIN32)
    list_is_dirs = (is_layer && override != NULL) ? true : false;
#else
    list_is_dirs = (override == NULL || is_layer) ? true : false;
#endif
    // Make a copy of the input we are using so it is not modified
    // Also handle getting the location(s) from registry on Windows
    if (override == NULL) {
        loc = loader_stack_alloc(strlen(location) + 1);
        if (loc == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files");
            return;
        }
        strcpy(loc, location);
#if defined(_WIN32)
        loc = loader_get_registry_files(inst, loc);
        if (loc == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Registry lookup failed can't get manifest files");
            return;
        }
#endif
    }
    else {
        loc = loader_stack_alloc(strlen(override) + 1);
        if (loc == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files");
            return;
        }
        strcpy(loc, override);
    }

    // Print out the paths being searched if debugging is enabled
    loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, "Searching the following paths for manifest files: %s\n", loc);

    file = loc;
    while (*file) {
        next_file = loader_get_next_path(file);
        if (list_is_dirs) {
            sysdir = opendir(file);
            name = NULL;
            if (sysdir) {
                dent = readdir(sysdir);
                if (dent == NULL)
                    break;
                name = &(dent->d_name[0]);
                loader_get_fullpath(name, file, sizeof(full_path), full_path);
                name = full_path;
            }
        }
        else {
#if defined(_WIN32)
            name = file;
#else
            // only Linux has relative paths
            char *dir;
            // make a copy of location so it isn't modified
            dir = loader_stack_alloc(strlen(loc) + 1);
            if (dir == NULL) {
                loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files");
                return;
            }
            strcpy(dir, loc);

            loader_get_fullpath(file, dir, sizeof(full_path), full_path);

            name = full_path;
#endif
        }
        while (name) {
            /* Look for files ending with ".json" suffix */
            uint32_t nlen = (uint32_t) strlen(name);
            const char *suf = name + nlen - 5;
            if ((nlen > 5) && !strncmp(suf, ".json", 5)) {
                if (out_files->count == 0) {
out_files->filename_list = loader_heap_alloc(inst, 1808 alloced_count * sizeof(char *), 1809 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 1810 } 1811 else if (out_files->count == alloced_count) { 1812 out_files->filename_list = loader_heap_realloc(inst, 1813 out_files->filename_list, 1814 alloced_count * sizeof(char *), 1815 alloced_count * sizeof(char *) * 2, 1816 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 1817 alloced_count *= 2; 1818 } 1819 if (out_files->filename_list == NULL) { 1820 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't alloc manifest file list"); 1821 return; 1822 } 1823 out_files->filename_list[out_files->count] = loader_heap_alloc( 1824 inst, 1825 strlen(name) + 1, 1826 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 1827 if (out_files->filename_list[out_files->count] == NULL) { 1828 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files"); 1829 return; 1830 } 1831 strcpy(out_files->filename_list[out_files->count], name); 1832 out_files->count++; 1833 } else if (!list_is_dirs) { 1834 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Skipping manifest file %s, file name must end in .json", name); 1835 } 1836 if (list_is_dirs) { 1837 dent = readdir(sysdir); 1838 if (dent == NULL) 1839 break; 1840 name = &(dent->d_name[0]); 1841 loader_get_fullpath(name, file, sizeof(full_path), full_path); 1842 name = full_path; 1843 } 1844 else { 1845 break; 1846 } 1847 } 1848 if (sysdir) 1849 closedir(sysdir); 1850 file = next_file; 1851 } 1852 return; 1853} 1854 1855void loader_init_icd_lib_list() 1856{ 1857 1858} 1859 1860void loader_destroy_icd_lib_list() 1861{ 1862 1863} 1864/** 1865 * Try to find the Vulkan ICD driver(s). 1866 * 1867 * This function scans the default system loader path(s) or path 1868 * specified by the \c VK_ICD_FILENAMES environment variable in 1869 * order to find loadable VK ICDs manifest files. From these 1870 * manifest files it finds the ICD libraries. 
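 *
 * Illustrative example (the library name is hypothetical): an ICD
 * manifest file accepted by this scan has the form
 *
 *     {
 *         "file_format_version": "1.0.0",
 *         "ICD": {
 *             "library_path": "libsample_icd.so"
 *         }
 *     }
 *
 * where "library_path" may be a bare file name (resolved against the
 * default driver directory) or a relative/absolute path (resolved
 * against the manifest file's own directory).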
1871 * 1872 * \returns 1873 * a list of icds that were discovered 1874 */ 1875void loader_icd_scan( 1876 const struct loader_instance *inst, 1877 struct loader_icd_libs *icds) 1878{ 1879 char *file_str; 1880 struct loader_manifest_files manifest_files; 1881 1882 loader_scanned_icd_init(inst, icds); 1883 // Get a list of manifest files for ICDs 1884 loader_get_manifest_files(inst, "VK_ICD_FILENAMES", false, 1885 DEFAULT_VK_DRIVERS_INFO, &manifest_files); 1886 if (manifest_files.count == 0) 1887 return; 1888 loader_platform_thread_lock_mutex(&loader_json_lock); 1889 for (uint32_t i = 0; i < manifest_files.count; i++) { 1890 file_str = manifest_files.filename_list[i]; 1891 if (file_str == NULL) 1892 continue; 1893 1894 cJSON *json; 1895 json = loader_get_json(file_str); 1896 if (!json) 1897 continue; 1898 cJSON *item; 1899 item = cJSON_GetObjectItem(json, "file_format_version"); 1900 if (item == NULL) { 1901 loader_platform_thread_unlock_mutex(&loader_json_lock); 1902 return; 1903 } 1904 char *file_vers = cJSON_Print(item); 1905 loader_log(VK_DBG_REPORT_INFO_BIT, 0, "Found manifest file %s, version %s", 1906 file_str, file_vers); 1907 if (strcmp(file_vers, "\"1.0.0\"") != 0) 1908 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Unexpected manifest file version (expected 1.0.0), may cause errors"); 1909 loader_tls_heap_free(file_vers); 1910 item = cJSON_GetObjectItem(json, "ICD"); 1911 if (item != NULL) { 1912 item = cJSON_GetObjectItem(item, "library_path"); 1913 if (item != NULL) { 1914 char *temp= cJSON_Print(item); 1915 if (!temp || strlen(temp) == 0) { 1916 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"library_path\" in ICD JSON file %s, skipping", file_str); 1917 loader_tls_heap_free(temp); 1918 loader_heap_free(inst, file_str); 1919 cJSON_Delete(json); 1920 continue; 1921 } 1922 //strip out extra quotes 1923 temp[strlen(temp) - 1] = '\0'; 1924 char *library_path = loader_stack_alloc(strlen(temp) + 1); 1925 strcpy(library_path, &temp[1]); 1926 loader_tls_heap_free(temp); 1927 if (!library_path || strlen(library_path) == 0) { 1928 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"library_path\" in ICD JSON file %s, skipping", file_str); 1929 loader_heap_free(inst, file_str); 1930 cJSON_Delete(json); 1931 continue; 1932 } 1933 char fullpath[MAX_STRING_SIZE]; 1934 // Print out the paths being searched if debugging is enabled 1935 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, "Searching for ICD drivers named %s default dir %s\n", library_path, DEFAULT_VK_DRIVERS_PATH); 1936 if (loader_platform_is_path(library_path)) { 1937 // a relative or absolute path 1938 char *name_copy = loader_stack_alloc(strlen(file_str) + 1); 1939 char *rel_base; 1940 strcpy(name_copy, file_str); 1941 rel_base = loader_platform_dirname(name_copy); 1942 loader_expand_path(library_path, rel_base, sizeof(fullpath), fullpath); 1943 } else { 1944 // a filename which is assumed in a system directory 1945 loader_get_fullpath(library_path, DEFAULT_VK_DRIVERS_PATH, sizeof(fullpath), fullpath); 1946 } 1947 loader_scanned_icd_add(inst, icds, fullpath); 1948 } 1949 } 1950 else 1951 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"ICD\" object in ICD JSON file %s, skipping", file_str); 1952 1953 loader_heap_free(inst, file_str); 1954 cJSON_Delete(json); 1955 } 1956 loader_heap_free(inst, manifest_files.filename_list); 1957 loader_platform_thread_unlock_mutex(&loader_json_lock); 1958} 1959 1960 1961void loader_layer_scan( 1962 const struct loader_instance *inst, 1963 struct loader_layer_list *instance_layers, 1964 struct 
loader_layer_list *device_layers) 1965{ 1966 char *file_str; 1967 struct loader_manifest_files manifest_files; 1968 cJSON *json; 1969 uint32_t i; 1970 1971 // Get a list of manifest files for layers 1972 loader_get_manifest_files(inst, LAYERS_PATH_ENV, true, DEFAULT_VK_LAYERS_INFO, 1973 &manifest_files); 1974 if (manifest_files.count == 0) 1975 return; 1976 1977#if 0 //TODO 1978 /** 1979 * We need a list of the layer libraries, not just a list of 1980 * the layer properties (a layer library could expose more than 1981 * one layer property). This list of scanned layers would be 1982 * used to check for global and physicaldevice layer properties. 1983 */ 1984 if (!loader_init_layer_library_list(&loader.scanned_layer_libraries)) { 1985 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 1986 "Alloc for layer list failed: %s line: %d", __FILE__, __LINE__); 1987 return; 1988 } 1989#endif 1990 1991 /* cleanup any previously scanned libraries */ 1992 loader_delete_layer_properties(inst, instance_layers); 1993 loader_delete_layer_properties(inst, device_layers); 1994 1995 loader_platform_thread_lock_mutex(&loader_json_lock); 1996 for (i = 0; i < manifest_files.count; i++) { 1997 file_str = manifest_files.filename_list[i]; 1998 if (file_str == NULL) 1999 continue; 2000 2001 // parse file into JSON struct 2002 json = loader_get_json(file_str); 2003 if (!json) { 2004 continue; 2005 } 2006 2007 //TODO pass in implicit versus explicit bool 2008 //TODO error if device layers expose instance_extensions 2009 //TODO error if instance layers expose device extensions 2010 loader_add_layer_properties(inst, 2011 instance_layers, 2012 device_layers, 2013 json, 2014 false, 2015 file_str); 2016 2017 loader_heap_free(inst, file_str); 2018 cJSON_Delete(json); 2019 } 2020 loader_heap_free(inst, manifest_files.filename_list); 2021 loader_platform_thread_unlock_mutex(&loader_json_lock); 2022} 2023 2024static PFN_vkVoidFunction VKAPI loader_gpa_instance_internal(VkInstance inst, const char * pName) 2025{ 2026 // inst is not wrapped 2027 if (inst == VK_NULL_HANDLE) { 2028 return NULL; 2029 } 2030 VkLayerInstanceDispatchTable* disp_table = * (VkLayerInstanceDispatchTable **) inst; 2031 void *addr; 2032 2033 if (!strcmp(pName, "vkGetInstanceProcAddr")) 2034 return (void *) loader_gpa_instance_internal; 2035 2036 if (disp_table == NULL) 2037 return NULL; 2038 2039 addr = loader_lookup_instance_dispatch_table(disp_table, pName); 2040 if (addr) { 2041 return addr; 2042 } 2043 2044 if (disp_table->GetInstanceProcAddr == NULL) { 2045 return NULL; 2046 } 2047 return disp_table->GetInstanceProcAddr(inst, pName); 2048} 2049 2050struct loader_instance *loader_get_instance(const VkInstance instance) 2051{ 2052 /* look up the loader_instance in our list by comparing dispatch tables, as 2053 * there is no guarantee the instance is still a loader_instance* after any 2054 * layers which wrap the instance object. 
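 * The dispatch table pointer stored at the start of the dispatchable
 * object is the one thing layers are expected to preserve, so it is used
 * as the lookup key here (illustrative: disp below is what
 * loader_get_instance_dispatch() reads back out of the handle).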
2055 */ 2056 const VkLayerInstanceDispatchTable *disp; 2057 struct loader_instance *ptr_instance = NULL; 2058 disp = loader_get_instance_dispatch(instance); 2059 for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) { 2060 if (inst->disp == disp) { 2061 ptr_instance = inst; 2062 break; 2063 } 2064 } 2065 return ptr_instance; 2066} 2067 2068static loader_platform_dl_handle loader_add_layer_lib( 2069 const struct loader_instance *inst, 2070 const char *chain_type, 2071 struct loader_layer_properties *layer_prop) 2072{ 2073 struct loader_lib_info *new_layer_lib_list, *my_lib; 2074 size_t new_alloc_size; 2075 /* 2076 * TODO: We can now track this information in the 2077 * scanned_layer_libraries list. 2078 */ 2079 for (uint32_t i = 0; i < loader.loaded_layer_lib_count; i++) { 2080 if (strcmp(loader.loaded_layer_lib_list[i].lib_name, layer_prop->lib_name) == 0) { 2081 /* Have already loaded this library, just increment ref count */ 2082 loader.loaded_layer_lib_list[i].ref_count++; 2083 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, 2084 "%s Chain: Increment layer reference count for layer library %s", 2085 chain_type, layer_prop->lib_name); 2086 return loader.loaded_layer_lib_list[i].lib_handle; 2087 } 2088 } 2089 2090 /* Haven't seen this library so load it */ 2091 new_alloc_size = 0; 2092 if (loader.loaded_layer_lib_capacity == 0) 2093 new_alloc_size = 8 * sizeof(struct loader_lib_info); 2094 else if (loader.loaded_layer_lib_capacity <= loader.loaded_layer_lib_count * 2095 sizeof(struct loader_lib_info)) 2096 new_alloc_size = loader.loaded_layer_lib_capacity * 2; 2097 2098 if (new_alloc_size) { 2099 new_layer_lib_list = loader_heap_realloc( 2100 inst, loader.loaded_layer_lib_list, 2101 loader.loaded_layer_lib_capacity, 2102 new_alloc_size, 2103 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2104 if (!new_layer_lib_list) { 2105 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "loader: realloc failed in loader_add_layer_lib"); 2106 return NULL; 2107 } 2108 loader.loaded_layer_lib_capacity = new_alloc_size; 2109 } else 2110 new_layer_lib_list = loader.loaded_layer_lib_list; 2111 my_lib = &new_layer_lib_list[loader.loaded_layer_lib_count]; 2112 2113 strncpy(my_lib->lib_name, layer_prop->lib_name, sizeof(my_lib->lib_name)); 2114 my_lib->lib_name[sizeof(my_lib->lib_name) - 1] = '\0'; 2115 my_lib->ref_count = 0; 2116 my_lib->lib_handle = NULL; 2117 2118 if ((my_lib->lib_handle = loader_platform_open_library(my_lib->lib_name)) == NULL) { 2119 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2120 loader_platform_open_library_error(my_lib->lib_name)); 2121 return NULL; 2122 } else { 2123 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, 2124 "Chain: %s: Loading layer library %s", 2125 chain_type, layer_prop->lib_name); 2126 } 2127 loader.loaded_layer_lib_count++; 2128 loader.loaded_layer_lib_list = new_layer_lib_list; 2129 my_lib->ref_count++; 2130 2131 return my_lib->lib_handle; 2132} 2133 2134static void loader_remove_layer_lib( 2135 struct loader_instance *inst, 2136 struct loader_layer_properties *layer_prop) 2137{ 2138 uint32_t idx; 2139 struct loader_lib_info *new_layer_lib_list, *my_lib = NULL; 2140 2141 for (uint32_t i = 0; i < loader.loaded_layer_lib_count; i++) { 2142 if (strcmp(loader.loaded_layer_lib_list[i].lib_name, layer_prop->lib_name) == 0) { 2143 /* found matching library */ 2144 idx = i; 2145 my_lib = &loader.loaded_layer_lib_list[i]; 2146 break; 2147 } 2148 } 2149 2150 if (my_lib) { 2151 my_lib->ref_count--; 2152 if (my_lib->ref_count > 0) { 2153 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, 2154 "Decrement 
reference count for layer library %s", layer_prop->lib_name);
2155             return;
2156         }
2157     }
2158     loader_platform_close_library(my_lib->lib_handle);
2159     loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
2160                "Unloading layer library %s", layer_prop->lib_name);
2161
2162     /* Need to remove unused library from list */
2163     new_layer_lib_list = loader_heap_alloc(inst,
2164                                            loader.loaded_layer_lib_capacity,
2165                                            VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
2166     if (!new_layer_lib_list) {
2167         loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "loader: heap alloc failed loader_remove_layer_library");
2168         return;
2169     }
2170
2171     if (idx > 0) {
2172         /* Copy records before idx */
2173         memcpy(new_layer_lib_list, &loader.loaded_layer_lib_list[0],
2174                sizeof(struct loader_lib_info) * idx);
2175     }
2176     if (idx < (loader.loaded_layer_lib_count - 1)) {
2177         /* Copy records after idx */
2178         memcpy(&new_layer_lib_list[idx], &loader.loaded_layer_lib_list[idx+1],
2179                sizeof(struct loader_lib_info) * (loader.loaded_layer_lib_count - idx - 1));
2180     }
2181
2182     loader_heap_free(inst, loader.loaded_layer_lib_list);
2183     loader.loaded_layer_lib_count--;
2184     loader.loaded_layer_lib_list = new_layer_lib_list;
2185 }
2186
2187
2188 /**
2189  * Go through search_list and find any layers which match type. If a layer
2190  * with a matching type is found, add it to list.
2191  */
2192 //TODO need to handle implicit layer enable env var and disable env var
2193 static void loader_add_layer_implicit(
2194     const struct loader_instance *inst,
2195     const enum layer_type type,
2196     struct loader_layer_list *list,
2197     const struct loader_layer_list *search_list)
2198 {
2199     uint32_t i;
2200     for (i = 0; i < search_list->count; i++) {
2201         const struct loader_layer_properties *prop = &search_list->list[i];
2202         if (prop->type & type) {
2203             /* Found a layer with a matching type, add it to layer_list */
2204             loader_add_to_layer_list(inst, list, 1, prop);
2205         }
2206     }
2207
2208 }
2209
2210 /**
2211  * Get the layer name(s) from the env_name environment variable. If a layer
2212  * is found in search_list, add it to layer_list, but only if its
2213  * type matches.
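 *
 * Illustrative example (the layer names are hypothetical): with
 * VK_INSTANCE_LAYERS=VK_LAYER_A:VK_LAYER_B in the environment (entries
 * separated by the platform path separator), each name is looked up in
 * search_list and appended to layer_list in that order when its type
 * matches.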
2214 */ 2215static void loader_add_layer_env( 2216 const struct loader_instance *inst, 2217 const enum layer_type type, 2218 const char *env_name, 2219 struct loader_layer_list *layer_list, 2220 const struct loader_layer_list *search_list) 2221{ 2222 char *layerEnv; 2223 char *next, *name; 2224 2225 layerEnv = getenv(env_name); 2226 if (layerEnv == NULL) { 2227 return; 2228 } 2229 name = loader_stack_alloc(strlen(layerEnv) + 1); 2230 if (name == NULL) { 2231 return; 2232 } 2233 strcpy(name, layerEnv); 2234 2235 while (name && *name ) { 2236 next = loader_get_next_path(name); 2237 loader_find_layer_name_add_list(inst, name, type, search_list, layer_list); 2238 name = next; 2239 } 2240 2241 return; 2242} 2243 2244void loader_deactivate_instance_layers(struct loader_instance *instance) 2245{ 2246 if (!instance->activated_layer_list.count) { 2247 return; 2248 } 2249 2250 /* Create instance chain of enabled layers */ 2251 for (uint32_t i = 0; i < instance->activated_layer_list.count; i++) { 2252 struct loader_layer_properties *layer_prop = &instance->activated_layer_list.list[i]; 2253 2254 loader_remove_layer_lib(instance, layer_prop); 2255 } 2256 loader_destroy_layer_list(instance, &instance->activated_layer_list); 2257} 2258 2259VkResult loader_enable_instance_layers( 2260 struct loader_instance *inst, 2261 const VkInstanceCreateInfo *pCreateInfo, 2262 const struct loader_layer_list *instance_layers) 2263{ 2264 VkResult err; 2265 2266 assert(inst && "Cannot have null instance"); 2267 2268 if (!loader_init_layer_list(inst, &inst->activated_layer_list)) { 2269 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc Instance activated layer list"); 2270 return VK_ERROR_OUT_OF_HOST_MEMORY; 2271 } 2272 2273 /* Add any implicit layers first */ 2274 loader_add_layer_implicit( 2275 inst, 2276 VK_LAYER_TYPE_INSTANCE_IMPLICIT, 2277 &inst->activated_layer_list, 2278 instance_layers); 2279 2280 /* Add any layers specified via environment variable next */ 2281 loader_add_layer_env( 2282 inst, 2283 VK_LAYER_TYPE_INSTANCE_EXPLICIT, 2284 "VK_INSTANCE_LAYERS", 2285 &inst->activated_layer_list, 2286 instance_layers); 2287 2288 /* Add layers specified by the application */ 2289 err = loader_add_layer_names_to_list( 2290 inst, 2291 &inst->activated_layer_list, 2292 pCreateInfo->enabledLayerNameCount, 2293 pCreateInfo->ppEnabledLayerNames, 2294 instance_layers); 2295 2296 return err; 2297} 2298 2299uint32_t loader_activate_instance_layers(struct loader_instance *inst) 2300{ 2301 uint32_t layer_idx; 2302 VkBaseLayerObject *wrappedInstance; 2303 2304 if (inst == NULL) { 2305 return 0; 2306 } 2307 2308 // NOTE inst is unwrapped at this point in time 2309 void* baseObj = (void*) inst; 2310 void* nextObj = (void*) inst; 2311 VkBaseLayerObject *nextInstObj; 2312 PFN_vkGetInstanceProcAddr nextGPA = loader_gpa_instance_internal; 2313 2314 if (!inst->activated_layer_list.count) { 2315 loader_init_instance_core_dispatch_table(inst->disp, nextGPA, (VkInstance) nextObj, (VkInstance) baseObj); 2316 return 0; 2317 } 2318 2319 wrappedInstance = loader_stack_alloc(sizeof(VkBaseLayerObject) 2320 * inst->activated_layer_list.count); 2321 if (!wrappedInstance) { 2322 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc Instance objects for layer"); 2323 return 0; 2324 } 2325 2326 /* Create instance chain of enabled layers */ 2327 layer_idx = inst->activated_layer_list.count - 1; 2328 for (int32_t i = inst->activated_layer_list.count - 1; i >= 0; i--) { 2329 struct loader_layer_properties *layer_prop = 
&inst->activated_layer_list.list[i];
2330         loader_platform_dl_handle lib_handle;
2331
2332         /*
2333          * Note: An extension's Get*ProcAddr should not return a function pointer for
2334          * any extension entry points until the extension has been enabled.
2335          * Doing this requires different behavior from the Get*ProcAddr functions
2336          * implemented in layers.
2337          * The very first call to a layer will be its Get*ProcAddr function, requesting
2338          * the layer's vkGet*ProcAddr. The layer should initialize its internal dispatch table
2339          * with the wrapped object given (either Instance or Device) and return the layer's
2340          * Get*ProcAddr function. The layer should also use this opportunity to record the
2341          * baseObject so that it can find the correct local dispatch table on future calls.
2342          * Subsequent calls to Get*ProcAddr, CreateInstance, and CreateDevice
2343          * will not use a wrapped object and must look up their local dispatch table from
2344          * the given baseObject.
2345          */
2346         nextInstObj = (wrappedInstance + layer_idx);
2347         nextInstObj->pGPA = (PFN_vkGPA) nextGPA;
2348         nextInstObj->baseObject = baseObj;
2349         nextInstObj->nextObject = nextObj;
2350         nextObj = (void*) nextInstObj;
2351
2352         lib_handle = loader_add_layer_lib(inst, "instance", layer_prop);
2353         if ((nextGPA = layer_prop->functions.get_instance_proc_addr) == NULL) {
2354             if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) {
2355                 nextGPA = (PFN_vkGetInstanceProcAddr) loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
2356                 layer_prop->functions.get_instance_proc_addr = nextGPA;
2357             } else
2358                 nextGPA = (PFN_vkGetInstanceProcAddr) loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gipa);
2359             if (!nextGPA) {
2360                 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to find vkGetInstanceProcAddr in layer %s", layer_prop->lib_name);
2361
2362                 /* TODO: Should we return nextObj, nextGPA to previous?
or decrement layer_list count*/ 2363 continue; 2364 } 2365 } 2366 2367 loader_log(VK_DBG_REPORT_INFO_BIT, 0, 2368 "Insert instance layer %s (%s)", 2369 layer_prop->info.layerName, 2370 layer_prop->lib_name); 2371 2372 layer_idx--; 2373 } 2374 2375 loader_init_instance_core_dispatch_table(inst->disp, nextGPA, (VkInstance) nextObj, (VkInstance) baseObj); 2376 2377 return inst->activated_layer_list.count; 2378} 2379 2380void loader_activate_instance_layer_extensions(struct loader_instance *inst) 2381{ 2382 2383 loader_init_instance_extension_dispatch_table(inst->disp, 2384 inst->disp->GetInstanceProcAddr, 2385 (VkInstance) inst); 2386} 2387 2388static VkResult loader_enable_device_layers( 2389 const struct loader_instance *inst, 2390 struct loader_icd *icd, 2391 struct loader_device *dev, 2392 const VkDeviceCreateInfo *pCreateInfo, 2393 const struct loader_layer_list *device_layers) 2394 2395{ 2396 VkResult err; 2397 2398 assert(dev && "Cannot have null device"); 2399 2400 if (dev->activated_layer_list.list == NULL || dev->activated_layer_list.capacity == 0) { 2401 loader_init_layer_list(inst, &dev->activated_layer_list); 2402 } 2403 2404 if (dev->activated_layer_list.list == NULL) { 2405 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc device activated layer list"); 2406 return VK_ERROR_OUT_OF_HOST_MEMORY; 2407 } 2408 2409 /* Add any implicit layers first */ 2410 loader_add_layer_implicit( 2411 inst, 2412 VK_LAYER_TYPE_DEVICE_IMPLICIT, 2413 &dev->activated_layer_list, 2414 device_layers); 2415 2416 /* Add any layers specified via environment variable next */ 2417 loader_add_layer_env( 2418 inst, 2419 VK_LAYER_TYPE_DEVICE_EXPLICIT, 2420 "VK_DEVICE_LAYERS", 2421 &dev->activated_layer_list, 2422 device_layers); 2423 2424 /* Add layers specified by the application */ 2425 err = loader_add_layer_names_to_list( 2426 inst, 2427 &dev->activated_layer_list, 2428 pCreateInfo->enabledLayerNameCount, 2429 pCreateInfo->ppEnabledLayerNames, 2430 device_layers); 2431 2432 return err; 2433} 2434 2435/* 2436 * This function terminates the device chain for CreateDevice. 2437 * CreateDevice is a special case and so the loader call's 2438 * the ICD's CreateDevice before creating the chain. Since 2439 * we can't call CreateDevice twice we must terminate the 2440 * device chain with something else. 
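 * The terminator below therefore only returns VK_SUCCESS; the real
 * VkDevice was already created by the icd->CreateDevice call made
 * earlier in loader_CreateDevice, before the layer chain is invoked.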
2441 */ 2442static VkResult VKAPI scratch_vkCreateDevice( 2443 VkPhysicalDevice physicalDevice, 2444 const VkDeviceCreateInfo *pCreateInfo, 2445 const VkAllocationCallbacks* pAllocator, 2446 VkDevice *pDevice) 2447{ 2448 return VK_SUCCESS; 2449} 2450 2451static PFN_vkVoidFunction VKAPI loader_GetDeviceChainProcAddr(VkDevice device, const char * name) 2452{ 2453 if (!strcmp(name, "vkGetDeviceProcAddr")) 2454 return (PFN_vkVoidFunction) loader_GetDeviceChainProcAddr; 2455 if (!strcmp(name, "vkCreateDevice")) 2456 return (PFN_vkVoidFunction) scratch_vkCreateDevice; 2457 2458 struct loader_device *found_dev; 2459 struct loader_icd *icd = loader_get_icd_and_device(device, &found_dev); 2460 return icd->GetDeviceProcAddr(device, name); 2461} 2462 2463static uint32_t loader_activate_device_layers( 2464 const struct loader_instance *inst, 2465 struct loader_device *dev, 2466 VkDevice device) 2467{ 2468 if (!dev) { 2469 return 0; 2470 } 2471 2472 /* activate any layer libraries */ 2473 void* nextObj = (void*) device; 2474 void* baseObj = nextObj; 2475 VkBaseLayerObject *nextGpuObj; 2476 PFN_vkGetDeviceProcAddr nextGPA = loader_GetDeviceChainProcAddr; 2477 VkBaseLayerObject *wrappedGpus; 2478 2479 if (!dev->activated_layer_list.count) { 2480 loader_init_device_dispatch_table(&dev->loader_dispatch, nextGPA, 2481 (VkDevice) nextObj, (VkDevice) baseObj); 2482 return 0; 2483 } 2484 2485 wrappedGpus = loader_heap_alloc(inst, 2486 sizeof (VkBaseLayerObject) * dev->activated_layer_list.count, 2487 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2488 if (!wrappedGpus) { 2489 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc Gpu objects for layer"); 2490 return 0; 2491 } 2492 2493 for (int32_t i = dev->activated_layer_list.count - 1; i >= 0; i--) { 2494 2495 struct loader_layer_properties *layer_prop = &dev->activated_layer_list.list[i]; 2496 loader_platform_dl_handle lib_handle; 2497 2498 nextGpuObj = (wrappedGpus + i); 2499 nextGpuObj->pGPA = (PFN_vkGPA)nextGPA; 2500 nextGpuObj->baseObject = baseObj; 2501 nextGpuObj->nextObject = nextObj; 2502 nextObj = (void*) nextGpuObj; 2503 2504 lib_handle = loader_add_layer_lib(inst, "device", layer_prop); 2505 if ((nextGPA = layer_prop->functions.get_device_proc_addr) == NULL) { 2506 if (layer_prop->functions.str_gdpa == NULL || strlen(layer_prop->functions.str_gdpa) == 0) { 2507 nextGPA = (PFN_vkGetDeviceProcAddr) loader_platform_get_proc_address(lib_handle, "vkGetDeviceProcAddr"); 2508 layer_prop->functions.get_device_proc_addr = nextGPA; 2509 } else 2510 nextGPA = (PFN_vkGetDeviceProcAddr) loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gdpa); 2511 if (!nextGPA) { 2512 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to find vkGetDeviceProcAddr in layer %s", layer_prop->lib_name); 2513 continue; 2514 } 2515 } 2516 2517 loader_log(VK_DBG_REPORT_INFO_BIT, 0, 2518 "Insert device layer library %s (%s)", 2519 layer_prop->info.layerName, 2520 layer_prop->lib_name); 2521 2522 } 2523 2524 loader_init_device_dispatch_table(&dev->loader_dispatch, nextGPA, 2525 (VkDevice) nextObj, (VkDevice) baseObj); 2526 loader_heap_free(inst, wrappedGpus); 2527 2528 return dev->activated_layer_list.count; 2529} 2530 2531VkResult loader_validate_layers( 2532 const uint32_t layer_count, 2533 const char * const *ppEnabledLayerNames, 2534 const struct loader_layer_list *list) 2535{ 2536 struct loader_layer_properties *prop; 2537 2538 for (uint32_t i = 0; i < layer_count; i++) { 2539 prop = loader_get_layer_property(ppEnabledLayerNames[i], 2540 list); 2541 if (!prop) { 2542 
return VK_ERROR_LAYER_NOT_PRESENT;
2543         }
2544     }
2545
2546     return VK_SUCCESS;
2547 }
2548
2549 VkResult loader_validate_instance_extensions(
2550     const struct loader_extension_list *icd_exts,
2551     const struct loader_layer_list *instance_layer,
2552     const VkInstanceCreateInfo *pCreateInfo)
2553 {
2554     VkExtensionProperties *extension_prop;
2555     struct loader_layer_properties *layer_prop;
2556
2557     for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) {
2558         extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i],
2559                                                 icd_exts);
2560
2561         if (extension_prop) {
2562             continue;
2563         }
2564
2565         extension_prop = NULL;
2566
2567         /* Not in global list, search layer extension lists */
2568         for (uint32_t j = 0; j < pCreateInfo->enabledLayerNameCount; j++) {
2569             layer_prop = loader_get_layer_property(pCreateInfo->ppEnabledLayerNames[j],
2570                                                    instance_layer);
2571             if (!layer_prop) {
2572                 /* Should NOT get here, loader_validate_layers
2573                  * should have already filtered this case out.
2574                  */
2575                 continue;
2576             }
2577
2578             extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i],
2579                                                     &layer_prop->instance_extension_list);
2580             if (extension_prop) {
2581                 /* Found the extension in one of the layers enabled by the app. */
2582                 break;
2583             }
2584         }
2585
2586         if (!extension_prop) {
2587             /* Didn't find the extension in the ICD list or in any enabled layer, error out */
2588             return VK_ERROR_EXTENSION_NOT_PRESENT;
2589         }
2590     }
2591     return VK_SUCCESS;
2592 }
2593
2594 VkResult loader_validate_device_extensions(
2595     struct loader_physical_device *phys_dev,
2596     const struct loader_layer_list *device_layer,
2597     const VkDeviceCreateInfo *pCreateInfo)
2598 {
2599     VkExtensionProperties *extension_prop;
2600     struct loader_layer_properties *layer_prop;
2601
2602     for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) {
2603         const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
2604         extension_prop = get_extension_property(extension_name,
2605                                                 &phys_dev->device_extension_cache);
2606
2607         if (extension_prop) {
2608             continue;
2609         }
2610
2611         /* Not in global list, search layer extension lists */
2612         for (uint32_t j = 0; j < pCreateInfo->enabledLayerNameCount; j++) {
2613             const char *layer_name = pCreateInfo->ppEnabledLayerNames[j];
2614             layer_prop = loader_get_layer_property(layer_name,
2615                                                    device_layer);
2616
2617             if (!layer_prop) {
2618                 /* Should NOT get here, loader_validate_layers
2619                  * should have already filtered this case out.
2620                  */
2621                 continue;
2622             }
2623
2624             extension_prop = get_extension_property(extension_name,
2625                                                     &layer_prop->device_extension_list);
2626             if (extension_prop) {
2627                 /* Found the extension in one of the layers enabled by the app.
*/ 2628 break; 2629 } 2630 } 2631 2632 if (!extension_prop) { 2633 /* Didn't find extension name in any of the device layers, error out */ 2634 return VK_ERROR_EXTENSION_NOT_PRESENT; 2635 } 2636 } 2637 return VK_SUCCESS; 2638} 2639 2640VkResult VKAPI loader_CreateInstance( 2641 const VkInstanceCreateInfo* pCreateInfo, 2642 const VkAllocationCallbacks* pAllocator, 2643 VkInstance* pInstance) 2644{ 2645 struct loader_instance *ptr_instance = *(struct loader_instance **) pInstance; 2646 struct loader_icd *icd; 2647 VkExtensionProperties *prop; 2648 char **filtered_extension_names = NULL; 2649 VkInstanceCreateInfo icd_create_info; 2650 VkResult res = VK_SUCCESS; 2651 bool success; 2652 2653 icd_create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; 2654 icd_create_info.enabledLayerNameCount = 0; 2655 icd_create_info.ppEnabledLayerNames = NULL; 2656 icd_create_info.pApplicationInfo = pCreateInfo->pApplicationInfo; 2657 icd_create_info.pNext = pCreateInfo->pNext; 2658 2659 /* 2660 * NOTE: Need to filter the extensions to only those 2661 * supported by the ICD. 2662 * No ICD will advertise support for layers. An ICD 2663 * library could support a layer, but it would be 2664 * independent of the actual ICD, just in the same library. 2665 */ 2666 filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionNameCount * sizeof(char *)); 2667 if (!filtered_extension_names) { 2668 return VK_ERROR_OUT_OF_HOST_MEMORY; 2669 } 2670 icd_create_info.ppEnabledExtensionNames = (const char * const *) filtered_extension_names; 2671 2672 for (uint32_t i = 0; i < ptr_instance->icd_libs.count; i++) { 2673 icd = loader_icd_add(ptr_instance, &ptr_instance->icd_libs.list[i]); 2674 if (icd) { 2675 icd_create_info.enabledExtensionNameCount = 0; 2676 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) { 2677 prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], 2678 &ptr_instance->ext_list); 2679 if (prop) { 2680 filtered_extension_names[icd_create_info.enabledExtensionNameCount] = (char *) pCreateInfo->ppEnabledExtensionNames[i]; 2681 icd_create_info.enabledExtensionNameCount++; 2682 } 2683 } 2684 2685 res = ptr_instance->icd_libs.list[i].CreateInstance(&icd_create_info, 2686 pAllocator, 2687 &(icd->instance)); 2688 success = loader_icd_init_entrys( 2689 icd, 2690 icd->instance, 2691 ptr_instance->icd_libs.list[i].GetInstanceProcAddr); 2692 2693 if (res != VK_SUCCESS || !success) 2694 { 2695 ptr_instance->icds = ptr_instance->icds->next; 2696 loader_icd_destroy(ptr_instance, icd); 2697 icd->instance = VK_NULL_HANDLE; 2698 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2699 "ICD ignored: failed to CreateInstance and find entrypoints with ICD"); 2700 } 2701 } 2702 } 2703 2704 /* 2705 * If no ICDs were added to instance list and res is unchanged 2706 * from it's initial value, the loader was unable to find 2707 * a suitable ICD. 
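 * Either way the instance creation fails below: VK_ERROR_INCOMPATIBLE_DRIVER
 * when every ICD was skipped cleanly, or the last ICD's error code otherwise.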
2708 */ 2709 if (ptr_instance->icds == NULL) { 2710 if (res == VK_SUCCESS) { 2711 return VK_ERROR_INCOMPATIBLE_DRIVER; 2712 } else { 2713 return res; 2714 } 2715 } 2716 2717 return VK_SUCCESS; 2718} 2719 2720void VKAPI loader_DestroyInstance( 2721 VkInstance instance, 2722 const VkAllocationCallbacks* pAllocator) 2723{ 2724 struct loader_instance *ptr_instance = loader_instance(instance); 2725 struct loader_icd *icds = ptr_instance->icds; 2726 struct loader_icd *next_icd; 2727 2728 // Remove this instance from the list of instances: 2729 struct loader_instance *prev = NULL; 2730 struct loader_instance *next = loader.instances; 2731 while (next != NULL) { 2732 if (next == ptr_instance) { 2733 // Remove this instance from the list: 2734 if (prev) 2735 prev->next = next->next; 2736 else 2737 loader.instances = next->next; 2738 break; 2739 } 2740 prev = next; 2741 next = next->next; 2742 } 2743 2744 while (icds) { 2745 if (icds->instance) { 2746 icds->DestroyInstance(icds->instance, pAllocator); 2747 } 2748 next_icd = icds->next; 2749 icds->instance = VK_NULL_HANDLE; 2750 loader_icd_destroy(ptr_instance, icds); 2751 2752 icds = next_icd; 2753 } 2754 loader_delete_layer_properties(ptr_instance, &ptr_instance->device_layer_list); 2755 loader_delete_layer_properties(ptr_instance, &ptr_instance->instance_layer_list); 2756 loader_scanned_icd_clear(ptr_instance, &ptr_instance->icd_libs); 2757 loader_destroy_ext_list(ptr_instance, &ptr_instance->ext_list); 2758 for (uint32_t i = 0; i < ptr_instance->total_gpu_count; i++) 2759 loader_destroy_ext_list(ptr_instance, &ptr_instance->phys_devs[i].device_extension_cache); 2760 loader_heap_free(ptr_instance, ptr_instance->phys_devs); 2761} 2762 2763VkResult loader_init_physical_device_info(struct loader_instance *ptr_instance) 2764{ 2765 struct loader_icd *icd; 2766 uint32_t i, j, idx, count = 0; 2767 VkResult res; 2768 struct loader_phys_dev_per_icd *phys_devs; 2769 2770 ptr_instance->total_gpu_count = 0; 2771 phys_devs = (struct loader_phys_dev_per_icd *) loader_stack_alloc( 2772 sizeof(struct loader_phys_dev_per_icd) * 2773 ptr_instance->total_icd_count); 2774 if (!phys_devs) 2775 return VK_ERROR_OUT_OF_HOST_MEMORY; 2776 2777 icd = ptr_instance->icds; 2778 for (i = 0; i < ptr_instance->total_icd_count; i++) { 2779 assert(icd); 2780 res = icd->EnumeratePhysicalDevices(icd->instance, &phys_devs[i].count, NULL); 2781 if (res != VK_SUCCESS) 2782 return res; 2783 count += phys_devs[i].count; 2784 icd = icd->next; 2785 } 2786 2787 ptr_instance->phys_devs = (struct loader_physical_device *) loader_heap_alloc( 2788 ptr_instance, 2789 count * sizeof(struct loader_physical_device), 2790 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2791 if (!ptr_instance->phys_devs) 2792 return VK_ERROR_OUT_OF_HOST_MEMORY; 2793 2794 icd = ptr_instance->icds; 2795 2796 struct loader_physical_device *inst_phys_devs = ptr_instance->phys_devs; 2797 idx = 0; 2798 for (i = 0; i < ptr_instance->total_icd_count; i++) { 2799 assert(icd); 2800 2801 phys_devs[i].phys_devs = (VkPhysicalDevice *) loader_stack_alloc( 2802 phys_devs[i].count * sizeof(VkPhysicalDevice)); 2803 if (!phys_devs[i].phys_devs) { 2804 loader_heap_free(ptr_instance, ptr_instance->phys_devs); 2805 ptr_instance->phys_devs = NULL; 2806 return VK_ERROR_OUT_OF_HOST_MEMORY; 2807 } 2808 res = icd->EnumeratePhysicalDevices( 2809 icd->instance, 2810 &(phys_devs[i].count), 2811 phys_devs[i].phys_devs); 2812 if ((res == VK_SUCCESS)) { 2813 ptr_instance->total_gpu_count += phys_devs[i].count; 2814 for (j = 0; j < phys_devs[i].count; j++) { 
2815 2816 // initialize the loader's physicalDevice object 2817 loader_set_dispatch((void *) &inst_phys_devs[idx], ptr_instance->disp); 2818 inst_phys_devs[idx].this_instance = ptr_instance; 2819 inst_phys_devs[idx].this_icd = icd; 2820 inst_phys_devs[idx].phys_dev = phys_devs[i].phys_devs[j]; 2821 memset(&inst_phys_devs[idx].device_extension_cache, 0, sizeof(struct loader_extension_list)); 2822 2823 idx++; 2824 } 2825 } else { 2826 loader_heap_free(ptr_instance, ptr_instance->phys_devs); 2827 ptr_instance->phys_devs = NULL; 2828 return res; 2829 } 2830 2831 icd = icd->next; 2832 } 2833 2834 return VK_SUCCESS; 2835} 2836 2837VkResult VKAPI loader_EnumeratePhysicalDevices( 2838 VkInstance instance, 2839 uint32_t* pPhysicalDeviceCount, 2840 VkPhysicalDevice* pPhysicalDevices) 2841{ 2842 uint32_t i; 2843 struct loader_instance *ptr_instance = (struct loader_instance *) instance; 2844 VkResult res = VK_SUCCESS; 2845 2846 if (ptr_instance->total_gpu_count == 0) { 2847 res = loader_init_physical_device_info(ptr_instance); 2848 } 2849 2850 *pPhysicalDeviceCount = ptr_instance->total_gpu_count; 2851 if (!pPhysicalDevices) { 2852 return res; 2853 } 2854 2855 for (i = 0; i < ptr_instance->total_gpu_count; i++) { 2856 pPhysicalDevices[i] = (VkPhysicalDevice) &ptr_instance->phys_devs[i]; 2857 } 2858 2859 return res; 2860} 2861 2862void VKAPI loader_GetPhysicalDeviceProperties( 2863 VkPhysicalDevice physicalDevice, 2864 VkPhysicalDeviceProperties* pProperties) 2865{ 2866 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 2867 struct loader_icd *icd = phys_dev->this_icd; 2868 2869 if (icd->GetPhysicalDeviceProperties) 2870 icd->GetPhysicalDeviceProperties(phys_dev->phys_dev, pProperties); 2871} 2872 2873void VKAPI loader_GetPhysicalDeviceQueueFamilyProperties ( 2874 VkPhysicalDevice physicalDevice, 2875 uint32_t* pQueueFamilyPropertyCount, 2876 VkQueueFamilyProperties* pProperties) 2877{ 2878 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 2879 struct loader_icd *icd = phys_dev->this_icd; 2880 2881 if (icd->GetPhysicalDeviceQueueFamilyProperties) 2882 icd->GetPhysicalDeviceQueueFamilyProperties(phys_dev->phys_dev, pQueueFamilyPropertyCount, pProperties); 2883} 2884 2885void VKAPI loader_GetPhysicalDeviceMemoryProperties ( 2886 VkPhysicalDevice physicalDevice, 2887 VkPhysicalDeviceMemoryProperties* pProperties) 2888{ 2889 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 2890 struct loader_icd *icd = phys_dev->this_icd; 2891 2892 if (icd->GetPhysicalDeviceMemoryProperties) 2893 icd->GetPhysicalDeviceMemoryProperties(phys_dev->phys_dev, pProperties); 2894} 2895 2896void VKAPI loader_GetPhysicalDeviceFeatures( 2897 VkPhysicalDevice physicalDevice, 2898 VkPhysicalDeviceFeatures* pFeatures) 2899{ 2900 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 2901 struct loader_icd *icd = phys_dev->this_icd; 2902 2903 if (icd->GetPhysicalDeviceFeatures) 2904 icd->GetPhysicalDeviceFeatures(phys_dev->phys_dev, pFeatures); 2905} 2906 2907void VKAPI loader_GetPhysicalDeviceFormatProperties( 2908 VkPhysicalDevice physicalDevice, 2909 VkFormat format, 2910 VkFormatProperties* pFormatInfo) 2911{ 2912 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 2913 struct loader_icd *icd = phys_dev->this_icd; 2914 2915 if (icd->GetPhysicalDeviceFormatProperties) 2916 icd->GetPhysicalDeviceFormatProperties(phys_dev->phys_dev, 
format, pFormatInfo); 2917} 2918 2919VkResult VKAPI loader_GetPhysicalDeviceImageFormatProperties( 2920 VkPhysicalDevice physicalDevice, 2921 VkFormat format, 2922 VkImageType type, 2923 VkImageTiling tiling, 2924 VkImageUsageFlags usage, 2925 VkImageCreateFlags flags, 2926 VkImageFormatProperties* pImageFormatProperties) 2927{ 2928 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 2929 struct loader_icd *icd = phys_dev->this_icd; 2930 2931 if (!icd->GetPhysicalDeviceImageFormatProperties) 2932 return VK_ERROR_INITIALIZATION_FAILED; 2933 2934 return icd->GetPhysicalDeviceImageFormatProperties(phys_dev->phys_dev, format, 2935 type, tiling, usage, flags, pImageFormatProperties); 2936} 2937 2938void VKAPI loader_GetPhysicalDeviceSparseImageFormatProperties( 2939 VkPhysicalDevice physicalDevice, 2940 VkFormat format, 2941 VkImageType type, 2942 VkSampleCountFlagBits samples, 2943 VkImageUsageFlags usage, 2944 VkImageTiling tiling, 2945 uint32_t* pNumProperties, 2946 VkSparseImageFormatProperties* pProperties) 2947{ 2948 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 2949 struct loader_icd *icd = phys_dev->this_icd; 2950 2951 if (icd->GetPhysicalDeviceSparseImageFormatProperties) 2952 icd->GetPhysicalDeviceSparseImageFormatProperties(phys_dev->phys_dev, format, type, samples, usage, tiling, pNumProperties, pProperties); 2953} 2954 2955VkResult VKAPI loader_CreateDevice( 2956 VkPhysicalDevice physicalDevice, 2957 const VkDeviceCreateInfo* pCreateInfo, 2958 const VkAllocationCallbacks* pAllocator, 2959 VkDevice* pDevice) 2960{ 2961 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 2962 struct loader_icd *icd = phys_dev->this_icd; 2963 struct loader_device *dev; 2964 const struct loader_instance *inst; 2965 VkDeviceCreateInfo device_create_info; 2966 char **filtered_extension_names = NULL; 2967 VkResult res; 2968 2969 assert(pCreateInfo->requestedQueueCount >= 1); 2970 2971 if (!icd) 2972 return VK_ERROR_INITIALIZATION_FAILED; 2973 2974 inst = phys_dev->this_instance; 2975 2976 if (!icd->CreateDevice) { 2977 return VK_ERROR_INITIALIZATION_FAILED; 2978 } 2979 2980 /* validate any app enabled layers are available */ 2981 if (pCreateInfo->enabledLayerNameCount > 0) { 2982 res = loader_validate_layers(pCreateInfo->enabledLayerNameCount, 2983 pCreateInfo->ppEnabledLayerNames, 2984 &inst->device_layer_list); 2985 if (res != VK_SUCCESS) { 2986 return res; 2987 } 2988 } 2989 2990 /* Get the physical device extensions if they haven't been retrieved yet */ 2991 if (phys_dev->device_extension_cache.capacity == 0) { 2992 if (!loader_init_ext_list(inst, &phys_dev->device_extension_cache)) { 2993 return VK_ERROR_OUT_OF_HOST_MEMORY; 2994 } 2995 res = loader_add_physical_device_extensions( 2996 inst, physicalDevice, 2997 phys_dev->this_icd->this_icd_lib->lib_name, 2998 &phys_dev->device_extension_cache); 2999 if (res != VK_SUCCESS) { 3000 return res; 3001 } 3002 } 3003 /* make sure requested extensions to be enabled are supported */ 3004 res = loader_validate_device_extensions(phys_dev, &inst->device_layer_list, pCreateInfo); 3005 if (res != VK_SUCCESS) { 3006 return res; 3007 } 3008 3009 /* 3010 * NOTE: Need to filter the extensions to only those 3011 * supported by the ICD. 3012 * No ICD will advertise support for layers. An ICD 3013 * library could support a layer, but it would be 3014 * independent of the actual ICD, just in the same library. 
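 *
 * Illustrative example (extension names are only examples): if the app
 * requests VK_KHR_swapchain plus an extension implemented solely by a
 * layer, only VK_KHR_swapchain is found in device_extension_cache below
 * and copied into filtered_extension_names for the ICD; the layer-only
 * name is dropped from the ICD's create info.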
3015 */ 3016 filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionNameCount * sizeof(char *)); 3017 if (!filtered_extension_names) { 3018 return VK_ERROR_OUT_OF_HOST_MEMORY; 3019 } 3020 3021 /* Copy user's data */ 3022 memcpy(&device_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo)); 3023 3024 /* ICD's do not use layers */ 3025 device_create_info.enabledLayerNameCount = 0; 3026 device_create_info.ppEnabledLayerNames = NULL; 3027 3028 device_create_info.enabledExtensionNameCount = 0; 3029 device_create_info.ppEnabledExtensionNames = (const char * const *) filtered_extension_names; 3030 3031 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) { 3032 const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i]; 3033 VkExtensionProperties *prop = get_extension_property(extension_name, 3034 &phys_dev->device_extension_cache); 3035 if (prop) { 3036 filtered_extension_names[device_create_info.enabledExtensionNameCount] = (char *) extension_name; 3037 device_create_info.enabledExtensionNameCount++; 3038 } 3039 } 3040 3041 // since physicalDevice object maybe wrapped by a layer need to get unwrapped version 3042 // we haven't yet called down the chain for the layer to unwrap the object 3043 res = icd->CreateDevice(phys_dev->phys_dev, pCreateInfo, pAllocator, pDevice); 3044 if (res != VK_SUCCESS) { 3045 return res; 3046 } 3047 3048 dev = loader_add_logical_device(inst, *pDevice, &icd->logical_device_list); 3049 if (dev == NULL) { 3050 return VK_ERROR_OUT_OF_HOST_MEMORY; 3051 } 3052 PFN_vkGetDeviceProcAddr get_proc_addr = icd->GetDeviceProcAddr; 3053 loader_init_device_dispatch_table(&dev->loader_dispatch, get_proc_addr, 3054 *pDevice, *pDevice); 3055 3056 dev->loader_dispatch.CreateDevice = scratch_vkCreateDevice; 3057 loader_init_dispatch(*pDevice, &dev->loader_dispatch); 3058 3059 /* activate any layers on device chain which terminates with device*/ 3060 res = loader_enable_device_layers(inst, icd, dev, pCreateInfo, &inst->device_layer_list); 3061 if (res != VK_SUCCESS) { 3062 loader_destroy_logical_device(inst, dev); 3063 return res; 3064 } 3065 loader_activate_device_layers(inst, dev, *pDevice); 3066 3067 res = dev->loader_dispatch.CreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice); 3068 3069 dev->loader_dispatch.CreateDevice = icd->CreateDevice; 3070 3071 return res; 3072} 3073 3074/** 3075 * Get an instance level or global level entry point address. 3076 * @param instance 3077 * @param pName 3078 * @return 3079 * If instance == NULL returns a global level functions only 3080 * If instance is valid returns a trampoline entry point for all dispatchable Vulkan 3081 * functions both core and extensions. 3082 */ 3083LOADER_EXPORT PFN_vkVoidFunction VKAPI vkGetInstanceProcAddr(VkInstance instance, const char * pName) 3084{ 3085 3086 void *addr; 3087 3088 addr = globalGetProcAddr(pName); 3089 if (instance == VK_NULL_HANDLE) { 3090 // get entrypoint addresses that are global (no dispatchable object) 3091 3092 return addr; 3093 } else { 3094 // if a global entrypoint return NULL 3095 if (addr) 3096 return NULL; 3097 } 3098 3099 struct loader_instance *ptr_instance = loader_get_instance(instance); 3100 if (ptr_instance == NULL) 3101 return NULL; 3102 // Return trampoline code for non-global entrypoints including any extensions. 3103 // Device extensions are returned if a layer or ICD supports the extension. 
3104 // Instance extensions are returned if the extension is enabled and the loader 3105 // or someone else supports the extension 3106 return trampolineGetProcAddr(ptr_instance, pName); 3107 3108} 3109 3110/** 3111 * Get a device level or global level entry point address. 3112 * @param device 3113 * @param pName 3114 * @return 3115 * If device is valid, returns a device relative entry point for device level 3116 * entry points both core and extensions. 3117 * Device relative means call down the device chain. 3118 */ 3119LOADER_EXPORT PFN_vkVoidFunction VKAPI vkGetDeviceProcAddr(VkDevice device, const char * pName) 3120{ 3121 void *addr; 3122 3123 /* for entrypoints that loader must handle (ie non-dispatchable or create object) 3124 make sure the loader entrypoint is returned */ 3125 addr = loader_non_passthrough_gdpa(pName); 3126 if (addr) { 3127 return addr; 3128 } 3129 3130 /* Although CreateDevice is on device chain it's dispatchable object isn't 3131 * a VkDevice or child of VkDevice so return NULL. 3132 */ 3133 if (!strcmp(pName, "CreateDevice")) 3134 return NULL; 3135 3136 /* return the dispatch table entrypoint for the fastest case */ 3137 const VkLayerDispatchTable *disp_table = * (VkLayerDispatchTable **) device; 3138 if (disp_table == NULL) 3139 return NULL; 3140 3141 addr = loader_lookup_device_dispatch_table(disp_table, pName); 3142 if (addr) 3143 return addr; 3144 3145 if (disp_table->GetDeviceProcAddr == NULL) 3146 return NULL; 3147 return disp_table->GetDeviceProcAddr(device, pName); 3148} 3149 3150LOADER_EXPORT VkResult VKAPI vkEnumerateInstanceExtensionProperties( 3151 const char* pLayerName, 3152 uint32_t* pPropertyCount, 3153 VkExtensionProperties* pProperties) 3154{ 3155 struct loader_extension_list *global_ext_list=NULL; 3156 struct loader_layer_list instance_layers; 3157 struct loader_extension_list icd_extensions; 3158 struct loader_icd_libs icd_libs; 3159 uint32_t copy_size; 3160 3161 tls_instance = NULL; 3162 memset(&icd_extensions, 0, sizeof(icd_extensions)); 3163 memset(&instance_layers, 0, sizeof(instance_layers)); 3164 loader_platform_thread_once(&once_init, loader_initialize); 3165 3166 /* get layer libraries if needed */ 3167 if (pLayerName && strlen(pLayerName) != 0) { 3168 loader_layer_scan(NULL, &instance_layers, NULL); 3169 for (uint32_t i = 0; i < instance_layers.count; i++) { 3170 struct loader_layer_properties *props = &instance_layers.list[i]; 3171 if (strcmp(props->info.layerName, pLayerName) == 0) { 3172 global_ext_list = &props->instance_extension_list; 3173 } 3174 } 3175 } 3176 else { 3177 /* Scan/discover all ICD libraries */ 3178 memset(&icd_libs, 0 , sizeof(struct loader_icd_libs)); 3179 loader_icd_scan(NULL, &icd_libs); 3180 /* get extensions from all ICD's, merge so no duplicates */ 3181 loader_get_icd_loader_instance_extensions(NULL, &icd_libs, &icd_extensions); 3182 loader_scanned_icd_clear(NULL, &icd_libs); 3183 global_ext_list = &icd_extensions; 3184 } 3185 3186 if (global_ext_list == NULL) { 3187 loader_destroy_layer_list(NULL, &instance_layers); 3188 return VK_ERROR_LAYER_NOT_PRESENT; 3189 } 3190 3191 if (pProperties == NULL) { 3192 *pPropertyCount = global_ext_list->count; 3193 loader_destroy_layer_list(NULL, &instance_layers); 3194 loader_destroy_ext_list(NULL, &icd_extensions); 3195 return VK_SUCCESS; 3196 } 3197 3198 copy_size = *pPropertyCount < global_ext_list->count ? 
*pPropertyCount : global_ext_list->count; 3199 for (uint32_t i = 0; i < copy_size; i++) { 3200 memcpy(&pProperties[i], 3201 &global_ext_list->list[i], 3202 sizeof(VkExtensionProperties)); 3203 } 3204 *pPropertyCount = copy_size; 3205 loader_destroy_ext_list(NULL, &icd_extensions); 3206 3207 if (copy_size < global_ext_list->count) { 3208 loader_destroy_layer_list(NULL, &instance_layers); 3209 return VK_INCOMPLETE; 3210 } 3211 3212 loader_destroy_layer_list(NULL, &instance_layers); 3213 return VK_SUCCESS; 3214} 3215 3216LOADER_EXPORT VkResult VKAPI vkEnumerateInstanceLayerProperties( 3217 uint32_t* pPropertyCount, 3218 VkLayerProperties* pProperties) 3219{ 3220 3221 struct loader_layer_list instance_layer_list; 3222 tls_instance = NULL; 3223 3224 loader_platform_thread_once(&once_init, loader_initialize); 3225 3226 uint32_t copy_size; 3227 3228 /* get layer libraries */ 3229 memset(&instance_layer_list, 0, sizeof(instance_layer_list)); 3230 loader_layer_scan(NULL, &instance_layer_list, NULL); 3231 3232 if (pProperties == NULL) { 3233 *pPropertyCount = instance_layer_list.count; 3234 loader_destroy_layer_list(NULL, &instance_layer_list); 3235 return VK_SUCCESS; 3236 } 3237 3238 copy_size = (*pPropertyCount < instance_layer_list.count) ? *pPropertyCount : instance_layer_list.count; 3239 for (uint32_t i = 0; i < copy_size; i++) { 3240 memcpy(&pProperties[i], &instance_layer_list.list[i].info, sizeof(VkLayerProperties)); 3241 } 3242 *pPropertyCount = copy_size; 3243 loader_destroy_layer_list(NULL, &instance_layer_list); 3244 3245 if (copy_size < instance_layer_list.count) { 3246 return VK_INCOMPLETE; 3247 } 3248 3249 return VK_SUCCESS; 3250} 3251 3252VkResult VKAPI loader_EnumerateDeviceExtensionProperties( 3253 VkPhysicalDevice physicalDevice, 3254 const char* pLayerName, 3255 uint32_t* pPropertyCount, 3256 VkExtensionProperties* pProperties) 3257{ 3258 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3259 uint32_t copy_size; 3260 3261 uint32_t count; 3262 struct loader_extension_list *dev_ext_list=NULL; 3263 3264 /* get layer libraries if needed */ 3265 if (pLayerName && strlen(pLayerName) != 0) { 3266 for (uint32_t i = 0; i < phys_dev->this_instance->device_layer_list.count; i++) { 3267 struct loader_layer_properties *props = &phys_dev->this_instance->device_layer_list.list[i]; 3268 if (strcmp(props->info.layerName, pLayerName) == 0) { 3269 dev_ext_list = &props->device_extension_list; 3270 } 3271 } 3272 } 3273 else { 3274 /* this case is during the call down the instance chain */ 3275 struct loader_icd *icd = phys_dev->this_icd; 3276 VkResult res; 3277 res = icd->EnumerateDeviceExtensionProperties(phys_dev->phys_dev, NULL, pPropertyCount, pProperties); 3278 if (pProperties != NULL && res == VK_SUCCESS) { 3279 /* initialize dev_extension list within the physicalDevice object */ 3280 res = loader_init_physical_device_extensions(phys_dev->this_instance, 3281 phys_dev, *pPropertyCount, pProperties, 3282 &phys_dev->device_extension_cache); 3283 } 3284 return res; 3285 } 3286 3287 count = (dev_ext_list == NULL) ? 0: dev_ext_list->count; 3288 if (pProperties == NULL) { 3289 *pPropertyCount = count; 3290 return VK_SUCCESS; 3291 } 3292 3293 copy_size = *pPropertyCount < count ? 
*pPropertyCount : count; 3294 for (uint32_t i = 0; i < copy_size; i++) { 3295 memcpy(&pProperties[i], 3296 &dev_ext_list->list[i], 3297 sizeof(VkExtensionProperties)); 3298 } 3299 *pPropertyCount = copy_size; 3300 3301 if (copy_size < count) { 3302 return VK_INCOMPLETE; 3303 } 3304 3305 return VK_SUCCESS; 3306} 3307 3308VkResult VKAPI loader_EnumerateDeviceLayerProperties( 3309 VkPhysicalDevice physicalDevice, 3310 uint32_t* pPropertyCount, 3311 VkLayerProperties* pProperties) 3312{ 3313 uint32_t copy_size; 3314 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3315 3316 uint32_t count = phys_dev->this_instance->device_layer_list.count; 3317 3318 if (pProperties == NULL) { 3319 *pPropertyCount = count; 3320 return VK_SUCCESS; 3321 } 3322 3323 copy_size = (*pPropertyCount < count) ? *pPropertyCount : count; 3324 for (uint32_t i = 0; i < copy_size; i++) { 3325 memcpy(&pProperties[i], &(phys_dev->this_instance->device_layer_list.list[i].info), sizeof(VkLayerProperties)); 3326 } 3327 *pPropertyCount = copy_size; 3328 3329 if (copy_size < count) { 3330 return VK_INCOMPLETE; 3331 } 3332 3333 return VK_SUCCESS; 3334} 3335