loader.c revision c50a5987320b2578adba60ae7071a099675977e8
1/* 2 * 3 * Copyright (c) 2014-2016 The Khronos Group Inc. 4 * Copyright (c) 2014-2016 Valve Corporation 5 * Copyright (c) 2014-2016 LunarG, Inc. 6 * Copyright (C) 2015 Google Inc. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a copy 9 * of this software and/or associated documentation files (the "Materials"), to 10 * deal in the Materials without restriction, including without limitation the 11 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 12 * sell copies of the Materials, and to permit persons to whom the Materials are 13 * furnished to do so, subject to the following conditions: 14 * 15 * The above copyright notice(s) and this permission notice shall be included in 16 * all copies or substantial portions of the Materials. 17 * 18 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 21 * 22 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE 25 * USE OR OTHER DEALINGS IN THE MATERIALS. 26 * 27 * Author: Jon Ashburn <jon@lunarg.com> 28 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com> 29 * 30 */ 31 32#define _GNU_SOURCE 33#include <stdio.h> 34#include <stdlib.h> 35#include <stdarg.h> 36#include <stdbool.h> 37#include <string.h> 38 39#include <sys/types.h> 40#if defined(_WIN32) 41#include "dirent_on_windows.h" 42#else // _WIN32 43#include <dirent.h> 44#endif // _WIN32 45#include "vk_loader_platform.h" 46#include "loader.h" 47#include "gpa_helper.h" 48#include "table_ops.h" 49#include "debug_report.h" 50#include "wsi.h" 51#include "vulkan/vk_icd.h" 52#include "cJSON.h" 53#include "murmurhash.h" 54 55static loader_platform_dl_handle 56loader_add_layer_lib(const struct loader_instance *inst, const char *chain_type, 57 struct loader_layer_properties *layer_prop); 58 59static void loader_remove_layer_lib(struct loader_instance *inst, 60 struct loader_layer_properties *layer_prop); 61 62struct loader_struct loader = {0}; 63// TLS for instance for alloc/free callbacks 64THREAD_LOCAL_DECL struct loader_instance *tls_instance; 65 66static bool loader_init_generic_list(const struct loader_instance *inst, 67 struct loader_generic_list *list_info, 68 size_t element_size); 69 70static size_t loader_platform_combine_path(char *dest, size_t len, ...); 71 72struct loader_phys_dev_per_icd { 73 uint32_t count; 74 VkPhysicalDevice *phys_devs; 75}; 76 77enum loader_debug { 78 LOADER_INFO_BIT = 0x01, 79 LOADER_WARN_BIT = 0x02, 80 LOADER_PERF_BIT = 0x04, 81 LOADER_ERROR_BIT = 0x08, 82 LOADER_DEBUG_BIT = 0x10, 83}; 84 85uint32_t g_loader_debug = 0; 86uint32_t g_loader_log_msgs = 0; 87 88// thread safety lock for accessing global data structures such as "loader" 89// all entrypoints on the instance chain need to be locked except GPA 90// additionally CreateDevice and DestroyDevice needs to be locked 91loader_platform_thread_mutex loader_lock; 92loader_platform_thread_mutex loader_json_lock; 93 94const char *std_validation_str = "VK_LAYER_LUNARG_standard_validation"; 95 96// This table contains the loader's instance dispatch table, which contains 97// default functions if no instance layers are activated. This contains 98// pointers to "terminator functions". 
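// Illustrative call flow (a sketch for orientation, not code from this file):
// with no instance layers enabled, an application call such as
// vkEnumeratePhysicalDevices() reaches its trampoline, which looks up this
// dispatch table and lands in the matching terminator, which in turn fans
// out to each ICD:
//
//   app -> vkEnumeratePhysicalDevices()            (trampoline)
//       -> instance_disp.EnumeratePhysicalDevices  (== loader_EnumeratePhysicalDevices)
//       -> icd->EnumeratePhysicalDevices(...)      (per-ICD entry point)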
99const VkLayerInstanceDispatchTable instance_disp = { 100 .GetInstanceProcAddr = vkGetInstanceProcAddr, 101 .DestroyInstance = loader_DestroyInstance, 102 .EnumeratePhysicalDevices = loader_EnumeratePhysicalDevices, 103 .GetPhysicalDeviceFeatures = loader_GetPhysicalDeviceFeatures, 104 .GetPhysicalDeviceFormatProperties = 105 loader_GetPhysicalDeviceFormatProperties, 106 .GetPhysicalDeviceImageFormatProperties = 107 loader_GetPhysicalDeviceImageFormatProperties, 108 .GetPhysicalDeviceProperties = loader_GetPhysicalDeviceProperties, 109 .GetPhysicalDeviceQueueFamilyProperties = 110 loader_GetPhysicalDeviceQueueFamilyProperties, 111 .GetPhysicalDeviceMemoryProperties = 112 loader_GetPhysicalDeviceMemoryProperties, 113 .EnumerateDeviceExtensionProperties = 114 loader_EnumerateDeviceExtensionProperties, 115 .EnumerateDeviceLayerProperties = loader_EnumerateDeviceLayerProperties, 116 .GetPhysicalDeviceSparseImageFormatProperties = 117 loader_GetPhysicalDeviceSparseImageFormatProperties, 118 .DestroySurfaceKHR = loader_DestroySurfaceKHR, 119 .GetPhysicalDeviceSurfaceSupportKHR = 120 loader_GetPhysicalDeviceSurfaceSupportKHR, 121 .GetPhysicalDeviceSurfaceCapabilitiesKHR = 122 loader_GetPhysicalDeviceSurfaceCapabilitiesKHR, 123 .GetPhysicalDeviceSurfaceFormatsKHR = 124 loader_GetPhysicalDeviceSurfaceFormatsKHR, 125 .GetPhysicalDeviceSurfacePresentModesKHR = 126 loader_GetPhysicalDeviceSurfacePresentModesKHR, 127 .CreateDebugReportCallbackEXT = loader_CreateDebugReportCallback, 128 .DestroyDebugReportCallbackEXT = loader_DestroyDebugReportCallback, 129 .DebugReportMessageEXT = loader_DebugReportMessage, 130#ifdef VK_USE_PLATFORM_MIR_KHR 131 .CreateMirSurfaceKHR = loader_CreateMirSurfaceKHR, 132 .GetPhysicalDeviceMirPresentationSupportKHR = 133 loader_GetPhysicalDeviceMirPresentationSupportKHR, 134#endif 135#ifdef VK_USE_PLATFORM_WAYLAND_KHR 136 .CreateWaylandSurfaceKHR = loader_CreateWaylandSurfaceKHR, 137 .GetPhysicalDeviceWaylandPresentationSupportKHR = 138 loader_GetPhysicalDeviceWaylandPresentationSupportKHR, 139#endif 140#ifdef VK_USE_PLATFORM_WIN32_KHR 141 .CreateWin32SurfaceKHR = loader_CreateWin32SurfaceKHR, 142 .GetPhysicalDeviceWin32PresentationSupportKHR = 143 loader_GetPhysicalDeviceWin32PresentationSupportKHR, 144#endif 145#ifdef VK_USE_PLATFORM_XCB_KHR 146 .CreateXcbSurfaceKHR = loader_CreateXcbSurfaceKHR, 147 .GetPhysicalDeviceXcbPresentationSupportKHR = 148 loader_GetPhysicalDeviceXcbPresentationSupportKHR, 149#endif 150#ifdef VK_USE_PLATFORM_XLIB_KHR 151 .CreateXlibSurfaceKHR = loader_CreateXlibSurfaceKHR, 152 .GetPhysicalDeviceXlibPresentationSupportKHR = 153 loader_GetPhysicalDeviceXlibPresentationSupportKHR, 154#endif 155#ifdef VK_USE_PLATFORM_ANDROID_KHR 156 .CreateAndroidSurfaceKHR = loader_CreateAndroidSurfaceKHR, 157#endif 158}; 159 160LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init); 161 162void *loader_heap_alloc(const struct loader_instance *instance, size_t size, 163 VkSystemAllocationScope alloc_scope) { 164 if (instance && instance->alloc_callbacks.pfnAllocation) { 165 /* TODO: What should default alignment be? 1, 4, 8, other? 
*/ 166 return instance->alloc_callbacks.pfnAllocation( 167 instance->alloc_callbacks.pUserData, size, sizeof(int), 168 alloc_scope); 169 } 170 return malloc(size); 171} 172 173void loader_heap_free(const struct loader_instance *instance, void *pMemory) { 174 if (pMemory == NULL) 175 return; 176 if (instance && instance->alloc_callbacks.pfnFree) { 177 instance->alloc_callbacks.pfnFree(instance->alloc_callbacks.pUserData, 178 pMemory); 179 return; 180 } 181 free(pMemory); 182} 183 184void *loader_heap_realloc(const struct loader_instance *instance, void *pMemory, 185 size_t orig_size, size_t size, 186 VkSystemAllocationScope alloc_scope) { 187 if (pMemory == NULL || orig_size == 0) 188 return loader_heap_alloc(instance, size, alloc_scope); 189 if (size == 0) { 190 loader_heap_free(instance, pMemory); 191 return NULL; 192 } 193 // TODO use the callback realloc function 194 if (instance && instance->alloc_callbacks.pfnAllocation) { 195 if (size <= orig_size) { 196 memset(((uint8_t *)pMemory) + size, 0, orig_size - size); 197 return pMemory; 198 } 199 /* TODO: What should default alignment be? 1, 4, 8, other? */ 200 void *new_ptr = instance->alloc_callbacks.pfnAllocation( 201 instance->alloc_callbacks.pUserData, size, sizeof(int), 202 alloc_scope); 203 if (!new_ptr) 204 return NULL; 205 memcpy(new_ptr, pMemory, orig_size); 206 instance->alloc_callbacks.pfnFree(instance->alloc_callbacks.pUserData, 207 pMemory); 208 return new_ptr; 209 } 210 return realloc(pMemory, size); 211} 212 213void *loader_tls_heap_alloc(size_t size) { 214 return loader_heap_alloc(tls_instance, size, 215 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 216} 217 218void loader_tls_heap_free(void *pMemory) { 219 loader_heap_free(tls_instance, pMemory); 220} 221 222void loader_log(const struct loader_instance *inst, VkFlags msg_type, 223 int32_t msg_code, const char *format, ...) { 224 char msg[512]; 225 va_list ap; 226 int ret; 227 228 va_start(ap, format); 229 ret = vsnprintf(msg, sizeof(msg), format, ap); 230 if ((ret >= (int)sizeof(msg)) || ret < 0) { 231 msg[sizeof(msg) - 1] = '\0'; 232 } 233 va_end(ap); 234 235 if (inst) { 236 util_DebugReportMessage(inst, msg_type, 237 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 238 (uint64_t)inst, 0, msg_code, "loader", msg); 239 } 240 241 if (!(msg_type & g_loader_log_msgs)) { 242 return; 243 } 244 245#if defined(WIN32) 246 OutputDebugString(msg); 247 OutputDebugString("\n"); 248#endif 249 fputs(msg, stderr); 250 fputc('\n', stderr); 251} 252 253#if defined(WIN32) 254static char *loader_get_next_path(char *path); 255/** 256* Find the list of registry files (names within a key) in key "location". 257* 258* This function looks in the registry (hive = DEFAULT_VK_REGISTRY_HIVE) key as 259*given in "location" 260* for a list or name/values which are added to a returned list (function return 261*value). 262* The DWORD values within the key must be 0 or they are skipped. 263* Function return is a string with a ';' separated list of filenames. 264* Function return is NULL if no valid name/value pairs are found in the key, 265* or the key is not found. 266* 267* \returns 268* A string list of filenames as pointer. 269* When done using the returned string list, pointer should be freed. 
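*
* Illustrative example (hypothetical key and paths): if "location" names a key
* whose values are
*
*   C:\drivers\my_icd.json    (DWORD data 0)
*   C:\drivers\other_icd.json (DWORD data 0)
*
* the function would return the single heap string
* "C:\drivers\my_icd.json;C:\drivers\other_icd.json" (names joined with
* PATH_SEPERATOR), which the caller later frees, e.g. with loader_heap_free().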
270*/ 271static char *loader_get_registry_files(const struct loader_instance *inst, 272 char *location) { 273 LONG rtn_value; 274 HKEY hive, key; 275 DWORD access_flags; 276 char name[2048]; 277 char *out = NULL; 278 char *loc = location; 279 char *next; 280 DWORD idx = 0; 281 DWORD name_size = sizeof(name); 282 DWORD value; 283 DWORD total_size = 4096; 284 DWORD value_size = sizeof(value); 285 286 while (*loc) { 287 next = loader_get_next_path(loc); 288 hive = DEFAULT_VK_REGISTRY_HIVE; 289 access_flags = KEY_QUERY_VALUE; 290 rtn_value = RegOpenKeyEx(hive, loc, 0, access_flags, &key); 291 if (rtn_value != ERROR_SUCCESS) { 292 // We still couldn't find the key, so give up: 293 loc = next; 294 continue; 295 } 296 297 while ((rtn_value = RegEnumValue(key, idx++, name, &name_size, NULL, 298 NULL, (LPBYTE)&value, &value_size)) == 299 ERROR_SUCCESS) { 300 if (value_size == sizeof(value) && value == 0) { 301 if (out == NULL) { 302 out = loader_heap_alloc( 303 inst, total_size, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 304 out[0] = '\0'; 305 } else if (strlen(out) + name_size + 1 > total_size) { 306 out = loader_heap_realloc( 307 inst, out, total_size, total_size * 2, 308 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 309 total_size *= 2; 310 } 311 if (out == NULL) { 312 loader_log( 313 inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 314 "Out of memory, failed loader_get_registry_files"); 315 return NULL; 316 } 317 if (strlen(out) == 0) 318 snprintf(out, name_size + 1, "%s", name); 319 else 320 snprintf(out + strlen(out), name_size + 2, "%c%s", 321 PATH_SEPERATOR, name); 322 } 323 name_size = 2048; 324 } 325 loc = next; 326 } 327 328 return out; 329} 330 331#endif // WIN32 332 333/** 334 * Combine path elements, separating each element with the platform-specific 335 * directory separator, and save the combined string to a destination buffer, 336 * not exceeding the given length. Path elements are given as variadic args, 337 * with a NULL element terminating the list. 338 * 339 * \returns the total length of the combined string, not including an ASCII 340 * NUL termination character. This length may exceed the available storage: 341 * in this case, the written string will be truncated to avoid a buffer 342 * overrun, and the return value will greater than or equal to the storage 343 * size. A NULL argument may be provided as the destination buffer in order 344 * to determine the required string length without actually writing a string. 345 */ 346 347static size_t loader_platform_combine_path(char *dest, size_t len, ...) { 348 size_t required_len = 0; 349 va_list ap; 350 const char *component; 351 352 va_start(ap, len); 353 354 while ((component = va_arg(ap, const char *))) { 355 if (required_len > 0) { 356 // This path element is not the first non-empty element; prepend 357 // a directory separator if space allows 358 if (dest && required_len + 1 < len) { 359 snprintf(dest + required_len, len - required_len, "%c", 360 DIRECTORY_SYMBOL); 361 } 362 required_len++; 363 } 364 365 if (dest && required_len < len) { 366 strncpy(dest + required_len, component, len - required_len); 367 } 368 required_len += strlen(component); 369 } 370 371 va_end(ap); 372 373 // strncpy(3) won't add a NUL terminating byte in the event of truncation. 374 if (dest && required_len >= len) { 375 dest[len - 1] = '\0'; 376 } 377 378 return required_len; 379} 380 381/** 382 * Given string of three part form "maj.min.pat" convert to a vulkan version 383 * number. 
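 *
 * Example: loader_make_version("1.0.3") is equivalent to
 * VK_MAKE_VERSION(1, 0, 3); a string with extra '.'-separated components
 * (e.g. "1.0.3.0") is rejected and yields 0.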
 */
static uint32_t loader_make_version(const char *vers_str) {
    uint32_t vers = 0, major = 0, minor = 0, patch = 0;
    char *minor_str = NULL;
    char *patch_str = NULL;
    char *cstr;
    char *str;

    if (!vers_str)
        return vers;
    cstr = loader_stack_alloc(strlen(vers_str) + 1);
    strcpy(cstr, vers_str);
    while ((str = strchr(cstr, '.')) != NULL) {
        if (minor_str == NULL) {
            minor_str = str + 1;
            *str = '\0';
            major = atoi(cstr);
        } else if (patch_str == NULL) {
            patch_str = str + 1;
            *str = '\0';
            minor = atoi(minor_str);
        } else {
            return vers;
        }
        cstr = str + 1;
    }
    // Guard against strings that are not in full "maj.min.pat" form so that
    // atoi() is never handed a NULL pointer.
    if (minor_str == NULL)
        return vers;
    if (patch_str == NULL)
        minor = atoi(minor_str);
    else
        patch = atoi(patch_str);

    return VK_MAKE_VERSION(major, minor, patch);
}

bool compare_vk_extension_properties(const VkExtensionProperties *op1,
                                     const VkExtensionProperties *op2) {
    return strcmp(op1->extensionName, op2->extensionName) == 0 ? true : false;
}

/**
 * Search the given ext_array for an extension
 * matching the given vk_ext_prop
 */
bool has_vk_extension_property_array(const VkExtensionProperties *vk_ext_prop,
                                     const uint32_t count,
                                     const VkExtensionProperties *ext_array) {
    for (uint32_t i = 0; i < count; i++) {
        if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i]))
            return true;
    }
    return false;
}

/**
 * Search the given ext_list for an extension
 * matching the given vk_ext_prop
 */
bool has_vk_extension_property(const VkExtensionProperties *vk_ext_prop,
                               const struct loader_extension_list *ext_list) {
    for (uint32_t i = 0; i < ext_list->count; i++) {
        if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop))
            return true;
    }
    return false;
}

static inline bool loader_is_layer_type_device(const enum layer_type type) {
    if ((type & VK_LAYER_TYPE_DEVICE_EXPLICIT) ||
        (type & VK_LAYER_TYPE_DEVICE_IMPLICIT))
        return true;
    return false;
}

/*
 * Search the given layer list for a layer matching the given layer name
 */
static struct loader_layer_properties *
loader_get_layer_property(const char *name,
                          const struct loader_layer_list *layer_list) {
    for (uint32_t i = 0; i < layer_list->count; i++) {
        const VkLayerProperties *item = &layer_list->list[i].info;
        if (strcmp(name, item->layerName) == 0)
            return &layer_list->list[i];
    }
    return NULL;
}

/**
 * Get the next unused layer property in the list. Init the property to zero.
470 */ 471static struct loader_layer_properties * 472loader_get_next_layer_property(const struct loader_instance *inst, 473 struct loader_layer_list *layer_list) { 474 if (layer_list->capacity == 0) { 475 layer_list->list = 476 loader_heap_alloc(inst, sizeof(struct loader_layer_properties) * 64, 477 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 478 if (layer_list->list == NULL) { 479 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 480 "Out of memory can't add any layer properties to list"); 481 return NULL; 482 } 483 memset(layer_list->list, 0, 484 sizeof(struct loader_layer_properties) * 64); 485 layer_list->capacity = sizeof(struct loader_layer_properties) * 64; 486 } 487 488 // ensure enough room to add an entry 489 if ((layer_list->count + 1) * sizeof(struct loader_layer_properties) > 490 layer_list->capacity) { 491 layer_list->list = loader_heap_realloc( 492 inst, layer_list->list, layer_list->capacity, 493 layer_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 494 if (layer_list->list == NULL) { 495 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 496 "realloc failed for layer list"); 497 } 498 layer_list->capacity *= 2; 499 } 500 501 layer_list->count++; 502 return &(layer_list->list[layer_list->count - 1]); 503} 504 505/** 506 * Remove all layer properties entrys from the list 507 */ 508void loader_delete_layer_properties(const struct loader_instance *inst, 509 struct loader_layer_list *layer_list) { 510 uint32_t i, j; 511 struct loader_device_extension_list *dev_ext_list; 512 if (!layer_list) 513 return; 514 515 for (i = 0; i < layer_list->count; i++) { 516 loader_destroy_generic_list( 517 inst, (struct loader_generic_list *)&layer_list->list[i] 518 .instance_extension_list); 519 dev_ext_list = &layer_list->list[i].device_extension_list; 520 if (dev_ext_list->capacity > 0 && 521 dev_ext_list->list->entrypoint_count > 0) { 522 for (j = 0; j < dev_ext_list->list->entrypoint_count; j++) { 523 loader_heap_free(inst, dev_ext_list->list->entrypoints[j]); 524 } 525 loader_heap_free(inst, dev_ext_list->list->entrypoints); 526 } 527 loader_destroy_generic_list(inst, 528 (struct loader_generic_list *)dev_ext_list); 529 } 530 layer_list->count = 0; 531 532 if (layer_list->capacity > 0) { 533 layer_list->capacity = 0; 534 loader_heap_free(inst, layer_list->list); 535 } 536} 537 538static void loader_add_instance_extensions( 539 const struct loader_instance *inst, 540 const PFN_vkEnumerateInstanceExtensionProperties fp_get_props, 541 const char *lib_name, struct loader_extension_list *ext_list) { 542 uint32_t i, count = 0; 543 VkExtensionProperties *ext_props; 544 VkResult res; 545 546 if (!fp_get_props) { 547 /* No EnumerateInstanceExtensionProperties defined */ 548 return; 549 } 550 551 res = fp_get_props(NULL, &count, NULL); 552 if (res != VK_SUCCESS) { 553 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, 554 "Error getting Instance extension count from %s", lib_name); 555 return; 556 } 557 558 if (count == 0) { 559 /* No ExtensionProperties to report */ 560 return; 561 } 562 563 ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties)); 564 565 res = fp_get_props(NULL, &count, ext_props); 566 if (res != VK_SUCCESS) { 567 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, 568 "Error getting Instance extensions from %s", lib_name); 569 return; 570 } 571 572 for (i = 0; i < count; i++) { 573 char spec_version[64]; 574 575 snprintf(spec_version, sizeof(spec_version), "%d.%d.%d", 576 VK_MAJOR(ext_props[i].specVersion), 577 VK_MINOR(ext_props[i].specVersion), 578 
VK_PATCH(ext_props[i].specVersion)); 579 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 580 "Instance Extension: %s (%s) version %s", 581 ext_props[i].extensionName, lib_name, spec_version); 582 loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]); 583 } 584 585 return; 586} 587 588/* 589 * Initialize ext_list with the physical device extensions. 590 * The extension properties are passed as inputs in count and ext_props. 591 */ 592static VkResult 593loader_init_device_extensions(const struct loader_instance *inst, 594 struct loader_physical_device *phys_dev, 595 uint32_t count, VkExtensionProperties *ext_props, 596 struct loader_extension_list *ext_list) { 597 VkResult res; 598 uint32_t i; 599 600 if (!loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, 601 sizeof(VkExtensionProperties))) { 602 return VK_ERROR_OUT_OF_HOST_MEMORY; 603 } 604 605 for (i = 0; i < count; i++) { 606 char spec_version[64]; 607 608 snprintf(spec_version, sizeof(spec_version), "%d.%d.%d", 609 VK_MAJOR(ext_props[i].specVersion), 610 VK_MINOR(ext_props[i].specVersion), 611 VK_PATCH(ext_props[i].specVersion)); 612 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 613 "Device Extension: %s (%s) version %s", 614 ext_props[i].extensionName, 615 phys_dev->this_icd->this_icd_lib->lib_name, spec_version); 616 res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]); 617 if (res != VK_SUCCESS) 618 return res; 619 } 620 621 return VK_SUCCESS; 622} 623 624static VkResult loader_add_device_extensions( 625 const struct loader_instance *inst, struct loader_icd *icd, 626 VkPhysicalDevice physical_device, const char *lib_name, 627 struct loader_extension_list *ext_list) { 628 uint32_t i, count; 629 VkResult res; 630 VkExtensionProperties *ext_props; 631 632 res = icd->EnumerateDeviceExtensionProperties(physical_device, NULL, &count, 633 NULL); 634 if (res == VK_SUCCESS && count > 0) { 635 ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties)); 636 if (!ext_props) 637 return VK_ERROR_OUT_OF_HOST_MEMORY; 638 res = icd->EnumerateDeviceExtensionProperties(physical_device, NULL, 639 &count, ext_props); 640 if (res != VK_SUCCESS) 641 return res; 642 for (i = 0; i < count; i++) { 643 char spec_version[64]; 644 645 snprintf(spec_version, sizeof(spec_version), "%d.%d.%d", 646 VK_MAJOR(ext_props[i].specVersion), 647 VK_MINOR(ext_props[i].specVersion), 648 VK_PATCH(ext_props[i].specVersion)); 649 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 650 "Device Extension: %s (%s) version %s", 651 ext_props[i].extensionName, lib_name, spec_version); 652 res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]); 653 if (res != VK_SUCCESS) 654 return res; 655 } 656 } else { 657 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 658 "Error getting physical device extension info count from " 659 "library %s", 660 lib_name); 661 return res; 662 } 663 664 return VK_SUCCESS; 665} 666 667static bool loader_init_generic_list(const struct loader_instance *inst, 668 struct loader_generic_list *list_info, 669 size_t element_size) { 670 list_info->capacity = 32 * element_size; 671 list_info->list = loader_heap_alloc(inst, list_info->capacity, 672 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 673 if (list_info->list == NULL) { 674 return false; 675 } 676 memset(list_info->list, 0, list_info->capacity); 677 list_info->count = 0; 678 return true; 679} 680 681void loader_destroy_generic_list(const struct loader_instance *inst, 682 struct loader_generic_list *list) { 683 loader_heap_free(inst, list->list); 684 
list->count = 0; 685 list->capacity = 0; 686} 687 688/* 689 * Append non-duplicate extension properties defined in props 690 * to the given ext_list. 691 * Return 692 * Vk_SUCCESS on success 693 */ 694VkResult loader_add_to_ext_list(const struct loader_instance *inst, 695 struct loader_extension_list *ext_list, 696 uint32_t prop_list_count, 697 const VkExtensionProperties *props) { 698 uint32_t i; 699 const VkExtensionProperties *cur_ext; 700 701 if (ext_list->list == NULL || ext_list->capacity == 0) { 702 loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, 703 sizeof(VkExtensionProperties)); 704 } 705 706 if (ext_list->list == NULL) 707 return VK_ERROR_OUT_OF_HOST_MEMORY; 708 709 for (i = 0; i < prop_list_count; i++) { 710 cur_ext = &props[i]; 711 712 // look for duplicates 713 if (has_vk_extension_property(cur_ext, ext_list)) { 714 continue; 715 } 716 717 // add to list at end 718 // check for enough capacity 719 if (ext_list->count * sizeof(VkExtensionProperties) >= 720 ext_list->capacity) { 721 722 ext_list->list = loader_heap_realloc( 723 inst, ext_list->list, ext_list->capacity, 724 ext_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 725 726 if (ext_list->list == NULL) 727 return VK_ERROR_OUT_OF_HOST_MEMORY; 728 729 // double capacity 730 ext_list->capacity *= 2; 731 } 732 733 memcpy(&ext_list->list[ext_list->count], cur_ext, 734 sizeof(VkExtensionProperties)); 735 ext_list->count++; 736 } 737 return VK_SUCCESS; 738} 739 740/* 741 * Append one extension property defined in props with entrypoints 742 * defined in entrys to the given ext_list. 743 * Return 744 * Vk_SUCCESS on success 745 */ 746VkResult 747loader_add_to_dev_ext_list(const struct loader_instance *inst, 748 struct loader_device_extension_list *ext_list, 749 const VkExtensionProperties *props, 750 uint32_t entry_count, char **entrys) { 751 uint32_t idx; 752 if (ext_list->list == NULL || ext_list->capacity == 0) { 753 loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, 754 sizeof(struct loader_dev_ext_props)); 755 } 756 757 if (ext_list->list == NULL) 758 return VK_ERROR_OUT_OF_HOST_MEMORY; 759 760 idx = ext_list->count; 761 // add to list at end 762 // check for enough capacity 763 if (idx * sizeof(struct loader_dev_ext_props) >= ext_list->capacity) { 764 765 ext_list->list = loader_heap_realloc( 766 inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2, 767 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 768 769 if (ext_list->list == NULL) 770 return VK_ERROR_OUT_OF_HOST_MEMORY; 771 772 // double capacity 773 ext_list->capacity *= 2; 774 } 775 776 memcpy(&ext_list->list[idx].props, props, 777 sizeof(struct loader_dev_ext_props)); 778 ext_list->list[idx].entrypoint_count = entry_count; 779 ext_list->list[idx].entrypoints = 780 loader_heap_alloc(inst, sizeof(char *) * entry_count, 781 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 782 if (ext_list->list[idx].entrypoints == NULL) 783 return VK_ERROR_OUT_OF_HOST_MEMORY; 784 for (uint32_t i = 0; i < entry_count; i++) { 785 ext_list->list[idx].entrypoints[i] = loader_heap_alloc( 786 inst, strlen(entrys[i]) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 787 if (ext_list->list[idx].entrypoints[i] == NULL) 788 return VK_ERROR_OUT_OF_HOST_MEMORY; 789 strcpy(ext_list->list[idx].entrypoints[i], entrys[i]); 790 } 791 ext_list->count++; 792 793 return VK_SUCCESS; 794} 795 796/** 797 * Search the given search_list for any layers in the props list. 798 * Add these to the output layer_list. Don't add duplicates to the output 799 * layer_list. 
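 *
 * Usage sketch (variable and list names are hypothetical; the signature is the
 * one defined below): resolving the names an application passed in against the
 * layers the loader has scanned, e.g.
 *
 *   VkResult err = loader_add_layer_names_to_list(
 *       inst, &activated_layers, create_info->enabledLayerCount,
 *       create_info->ppEnabledLayerNames, &scanned_layers);
 *
 * err is VK_ERROR_LAYER_NOT_PRESENT if any requested name is not in
 * scanned_layers; the names that were found are still appended.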
800 */ 801static VkResult 802loader_add_layer_names_to_list(const struct loader_instance *inst, 803 struct loader_layer_list *output_list, 804 uint32_t name_count, const char *const *names, 805 const struct loader_layer_list *search_list) { 806 struct loader_layer_properties *layer_prop; 807 VkResult err = VK_SUCCESS; 808 809 for (uint32_t i = 0; i < name_count; i++) { 810 const char *search_target = names[i]; 811 layer_prop = loader_get_layer_property(search_target, search_list); 812 if (!layer_prop) { 813 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 814 "Unable to find layer %s", search_target); 815 err = VK_ERROR_LAYER_NOT_PRESENT; 816 continue; 817 } 818 819 loader_add_to_layer_list(inst, output_list, 1, layer_prop); 820 } 821 822 return err; 823} 824 825/* 826 * Manage lists of VkLayerProperties 827 */ 828static bool loader_init_layer_list(const struct loader_instance *inst, 829 struct loader_layer_list *list) { 830 list->capacity = 32 * sizeof(struct loader_layer_properties); 831 list->list = loader_heap_alloc(inst, list->capacity, 832 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 833 if (list->list == NULL) { 834 return false; 835 } 836 memset(list->list, 0, list->capacity); 837 list->count = 0; 838 return true; 839} 840 841void loader_destroy_layer_list(const struct loader_instance *inst, 842 struct loader_layer_list *layer_list) { 843 loader_heap_free(inst, layer_list->list); 844 layer_list->count = 0; 845 layer_list->capacity = 0; 846} 847 848/* 849 * Manage list of layer libraries (loader_lib_info) 850 */ 851static bool 852loader_init_layer_library_list(const struct loader_instance *inst, 853 struct loader_layer_library_list *list) { 854 list->capacity = 32 * sizeof(struct loader_lib_info); 855 list->list = loader_heap_alloc(inst, list->capacity, 856 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 857 if (list->list == NULL) { 858 return false; 859 } 860 memset(list->list, 0, list->capacity); 861 list->count = 0; 862 return true; 863} 864 865void loader_destroy_layer_library_list(const struct loader_instance *inst, 866 struct loader_layer_library_list *list) { 867 for (uint32_t i = 0; i < list->count; i++) { 868 loader_heap_free(inst, list->list[i].lib_name); 869 } 870 loader_heap_free(inst, list->list); 871 list->count = 0; 872 list->capacity = 0; 873} 874 875void loader_add_to_layer_library_list(const struct loader_instance *inst, 876 struct loader_layer_library_list *list, 877 uint32_t item_count, 878 const struct loader_lib_info *new_items) { 879 uint32_t i; 880 struct loader_lib_info *item; 881 882 if (list->list == NULL || list->capacity == 0) { 883 loader_init_layer_library_list(inst, list); 884 } 885 886 if (list->list == NULL) 887 return; 888 889 for (i = 0; i < item_count; i++) { 890 item = (struct loader_lib_info *)&new_items[i]; 891 892 // look for duplicates 893 for (uint32_t j = 0; j < list->count; j++) { 894 if (strcmp(list->list[i].lib_name, new_items->lib_name) == 0) { 895 continue; 896 } 897 } 898 899 // add to list at end 900 // check for enough capacity 901 if (list->count * sizeof(struct loader_lib_info) >= list->capacity) { 902 903 list->list = loader_heap_realloc( 904 inst, list->list, list->capacity, list->capacity * 2, 905 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 906 // double capacity 907 list->capacity *= 2; 908 } 909 910 memcpy(&list->list[list->count], item, sizeof(struct loader_lib_info)); 911 list->count++; 912 } 913} 914 915/* 916 * Search the given layer list for a list 917 * matching the given VkLayerProperties 918 */ 919bool has_vk_layer_property(const 
VkLayerProperties *vk_layer_prop, 920 const struct loader_layer_list *list) { 921 for (uint32_t i = 0; i < list->count; i++) { 922 if (strcmp(vk_layer_prop->layerName, list->list[i].info.layerName) == 0) 923 return true; 924 } 925 return false; 926} 927 928/* 929 * Search the given layer list for a layer 930 * matching the given name 931 */ 932bool has_layer_name(const char *name, const struct loader_layer_list *list) { 933 for (uint32_t i = 0; i < list->count; i++) { 934 if (strcmp(name, list->list[i].info.layerName) == 0) 935 return true; 936 } 937 return false; 938} 939 940/* 941 * Append non-duplicate layer properties defined in prop_list 942 * to the given layer_info list 943 */ 944void loader_add_to_layer_list(const struct loader_instance *inst, 945 struct loader_layer_list *list, 946 uint32_t prop_list_count, 947 const struct loader_layer_properties *props) { 948 uint32_t i; 949 struct loader_layer_properties *layer; 950 951 if (list->list == NULL || list->capacity == 0) { 952 loader_init_layer_list(inst, list); 953 } 954 955 if (list->list == NULL) 956 return; 957 958 for (i = 0; i < prop_list_count; i++) { 959 layer = (struct loader_layer_properties *)&props[i]; 960 961 // look for duplicates 962 if (has_vk_layer_property(&layer->info, list)) { 963 continue; 964 } 965 966 // add to list at end 967 // check for enough capacity 968 if (list->count * sizeof(struct loader_layer_properties) >= 969 list->capacity) { 970 971 list->list = loader_heap_realloc( 972 inst, list->list, list->capacity, list->capacity * 2, 973 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 974 // double capacity 975 list->capacity *= 2; 976 } 977 978 memcpy(&list->list[list->count], layer, 979 sizeof(struct loader_layer_properties)); 980 list->count++; 981 } 982} 983 984/** 985 * Search the search_list for any layer with a name 986 * that matches the given name and a type that matches the given type 987 * Add all matching layers to the found_list 988 * Do not add if found loader_layer_properties is already 989 * on the found_list. 
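 *
 * Sketch (hypothetical arguments): activating one explicit layer by name, if
 * it was discovered during scanning:
 *
 *   loader_find_layer_name_add_list(inst, "VK_LAYER_LUNARG_core_validation",
 *                                   VK_LAYER_TYPE_INSTANCE_EXPLICIT,
 *                                   &scanned_layers, &activated_layers);
 *
 * (VK_LAYER_TYPE_INSTANCE_EXPLICIT is assumed to be the matching instance
 * flag in the layer_type enum; only the device flags are referenced in this
 * file.)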
 */
static void
loader_find_layer_name_add_list(const struct loader_instance *inst,
                                const char *name, const enum layer_type type,
                                const struct loader_layer_list *search_list,
                                struct loader_layer_list *found_list) {
    bool found = false;
    for (uint32_t i = 0; i < search_list->count; i++) {
        struct loader_layer_properties *layer_prop = &search_list->list[i];
        if (0 == strcmp(layer_prop->info.layerName, name) &&
            (layer_prop->type & type)) {
            /* Found a layer with the same name, add to found_list */
            loader_add_to_layer_list(inst, found_list, 1, layer_prop);
            found = true;
        }
    }
    if (!found) {
        loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                   "Warning, couldn't find layer name %s to activate", name);
    }
}

static VkExtensionProperties *
get_extension_property(const char *name,
                       const struct loader_extension_list *list) {
    for (uint32_t i = 0; i < list->count; i++) {
        if (strcmp(name, list->list[i].extensionName) == 0)
            return &list->list[i];
    }
    return NULL;
}

static VkExtensionProperties *
get_dev_extension_property(const char *name,
                           const struct loader_device_extension_list *list) {
    for (uint32_t i = 0; i < list->count; i++) {
        if (strcmp(name, list->list[i].props.extensionName) == 0)
            return &list->list[i].props;
    }
    return NULL;
}

/*
 * This function will return the pNext pointer of any
 * CreateInfo extensions that are not loader extensions.
 * This is used to skip past the loader extensions prepended
 * to the list during CreateInstance and CreateDevice.
 */
void *loader_strip_create_extensions(const void *pNext) {
    VkLayerInstanceCreateInfo *create_info = (VkLayerInstanceCreateInfo *)pNext;

    while (
        create_info &&
        (create_info->sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO ||
         create_info->sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO)) {
        create_info = (VkLayerInstanceCreateInfo *)create_info->pNext;
    }

    return create_info;
}

/*
 * An instance extension implemented within the loader (e.g. DEBUG_REPORT)
 * must provide two entry points for the loader to use:
 * - "trampoline" entry point - this is the address returned by GetProcAddr
 *   and will always do what's necessary to support a global call.
 * - "terminator" function - this function is placed at the end of the
 *   instance chain and contains the necessary logic to call / process
 *   the extension for the appropriate ICDs that are available.
 * There is no generic mechanism for including these functions; the references
 * must be placed into the appropriate loader entry points:
 * - GetInstanceProcAddr: call the extension's GetInstanceProcAddr to check
 *   for GetProcAddr requests.
 * - loader_coalesce_extensions(void): add extension records to the list of
 *   global extensions available to the app.
 * - instance_disp: add the function pointer for the terminator function to
 *   this array.
 * The extension itself should be in a separate file that is
 * linked directly with the loader.
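 *
 * Minimal sketch of such a pair (hypothetical extension "Foo"; only the shape
 * is meant to be accurate, not the names):
 *
 *   // trampoline - handed to the app by GetInstanceProcAddr
 *   VKAPI_ATTR void VKAPI_CALL vkFooEXT(VkInstance instance) {
 *       const VkLayerInstanceDispatchTable *disp =
 *           loader_get_instance_dispatch(instance);
 *       disp->FooEXT(instance); // walks any enabled layers first
 *   }
 *
 *   // terminator - referenced from instance_disp, runs after all layers
 *   VKAPI_ATTR void VKAPI_CALL loader_FooEXT(VkInstance instance) {
 *       // locate the loader_instance and call into each ICD that
 *       // advertises the extension
 *   }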
1069 */ 1070 1071void loader_get_icd_loader_instance_extensions( 1072 const struct loader_instance *inst, struct loader_icd_libs *icd_libs, 1073 struct loader_extension_list *inst_exts) { 1074 struct loader_extension_list icd_exts; 1075 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 1076 "Build ICD instance extension list"); 1077 // traverse scanned icd list adding non-duplicate extensions to the list 1078 for (uint32_t i = 0; i < icd_libs->count; i++) { 1079 loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, 1080 sizeof(VkExtensionProperties)); 1081 loader_add_instance_extensions( 1082 inst, icd_libs->list[i].EnumerateInstanceExtensionProperties, 1083 icd_libs->list[i].lib_name, &icd_exts); 1084 loader_add_to_ext_list(inst, inst_exts, icd_exts.count, icd_exts.list); 1085 loader_destroy_generic_list(inst, 1086 (struct loader_generic_list *)&icd_exts); 1087 }; 1088 1089 // Traverse loader's extensions, adding non-duplicate extensions to the list 1090 wsi_add_instance_extensions(inst, inst_exts); 1091 debug_report_add_instance_extensions(inst, inst_exts); 1092} 1093 1094struct loader_physical_device * 1095loader_get_physical_device(const VkPhysicalDevice physdev) { 1096 uint32_t i; 1097 for (struct loader_instance *inst = loader.instances; inst; 1098 inst = inst->next) { 1099 for (i = 0; i < inst->total_gpu_count; i++) { 1100 // TODO this aliases physDevices within instances, need for this 1101 // function to go away 1102 if (inst->phys_devs[i].disp == 1103 loader_get_instance_dispatch(physdev)) { 1104 return &inst->phys_devs[i]; 1105 } 1106 } 1107 } 1108 return NULL; 1109} 1110 1111struct loader_icd *loader_get_icd_and_device(const VkDevice device, 1112 struct loader_device **found_dev) { 1113 *found_dev = NULL; 1114 for (struct loader_instance *inst = loader.instances; inst; 1115 inst = inst->next) { 1116 for (struct loader_icd *icd = inst->icds; icd; icd = icd->next) { 1117 for (struct loader_device *dev = icd->logical_device_list; dev; 1118 dev = dev->next) 1119 /* Value comparison of device prevents object wrapping by layers 1120 */ 1121 if (loader_get_dispatch(dev->device) == 1122 loader_get_dispatch(device)) { 1123 *found_dev = dev; 1124 return icd; 1125 } 1126 } 1127 } 1128 return NULL; 1129} 1130 1131static void loader_destroy_logical_device(const struct loader_instance *inst, 1132 struct loader_device *dev) { 1133 loader_heap_free(inst, dev->app_extension_props); 1134 loader_destroy_layer_list(inst, &dev->activated_layer_list); 1135 loader_heap_free(inst, dev); 1136} 1137 1138static struct loader_device * 1139loader_add_logical_device(const struct loader_instance *inst, 1140 struct loader_device **device_list) { 1141 struct loader_device *new_dev; 1142 1143 new_dev = loader_heap_alloc(inst, sizeof(struct loader_device), 1144 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); 1145 if (!new_dev) { 1146 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 1147 "Failed to alloc struct loader-device"); 1148 return NULL; 1149 } 1150 1151 memset(new_dev, 0, sizeof(struct loader_device)); 1152 1153 new_dev->next = *device_list; 1154 *device_list = new_dev; 1155 return new_dev; 1156} 1157 1158void loader_remove_logical_device(const struct loader_instance *inst, 1159 struct loader_icd *icd, 1160 struct loader_device *found_dev) { 1161 struct loader_device *dev, *prev_dev; 1162 1163 if (!icd || !found_dev) 1164 return; 1165 1166 prev_dev = NULL; 1167 dev = icd->logical_device_list; 1168 while (dev && dev != found_dev) { 1169 prev_dev = dev; 1170 dev = dev->next; 1171 } 1172 1173 if 
(prev_dev) 1174 prev_dev->next = found_dev->next; 1175 else 1176 icd->logical_device_list = found_dev->next; 1177 loader_destroy_logical_device(inst, found_dev); 1178} 1179 1180static void loader_icd_destroy(struct loader_instance *ptr_inst, 1181 struct loader_icd *icd) { 1182 ptr_inst->total_icd_count--; 1183 for (struct loader_device *dev = icd->logical_device_list; dev;) { 1184 struct loader_device *next_dev = dev->next; 1185 loader_destroy_logical_device(ptr_inst, dev); 1186 dev = next_dev; 1187 } 1188 1189 loader_heap_free(ptr_inst, icd); 1190} 1191 1192static struct loader_icd * 1193loader_icd_create(const struct loader_instance *inst) { 1194 struct loader_icd *icd; 1195 1196 icd = loader_heap_alloc(inst, sizeof(*icd), 1197 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1198 if (!icd) 1199 return NULL; 1200 1201 memset(icd, 0, sizeof(*icd)); 1202 1203 return icd; 1204} 1205 1206static struct loader_icd * 1207loader_icd_add(struct loader_instance *ptr_inst, 1208 const struct loader_scanned_icds *icd_lib) { 1209 struct loader_icd *icd; 1210 1211 icd = loader_icd_create(ptr_inst); 1212 if (!icd) 1213 return NULL; 1214 1215 icd->this_icd_lib = icd_lib; 1216 icd->this_instance = ptr_inst; 1217 1218 /* prepend to the list */ 1219 icd->next = ptr_inst->icds; 1220 ptr_inst->icds = icd; 1221 ptr_inst->total_icd_count++; 1222 1223 return icd; 1224} 1225 1226void loader_scanned_icd_clear(const struct loader_instance *inst, 1227 struct loader_icd_libs *icd_libs) { 1228 if (icd_libs->capacity == 0) 1229 return; 1230 for (uint32_t i = 0; i < icd_libs->count; i++) { 1231 loader_platform_close_library(icd_libs->list[i].handle); 1232 loader_heap_free(inst, icd_libs->list[i].lib_name); 1233 } 1234 loader_heap_free(inst, icd_libs->list); 1235 icd_libs->capacity = 0; 1236 icd_libs->count = 0; 1237 icd_libs->list = NULL; 1238} 1239 1240static void loader_scanned_icd_init(const struct loader_instance *inst, 1241 struct loader_icd_libs *icd_libs) { 1242 loader_scanned_icd_clear(inst, icd_libs); 1243 icd_libs->capacity = 8 * sizeof(struct loader_scanned_icds); 1244 icd_libs->list = loader_heap_alloc(inst, icd_libs->capacity, 1245 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1246} 1247 1248static void loader_scanned_icd_add(const struct loader_instance *inst, 1249 struct loader_icd_libs *icd_libs, 1250 const char *filename, uint32_t api_version) { 1251 loader_platform_dl_handle handle; 1252 PFN_vkCreateInstance fp_create_inst; 1253 PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props; 1254 PFN_vkGetInstanceProcAddr fp_get_proc_addr; 1255 struct loader_scanned_icds *new_node; 1256 1257 /* TODO implement ref counting of libraries, for now this function leaves 1258 libraries open and the scanned_icd_clear closes them */ 1259 // Used to call: dlopen(filename, RTLD_LAZY); 1260 handle = loader_platform_open_library(filename); 1261 if (!handle) { 1262 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, 1263 loader_platform_open_library_error(filename)); 1264 return; 1265 } 1266 1267 fp_get_proc_addr = 1268 loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr"); 1269 if (!fp_get_proc_addr) { 1270 // Use deprecated interface 1271 fp_get_proc_addr = 1272 loader_platform_get_proc_address(handle, "vkGetInstanceProcAddr"); 1273 if (!fp_get_proc_addr) { 1274 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 1275 loader_platform_get_proc_address_error( 1276 "vk_icdGetInstanceProcAddr")); 1277 return; 1278 } else { 1279 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, 1280 "Using deprecated ICD interface of 
" 1281 "vkGetInstanceProcAddr instead of " 1282 "vk_icdGetInstanceProcAddr"); 1283 } 1284 fp_create_inst = 1285 loader_platform_get_proc_address(handle, "vkCreateInstance"); 1286 if (!fp_create_inst) { 1287 loader_log( 1288 inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 1289 "Couldn't get vkCreateInstance via dlsym/loadlibrary from ICD"); 1290 return; 1291 } 1292 fp_get_inst_ext_props = loader_platform_get_proc_address( 1293 handle, "vkEnumerateInstanceExtensionProperties"); 1294 if (!fp_get_inst_ext_props) { 1295 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 1296 "Couldn't get vkEnumerateInstanceExtensionProperties " 1297 "via dlsym/loadlibrary from ICD"); 1298 return; 1299 } 1300 } else { 1301 // Use newer interface 1302 fp_create_inst = 1303 (PFN_vkCreateInstance)fp_get_proc_addr(NULL, "vkCreateInstance"); 1304 if (!fp_create_inst) { 1305 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 1306 "Couldn't get vkCreateInstance via " 1307 "vk_icdGetInstanceProcAddr from ICD"); 1308 return; 1309 } 1310 fp_get_inst_ext_props = 1311 (PFN_vkEnumerateInstanceExtensionProperties)fp_get_proc_addr( 1312 NULL, "vkEnumerateInstanceExtensionProperties"); 1313 if (!fp_get_inst_ext_props) { 1314 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 1315 "Couldn't get vkEnumerateInstanceExtensionProperties " 1316 "via vk_icdGetInstanceProcAddr from ICD"); 1317 return; 1318 } 1319 } 1320 1321 // check for enough capacity 1322 if ((icd_libs->count * sizeof(struct loader_scanned_icds)) >= 1323 icd_libs->capacity) { 1324 1325 icd_libs->list = loader_heap_realloc( 1326 inst, icd_libs->list, icd_libs->capacity, icd_libs->capacity * 2, 1327 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1328 // double capacity 1329 icd_libs->capacity *= 2; 1330 } 1331 new_node = &(icd_libs->list[icd_libs->count]); 1332 1333 new_node->handle = handle; 1334 new_node->api_version = api_version; 1335 new_node->GetInstanceProcAddr = fp_get_proc_addr; 1336 new_node->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props; 1337 new_node->CreateInstance = fp_create_inst; 1338 1339 new_node->lib_name = (char *)loader_heap_alloc( 1340 inst, strlen(filename) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1341 if (!new_node->lib_name) { 1342 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, 1343 "Out of memory can't add icd"); 1344 return; 1345 } 1346 strcpy(new_node->lib_name, filename); 1347 icd_libs->count++; 1348} 1349 1350static bool loader_icd_init_entrys(struct loader_icd *icd, VkInstance inst, 1351 const PFN_vkGetInstanceProcAddr fp_gipa) { 1352/* initialize entrypoint function pointers */ 1353 1354#define LOOKUP_GIPA(func, required) \ 1355 do { \ 1356 icd->func = (PFN_vk##func)fp_gipa(inst, "vk" #func); \ 1357 if (!icd->func && required) { \ 1358 loader_log((struct loader_instance *)inst, \ 1359 VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, \ 1360 loader_platform_get_proc_address_error("vk" #func)); \ 1361 return false; \ 1362 } \ 1363 } while (0) 1364 1365 LOOKUP_GIPA(GetDeviceProcAddr, true); 1366 LOOKUP_GIPA(DestroyInstance, true); 1367 LOOKUP_GIPA(EnumeratePhysicalDevices, true); 1368 LOOKUP_GIPA(GetPhysicalDeviceFeatures, true); 1369 LOOKUP_GIPA(GetPhysicalDeviceFormatProperties, true); 1370 LOOKUP_GIPA(GetPhysicalDeviceImageFormatProperties, true); 1371 LOOKUP_GIPA(CreateDevice, true); 1372 LOOKUP_GIPA(GetPhysicalDeviceProperties, true); 1373 LOOKUP_GIPA(GetPhysicalDeviceMemoryProperties, true); 1374 LOOKUP_GIPA(GetPhysicalDeviceQueueFamilyProperties, true); 1375 LOOKUP_GIPA(EnumerateDeviceExtensionProperties, true); 1376 
LOOKUP_GIPA(GetPhysicalDeviceSparseImageFormatProperties, true); 1377 LOOKUP_GIPA(CreateDebugReportCallbackEXT, false); 1378 LOOKUP_GIPA(DestroyDebugReportCallbackEXT, false); 1379 LOOKUP_GIPA(GetPhysicalDeviceSurfaceSupportKHR, false); 1380 LOOKUP_GIPA(GetPhysicalDeviceSurfaceCapabilitiesKHR, false); 1381 LOOKUP_GIPA(GetPhysicalDeviceSurfaceFormatsKHR, false); 1382 LOOKUP_GIPA(GetPhysicalDeviceSurfacePresentModesKHR, false); 1383#ifdef VK_USE_PLATFORM_WIN32_KHR 1384 LOOKUP_GIPA(GetPhysicalDeviceWin32PresentationSupportKHR, false); 1385#endif 1386#ifdef VK_USE_PLATFORM_XCB_KHR 1387 LOOKUP_GIPA(GetPhysicalDeviceXcbPresentationSupportKHR, false); 1388#endif 1389#ifdef VK_USE_PLATFORM_WAYLAND_KHR 1390 LOOKUP_GIPA(GetPhysicalDeviceWaylandPresentationSupportKHR, false); 1391#endif 1392 1393#undef LOOKUP_GIPA 1394 1395 return true; 1396} 1397 1398static void loader_debug_init(void) { 1399 const char *env, *orig; 1400 1401 if (g_loader_debug > 0) 1402 return; 1403 1404 g_loader_debug = 0; 1405 1406 /* parse comma-separated debug options */ 1407 orig = env = loader_getenv("VK_LOADER_DEBUG"); 1408 while (env) { 1409 const char *p = strchr(env, ','); 1410 size_t len; 1411 1412 if (p) 1413 len = p - env; 1414 else 1415 len = strlen(env); 1416 1417 if (len > 0) { 1418 if (strncmp(env, "all", len) == 0) { 1419 g_loader_debug = ~0u; 1420 g_loader_log_msgs = ~0u; 1421 } else if (strncmp(env, "warn", len) == 0) { 1422 g_loader_debug |= LOADER_WARN_BIT; 1423 g_loader_log_msgs |= VK_DEBUG_REPORT_WARNING_BIT_EXT; 1424 } else if (strncmp(env, "info", len) == 0) { 1425 g_loader_debug |= LOADER_INFO_BIT; 1426 g_loader_log_msgs |= VK_DEBUG_REPORT_INFORMATION_BIT_EXT; 1427 } else if (strncmp(env, "perf", len) == 0) { 1428 g_loader_debug |= LOADER_PERF_BIT; 1429 g_loader_log_msgs |= VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT; 1430 } else if (strncmp(env, "error", len) == 0) { 1431 g_loader_debug |= LOADER_ERROR_BIT; 1432 g_loader_log_msgs |= VK_DEBUG_REPORT_ERROR_BIT_EXT; 1433 } else if (strncmp(env, "debug", len) == 0) { 1434 g_loader_debug |= LOADER_DEBUG_BIT; 1435 g_loader_log_msgs |= VK_DEBUG_REPORT_DEBUG_BIT_EXT; 1436 } 1437 } 1438 1439 if (!p) 1440 break; 1441 1442 env = p + 1; 1443 } 1444 1445 loader_free_getenv(orig); 1446} 1447 1448void loader_initialize(void) { 1449 // initialize mutexs 1450 loader_platform_thread_create_mutex(&loader_lock); 1451 loader_platform_thread_create_mutex(&loader_json_lock); 1452 1453 // initialize logging 1454 loader_debug_init(); 1455 1456 // initial cJSON to use alloc callbacks 1457 cJSON_Hooks alloc_fns = { 1458 .malloc_fn = loader_tls_heap_alloc, .free_fn = loader_tls_heap_free, 1459 }; 1460 cJSON_InitHooks(&alloc_fns); 1461} 1462 1463struct loader_manifest_files { 1464 uint32_t count; 1465 char **filename_list; 1466}; 1467 1468/** 1469 * Get next file or dirname given a string list or registry key path 1470 * 1471 * \returns 1472 * A pointer to first char in the next path. 1473 * The next path (or NULL) in the list is returned in next_path. 1474 * Note: input string is modified in some cases. PASS IN A COPY! 
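 *
 * Example (assuming ':' as PATH_SEPERATOR): given a writable copy of
 * "/etc/vulkan/icd.d:/usr/share/vulkan/icd.d", the first call overwrites the
 * separator with '\0' and returns a pointer to "/usr/share/vulkan/icd.d",
 * leaving the input pointing at just "/etc/vulkan/icd.d"; a call on a path
 * with no separator returns a pointer to its terminating NUL, so the
 * enclosing loops in this file stop when the returned pointer refers to an
 * empty string.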
1475 */ 1476static char *loader_get_next_path(char *path) { 1477 uint32_t len; 1478 char *next; 1479 1480 if (path == NULL) 1481 return NULL; 1482 next = strchr(path, PATH_SEPERATOR); 1483 if (next == NULL) { 1484 len = (uint32_t)strlen(path); 1485 next = path + len; 1486 } else { 1487 *next = '\0'; 1488 next++; 1489 } 1490 1491 return next; 1492} 1493 1494/** 1495 * Given a path which is absolute or relative, expand the path if relative or 1496 * leave the path unmodified if absolute. The base path to prepend to relative 1497 * paths is given in rel_base. 1498 * 1499 * \returns 1500 * A string in out_fullpath of the full absolute path 1501 */ 1502static void loader_expand_path(const char *path, const char *rel_base, 1503 size_t out_size, char *out_fullpath) { 1504 if (loader_platform_is_path_absolute(path)) { 1505 // do not prepend a base to an absolute path 1506 rel_base = ""; 1507 } 1508 1509 loader_platform_combine_path(out_fullpath, out_size, rel_base, path, NULL); 1510} 1511 1512/** 1513 * Given a filename (file) and a list of paths (dir), try to find an existing 1514 * file in the paths. If filename already is a path then no 1515 * searching in the given paths. 1516 * 1517 * \returns 1518 * A string in out_fullpath of either the full path or file. 1519 */ 1520static void loader_get_fullpath(const char *file, const char *dirs, 1521 size_t out_size, char *out_fullpath) { 1522 if (!loader_platform_is_path(file) && *dirs) { 1523 char *dirs_copy, *dir, *next_dir; 1524 1525 dirs_copy = loader_stack_alloc(strlen(dirs) + 1); 1526 strcpy(dirs_copy, dirs); 1527 1528 // find if file exists after prepending paths in given list 1529 for (dir = dirs_copy; *dir && (next_dir = loader_get_next_path(dir)); 1530 dir = next_dir) { 1531 loader_platform_combine_path(out_fullpath, out_size, dir, file, 1532 NULL); 1533 if (loader_platform_file_exists(out_fullpath)) { 1534 return; 1535 } 1536 } 1537 } 1538 1539 snprintf(out_fullpath, out_size, "%s", file); 1540} 1541 1542/** 1543 * Read a JSON file into a buffer. 1544 * 1545 * \returns 1546 * A pointer to a cJSON object representing the JSON parse tree. 1547 * This returned buffer should be freed by caller. 1548 */ 1549static cJSON *loader_get_json(const struct loader_instance *inst, 1550 const char *filename) { 1551 FILE *file; 1552 char *json_buf; 1553 cJSON *json; 1554 size_t len; 1555 file = fopen(filename, "rb"); 1556 if (!file) { 1557 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 1558 "Couldn't open JSON file %s", filename); 1559 return NULL; 1560 } 1561 fseek(file, 0, SEEK_END); 1562 len = ftell(file); 1563 fseek(file, 0, SEEK_SET); 1564 json_buf = (char *)loader_stack_alloc(len + 1); 1565 if (json_buf == NULL) { 1566 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 1567 "Out of memory can't get JSON file"); 1568 fclose(file); 1569 return NULL; 1570 } 1571 if (fread(json_buf, sizeof(char), len, file) != len) { 1572 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 1573 "fread failed can't get JSON file"); 1574 fclose(file); 1575 return NULL; 1576 } 1577 fclose(file); 1578 json_buf[len] = '\0'; 1579 1580 // parse text from file 1581 json = cJSON_Parse(json_buf); 1582 if (json == NULL) 1583 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 1584 "Can't parse JSON file %s", filename); 1585 return json; 1586} 1587 1588/** 1589 * Do a deep copy of the loader_layer_properties structure. 
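 *
 * A plain memcpy of the structure alone would leave dst sharing the extension
 * lists and the per-extension entrypoint string arrays with src, so those
 * allocations are duplicated as well.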
 */
static void loader_copy_layer_properties(const struct loader_instance *inst,
                                         struct loader_layer_properties *dst,
                                         struct loader_layer_properties *src) {
    uint32_t cnt, i;
    memcpy(dst, src, sizeof(*src));
    dst->instance_extension_list.list =
        loader_heap_alloc(inst, sizeof(VkExtensionProperties) *
                                    src->instance_extension_list.count,
                          VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    dst->instance_extension_list.capacity =
        sizeof(VkExtensionProperties) * src->instance_extension_list.count;
    memcpy(dst->instance_extension_list.list, src->instance_extension_list.list,
           dst->instance_extension_list.capacity);
    dst->device_extension_list.list =
        loader_heap_alloc(inst, sizeof(struct loader_dev_ext_props) *
                                    src->device_extension_list.count,
                          VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);

    dst->device_extension_list.capacity =
        sizeof(struct loader_dev_ext_props) * src->device_extension_list.count;
    memcpy(dst->device_extension_list.list, src->device_extension_list.list,
           dst->device_extension_list.capacity);
    if (src->device_extension_list.count > 0 &&
        src->device_extension_list.list->entrypoint_count > 0) {
        cnt = src->device_extension_list.list->entrypoint_count;
        dst->device_extension_list.list->entrypoints = loader_heap_alloc(
            inst, sizeof(char *) * cnt, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        for (i = 0; i < cnt; i++) {
            dst->device_extension_list.list->entrypoints[i] = loader_heap_alloc(
                inst,
                strlen(src->device_extension_list.list->entrypoints[i]) + 1,
                VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
            strcpy(dst->device_extension_list.list->entrypoints[i],
                   src->device_extension_list.list->entrypoints[i]);
        }
    }
}

static bool
loader_find_layer_name_list(const char *name,
                            const struct loader_layer_list *layer_list) {
    if (!layer_list)
        return false;
    for (uint32_t j = 0; j < layer_list->count; j++)
        if (!strcmp(name, layer_list->list[j].info.layerName))
            return true;
    return false;
}

static bool loader_find_layer_name(const char *name, uint32_t layer_count,
                                   const char **layer_list) {
    if (!layer_list)
        return false;
    for (uint32_t j = 0; j < layer_count; j++)
        if (!strcmp(name, layer_list[j]))
            return true;
    return false;
}

static bool loader_find_layer_name_array(
    const char *name, uint32_t layer_count,
    const char layer_list[][VK_MAX_EXTENSION_NAME_SIZE]) {
    if (!layer_list)
        return false;
    for (uint32_t j = 0; j < layer_count; j++)
        if (!strcmp(name, layer_list[j]))
            return true;
    return false;
}

/**
 * Searches through an array of layer names (ppp_layer_names) looking for a
 * layer key_name.
 * If key_name is not found, it simply returns without updating anything.
 * Otherwise, the expand_count entries of expand_names are added to the layer
 * names. Any duplicate (pre-existing) expand_names in the layer names are
 * removed, as is the key_name itself.
 * Expand names are added to the back/end of the list of layer names.
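 *
 * Worked example (a hypothetical two-layer group): with
 *   key_name     = "VK_LAYER_LUNARG_standard_validation"
 *   expand_names = { "VK_LAYER_GOOGLE_threading", "VK_LAYER_LUNARG_core_validation" }
 * and an application list of
 *   { "VK_LAYER_GOOGLE_threading", "VK_LAYER_LUNARG_standard_validation" },
 * the pre-existing duplicate and the key are dropped and the group is
 * appended, leaving
 *   { "VK_LAYER_GOOGLE_threading", "VK_LAYER_LUNARG_core_validation" }
 * with *layer_count updated to 2.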
 * @param inst
 * @param layer_count
 * @param ppp_layer_names
 */
void loader_expand_layer_names(
    const struct loader_instance *inst, const char *key_name,
    uint32_t expand_count,
    const char expand_names[][VK_MAX_EXTENSION_NAME_SIZE],
    uint32_t *layer_count, char ***ppp_layer_names) {
    char **pp_layer_names, **pp_src_layers = *ppp_layer_names;

    if (!loader_find_layer_name(key_name, *layer_count,
                                (const char **)pp_src_layers))
        return; // didn't find the key_name in the list

    // since the total number of layers may expand, allocate new memory for the
    // array of pointers
    pp_layer_names =
        loader_heap_alloc(inst, (expand_count + *layer_count) * sizeof(char *),
                          VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);

    loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
               "Found meta layer %s, replacing with actual layer group",
               key_name);
    // In-place removal of any expand_names found in the layer names (remove
    // duplicates)
    // Also remove the key_name
    uint32_t src_idx, dst_idx, cnt = *layer_count;
    for (src_idx = 0; src_idx < *layer_count; src_idx++) {
        if (loader_find_layer_name_array(pp_src_layers[src_idx], expand_count,
                                         expand_names)) {
            pp_src_layers[src_idx] = NULL;
            cnt--;
        } else if (!strcmp(pp_src_layers[src_idx], key_name)) {
            pp_src_layers[src_idx] = NULL;
            cnt--;
        }
        pp_layer_names[src_idx] = pp_src_layers[src_idx];
    }
    for (dst_idx = 0; dst_idx < cnt; dst_idx++) {
        if (pp_layer_names[dst_idx] == NULL) {
            src_idx = dst_idx + 1;
            while (src_idx < *layer_count && pp_src_layers[src_idx] == NULL)
                src_idx++;
            if (src_idx < *layer_count && pp_src_layers[src_idx] != NULL)
                pp_layer_names[dst_idx] = pp_src_layers[src_idx];
        }
    }

    // Add the expand_names to layer_names
    src_idx = 0;
    for (dst_idx = cnt; dst_idx < cnt + expand_count; dst_idx++) {
        pp_layer_names[dst_idx] = (char *)&expand_names[src_idx++][0];
    }
    *layer_count = expand_count + cnt;
    *ppp_layer_names = pp_layer_names;
    return;
}

/**
 * Restores the layer name list and count into the pCreateInfo structure.
 * If is_device == true then pCreateInfo is a device structure, else an
 * instance structure.
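 *
 * This is the inverse of loader_expand_layer_names(): before control returns
 * to the application, the loader puts back the caller's original
 * enabledLayerCount / ppEnabledLayerNames and frees the expanded array it
 * allocated, so the application's structure is left as it was passed in.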
1731 * @param layer_count 1732 * @param layer_names 1733 * @param pCreateInfo 1734 */ 1735void loader_unexpand_dev_layer_names(const struct loader_instance *inst, 1736 uint32_t layer_count, char **layer_names, 1737 char **layer_ptr, 1738 const VkDeviceCreateInfo *pCreateInfo) { 1739 uint32_t *p_cnt = (uint32_t *)&pCreateInfo->enabledLayerCount; 1740 *p_cnt = layer_count; 1741 1742 char ***p_ptr = (char ***)&pCreateInfo->ppEnabledLayerNames; 1743 if ((char **)pCreateInfo->ppEnabledLayerNames != layer_ptr) 1744 loader_heap_free(inst, (void *)pCreateInfo->ppEnabledLayerNames); 1745 *p_ptr = layer_ptr; 1746 for (uint32_t i = 0; i < layer_count; i++) { 1747 char **pp_str = (char **)&pCreateInfo->ppEnabledLayerNames[i]; 1748 *pp_str = layer_names[i]; 1749 } 1750} 1751 1752void loader_unexpand_inst_layer_names(const struct loader_instance *inst, 1753 uint32_t layer_count, char **layer_names, 1754 char **layer_ptr, 1755 const VkInstanceCreateInfo *pCreateInfo) { 1756 uint32_t *p_cnt = (uint32_t *)&pCreateInfo->enabledLayerCount; 1757 *p_cnt = layer_count; 1758 1759 char ***p_ptr = (char ***)&pCreateInfo->ppEnabledLayerNames; 1760 if ((char **)pCreateInfo->ppEnabledLayerNames != layer_ptr) 1761 loader_heap_free(inst, (void *)pCreateInfo->ppEnabledLayerNames); 1762 *p_ptr = layer_ptr; 1763 for (uint32_t i = 0; i < layer_count; i++) { 1764 char **pp_str = (char **)&pCreateInfo->ppEnabledLayerNames[i]; 1765 *pp_str = layer_names[i]; 1766 } 1767} 1768 1769/** 1770 * Searches through the existing instance and device layer lists looking for 1771 * the set of required layer names. If found then it adds a meta property to the 1772 * layer list. 1773 * Assumes the required layers are the same for both instance and device lists. 1774 * @param inst 1775 * @param layer_count number of layers in layer_names 1776 * @param layer_names array of required layer names 1777 * @param layer_instance_list 1778 * @param layer_device_list 1779 */ 1780static void loader_add_layer_property_meta( 1781 const struct loader_instance *inst, uint32_t layer_count, 1782 const char layer_names[][VK_MAX_EXTENSION_NAME_SIZE], 1783 struct loader_layer_list *layer_instance_list, 1784 struct loader_layer_list *layer_device_list) { 1785 uint32_t i, j; 1786 bool found; 1787 struct loader_layer_list *layer_list; 1788 1789 if (0 == layer_count || (!layer_instance_list && !layer_device_list)) 1790 return; 1791 if ((layer_instance_list && (layer_count > layer_instance_list->count)) && 1792 (layer_device_list && (layer_count > layer_device_list->count))) 1793 return; 1794 1795 for (j = 0; j < 2; j++) { 1796 if (j == 0) 1797 layer_list = layer_instance_list; 1798 else 1799 layer_list = layer_device_list; 1800 found = true; 1801 if (layer_list == NULL) 1802 continue; 1803 for (i = 0; i < layer_count; i++) { 1804 if (loader_find_layer_name_list(layer_names[i], layer_list)) 1805 continue; 1806 found = false; 1807 break; 1808 } 1809 1810 struct loader_layer_properties *props; 1811 if (found) { 1812 props = loader_get_next_layer_property(inst, layer_list); 1813 props->type = VK_LAYER_TYPE_META_EXPLICT; 1814 strncpy(props->info.description, "LunarG Standard Validation Layer", 1815 sizeof(props->info.description)); 1816 props->info.implementationVersion = 1; 1817 strncpy(props->info.layerName, std_validation_str, 1818 sizeof(props->info.layerName)); 1819 // TODO what about specVersion? 
for now insert loader's built 1820 // version 1821 props->info.specVersion = VK_API_VERSION; 1822 } 1823 } 1824} 1825 1826/** 1827 * Given a cJSON struct (json) of the top level JSON object from layer manifest 1828 * file, add entry to the layer_list. 1829 * Fill out the layer_properties in this list entry from the input cJSON object. 1830 * 1831 * \returns 1832 * void 1833 * layer_list has a new entry and initialized accordingly. 1834 * If the json input object does not have all the required fields no entry 1835 * is added to the list. 1836 */ 1837static void 1838loader_add_layer_properties(const struct loader_instance *inst, 1839 struct loader_layer_list *layer_instance_list, 1840 struct loader_layer_list *layer_device_list, 1841 cJSON *json, bool is_implicit, char *filename) { 1842 /* Fields in layer manifest file that are required: 1843 * (required) “file_format_version” 1844 * following are required in the "layer" object: 1845 * (required) "name" 1846 * (required) "type" 1847 * (required) “library_path” 1848 * (required) “api_version” 1849 * (required) “implementation_version” 1850 * (required) “description” 1851 * (required for implicit layers) “disable_environment” 1852 * 1853 * First get all required items and if any missing abort 1854 */ 1855 1856 cJSON *item, *layer_node, *ext_item; 1857 char *temp; 1858 char *name, *type, *library_path, *api_version; 1859 char *implementation_version, *description; 1860 cJSON *disable_environment = NULL; 1861 int i, j; 1862 VkExtensionProperties ext_prop; 1863 item = cJSON_GetObjectItem(json, "file_format_version"); 1864 if (item == NULL) { 1865 return; 1866 } 1867 char *file_vers = cJSON_PrintUnformatted(item); 1868 loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, 1869 "Found manifest file %s, version %s", filename, file_vers); 1870 if (strcmp(file_vers, "\"1.0.0\"") != 0) 1871 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, 1872 "Unexpected manifest file version (expected 1.0.0), may " 1873 "cause errors"); 1874 loader_tls_heap_free(file_vers); 1875 1876 layer_node = cJSON_GetObjectItem(json, "layer"); 1877 if (layer_node == NULL) { 1878 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, 1879 "Can't find \"layer\" object in manifest JSON file, " 1880 "skipping this file"); 1881 return; 1882 } 1883 1884 // loop through all "layer" objects in the file 1885 do { 1886#define GET_JSON_OBJECT(node, var) \ 1887 { \ 1888 var = cJSON_GetObjectItem(node, #var); \ 1889 if (var == NULL) { \ 1890 layer_node = layer_node->next; \ 1891 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, \ 1892 "Didn't find required layer object %s in manifest " \ 1893 "JSON file, skipping this layer", \ 1894 #var); \ 1895 continue; \ 1896 } \ 1897 } 1898#define GET_JSON_ITEM(node, var) \ 1899 { \ 1900 item = cJSON_GetObjectItem(node, #var); \ 1901 if (item == NULL) { \ 1902 layer_node = layer_node->next; \ 1903 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, \ 1904 "Didn't find required layer value %s in manifest JSON " \ 1905 "file, skipping this layer", \ 1906 #var); \ 1907 continue; \ 1908 } \ 1909 temp = cJSON_Print(item); \ 1910 temp[strlen(temp) - 1] = '\0'; \ 1911 var = loader_stack_alloc(strlen(temp) + 1); \ 1912 strcpy(var, &temp[1]); \ 1913 loader_tls_heap_free(temp); \ 1914 } 1915 GET_JSON_ITEM(layer_node, name) 1916 GET_JSON_ITEM(layer_node, type) 1917 GET_JSON_ITEM(layer_node, library_path) 1918 GET_JSON_ITEM(layer_node, api_version) 1919 GET_JSON_ITEM(layer_node, implementation_version) 1920 GET_JSON_ITEM(layer_node, description) 1921 if 
(is_implicit) { 1922 GET_JSON_OBJECT(layer_node, disable_environment) 1923 } 1924#undef GET_JSON_ITEM 1925#undef GET_JSON_OBJECT 1926 1927 // add list entry 1928 struct loader_layer_properties *props = NULL; 1929 if (!strcmp(type, "DEVICE")) { 1930 if (layer_device_list == NULL) { 1931 layer_node = layer_node->next; 1932 continue; 1933 } 1934 props = loader_get_next_layer_property(inst, layer_device_list); 1935 props->type = (is_implicit) ? VK_LAYER_TYPE_DEVICE_IMPLICIT 1936 : VK_LAYER_TYPE_DEVICE_EXPLICIT; 1937 } 1938 if (!strcmp(type, "INSTANCE")) { 1939 if (layer_instance_list == NULL) { 1940 layer_node = layer_node->next; 1941 continue; 1942 } 1943 props = loader_get_next_layer_property(inst, layer_instance_list); 1944 props->type = (is_implicit) ? VK_LAYER_TYPE_INSTANCE_IMPLICIT 1945 : VK_LAYER_TYPE_INSTANCE_EXPLICIT; 1946 } 1947 if (!strcmp(type, "GLOBAL")) { 1948 if (layer_instance_list != NULL) 1949 props = 1950 loader_get_next_layer_property(inst, layer_instance_list); 1951 else if (layer_device_list != NULL) 1952 props = loader_get_next_layer_property(inst, layer_device_list); 1953 else { 1954 layer_node = layer_node->next; 1955 continue; 1956 } 1957 props->type = (is_implicit) ? VK_LAYER_TYPE_GLOBAL_IMPLICIT 1958 : VK_LAYER_TYPE_GLOBAL_EXPLICIT; 1959 } 1960 1961 if (props == NULL) { 1962 layer_node = layer_node->next; 1963 continue; 1964 } 1965 1966 strncpy(props->info.layerName, name, sizeof(props->info.layerName)); 1967 props->info.layerName[sizeof(props->info.layerName) - 1] = '\0'; 1968 1969 char *fullpath = props->lib_name; 1970 char *rel_base; 1971 if (loader_platform_is_path(library_path)) { 1972 // a relative or absolute path 1973 char *name_copy = loader_stack_alloc(strlen(filename) + 1); 1974 strcpy(name_copy, filename); 1975 rel_base = loader_platform_dirname(name_copy); 1976 loader_expand_path(library_path, rel_base, MAX_STRING_SIZE, 1977 fullpath); 1978 } else { 1979 // a filename which is assumed in a system directory 1980 loader_get_fullpath(library_path, DEFAULT_VK_LAYERS_PATH, 1981 MAX_STRING_SIZE, fullpath); 1982 } 1983 props->info.specVersion = loader_make_version(api_version); 1984 props->info.implementationVersion = atoi(implementation_version); 1985 strncpy((char *)props->info.description, description, 1986 sizeof(props->info.description)); 1987 props->info.description[sizeof(props->info.description) - 1] = '\0'; 1988 if (is_implicit) { 1989 if (!disable_environment || !disable_environment->child) { 1990 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, 1991 "Didn't find required layer child value disable_environment" 1992 "in manifest JSON file, skipping this layer"); 1993 layer_node = layer_node->next; 1994 continue; 1995 } 1996 strncpy(props->disable_env_var.name, 1997 disable_environment->child->string, 1998 sizeof(props->disable_env_var.name)); 1999 props->disable_env_var 2000 .name[sizeof(props->disable_env_var.name) - 1] = '\0'; 2001 strncpy(props->disable_env_var.value, 2002 disable_environment->child->valuestring, 2003 sizeof(props->disable_env_var.value)); 2004 props->disable_env_var 2005 .value[sizeof(props->disable_env_var.value) - 1] = '\0'; 2006 } 2007 2008/** 2009 * Now get all optional items and objects and put in list: 2010 * functions 2011 * instance_extensions 2012 * device_extensions 2013 * enable_environment (implicit layers only) 2014 */ 2015#define GET_JSON_OBJECT(node, var) \ 2016 { var = cJSON_GetObjectItem(node, #var); } 2017#define GET_JSON_ITEM(node, var) \ 2018 { \ 2019 item = cJSON_GetObjectItem(node, #var); \ 2020 if (item 
!= NULL) { \ 2021 temp = cJSON_Print(item); \ 2022 temp[strlen(temp) - 1] = '\0'; \ 2023 var = loader_stack_alloc(strlen(temp) + 1); \ 2024 strcpy(var, &temp[1]); \ 2025 loader_tls_heap_free(temp); \ 2026 } \ 2027 } 2028 2029 cJSON *instance_extensions, *device_extensions, *functions, 2030 *enable_environment; 2031 cJSON *entrypoints; 2032 char *vkGetInstanceProcAddr, *vkGetDeviceProcAddr, *spec_version; 2033 char **entry_array; 2034 vkGetInstanceProcAddr = NULL; 2035 vkGetDeviceProcAddr = NULL; 2036 spec_version = NULL; 2037 entrypoints = NULL; 2038 entry_array = NULL; 2039 /** 2040 * functions 2041 * vkGetInstanceProcAddr 2042 * vkGetDeviceProcAddr 2043 */ 2044 GET_JSON_OBJECT(layer_node, functions) 2045 if (functions != NULL) { 2046 GET_JSON_ITEM(functions, vkGetInstanceProcAddr) 2047 GET_JSON_ITEM(functions, vkGetDeviceProcAddr) 2048 if (vkGetInstanceProcAddr != NULL) 2049 strncpy(props->functions.str_gipa, vkGetInstanceProcAddr, 2050 sizeof(props->functions.str_gipa)); 2051 props->functions.str_gipa[sizeof(props->functions.str_gipa) - 1] = 2052 '\0'; 2053 if (vkGetDeviceProcAddr != NULL) 2054 strncpy(props->functions.str_gdpa, vkGetDeviceProcAddr, 2055 sizeof(props->functions.str_gdpa)); 2056 props->functions.str_gdpa[sizeof(props->functions.str_gdpa) - 1] = 2057 '\0'; 2058 } 2059 /** 2060 * instance_extensions 2061 * array of 2062 * name 2063 * spec_version 2064 */ 2065 GET_JSON_OBJECT(layer_node, instance_extensions) 2066 if (instance_extensions != NULL) { 2067 int count = cJSON_GetArraySize(instance_extensions); 2068 for (i = 0; i < count; i++) { 2069 ext_item = cJSON_GetArrayItem(instance_extensions, i); 2070 GET_JSON_ITEM(ext_item, name) 2071 GET_JSON_ITEM(ext_item, spec_version) 2072 if (name != NULL) { 2073 strncpy(ext_prop.extensionName, name, 2074 sizeof(ext_prop.extensionName)); 2075 ext_prop.extensionName[sizeof(ext_prop.extensionName) - 1] = 2076 '\0'; 2077 } 2078 ext_prop.specVersion = atoi(spec_version); 2079 loader_add_to_ext_list(inst, &props->instance_extension_list, 1, 2080 &ext_prop); 2081 } 2082 } 2083 /** 2084 * device_extensions 2085 * array of 2086 * name 2087 * spec_version 2088 * entrypoints 2089 */ 2090 GET_JSON_OBJECT(layer_node, device_extensions) 2091 if (device_extensions != NULL) { 2092 int count = cJSON_GetArraySize(device_extensions); 2093 for (i = 0; i < count; i++) { 2094 ext_item = cJSON_GetArrayItem(device_extensions, i); 2095 GET_JSON_ITEM(ext_item, name) 2096 GET_JSON_ITEM(ext_item, spec_version) 2097 if (name != NULL) { 2098 strncpy(ext_prop.extensionName, name, 2099 sizeof(ext_prop.extensionName)); 2100 ext_prop.extensionName[sizeof(ext_prop.extensionName) - 1] = 2101 '\0'; 2102 } 2103 ext_prop.specVersion = atoi(spec_version); 2104 // entrypoints = cJSON_GetObjectItem(ext_item, "entrypoints"); 2105 GET_JSON_OBJECT(ext_item, entrypoints) 2106 int entry_count; 2107 if (entrypoints == NULL) { 2108 loader_add_to_dev_ext_list(inst, 2109 &props->device_extension_list, 2110 &ext_prop, 0, NULL); 2111 continue; 2112 } 2113 entry_count = cJSON_GetArraySize(entrypoints); 2114 if (entry_count) 2115 entry_array = (char **)loader_stack_alloc(sizeof(char *) * 2116 entry_count); 2117 for (j = 0; j < entry_count; j++) { 2118 ext_item = cJSON_GetArrayItem(entrypoints, j); 2119 if (ext_item != NULL) { 2120 temp = cJSON_Print(ext_item); 2121 temp[strlen(temp) - 1] = '\0'; 2122 entry_array[j] = loader_stack_alloc(strlen(temp) + 1); 2123 strcpy(entry_array[j], &temp[1]); 2124 loader_tls_heap_free(temp); 2125 } 2126 } 2127 loader_add_to_dev_ext_list(inst, 
&props->device_extension_list, 2128 &ext_prop, entry_count, entry_array); 2129 } 2130 } 2131 if (is_implicit) { 2132 GET_JSON_OBJECT(layer_node, enable_environment) 2133 2134 // enable_environment is optional 2135 if (enable_environment) { 2136 strncpy(props->enable_env_var.name, 2137 enable_environment->child->string, 2138 sizeof(props->enable_env_var.name)); 2139 props->enable_env_var 2140 .name[sizeof(props->enable_env_var.name) - 1] = '\0'; 2141 strncpy(props->enable_env_var.value, 2142 enable_environment->child->valuestring, 2143 sizeof(props->enable_env_var.value)); 2144 props->enable_env_var 2145 .value[sizeof(props->enable_env_var.value) - 1] = '\0'; 2146 } 2147 } 2148#undef GET_JSON_ITEM 2149#undef GET_JSON_OBJECT 2150 // for global layers need to add them to both device and instance list 2151 if (!strcmp(type, "GLOBAL")) { 2152 struct loader_layer_properties *dev_props; 2153 if (layer_instance_list == NULL || layer_device_list == NULL) { 2154 layer_node = layer_node->next; 2155 continue; 2156 } 2157 dev_props = loader_get_next_layer_property(inst, layer_device_list); 2158 // copy into device layer list 2159 loader_copy_layer_properties(inst, dev_props, props); 2160 } 2161 layer_node = layer_node->next; 2162 } while (layer_node != NULL); 2163 return; 2164} 2165 2166/** 2167 * Find the Vulkan library manifest files. 2168 * 2169 * This function scans the "location" or "env_override" directories/files 2170 * for a list of JSON manifest files. If env_override is non-NULL 2171 * and has a valid value. Then the location is ignored. Otherwise 2172 * location is used to look for manifest files. The location 2173 * is interpreted as Registry path on Windows and a directory path(s) 2174 * on Linux. "home_location" is an additional directory in the users home 2175 * directory to look at. It is exapanded into the dir path $HOME/home_location. 2176 * This "home_location" is only used on Linux. 2177 * 2178 * \returns 2179 * A string list of manifest files to be opened in out_files param. 2180 * List has a pointer to string for each manifest filename. 2181 * When done using the list in out_files, pointers should be freed. 
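 *
 * Typical consumption pattern (mirrors what loader_icd_scan() does further
 * below in this file):
 *
 *     struct loader_manifest_files files;
 *     loader_get_manifest_files(inst, "VK_ICD_FILENAMES", false,
 *                               DEFAULT_VK_DRIVERS_INFO, HOME_VK_DRIVERS_INFO,
 *                               &files);
 *     for (uint32_t i = 0; i < files.count; i++) {
 *         // ... parse files.filename_list[i] as JSON ...
 *         loader_heap_free(inst, files.filename_list[i]);
 *     }
 *     loader_heap_free(inst, files.filename_list);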
2182 * Location or override string lists can be either files or directories as 2183 *follows: 2184 * | location | override 2185 * -------------------------------- 2186 * Win ICD | files | files 2187 * Win Layer | files | dirs 2188 * Linux ICD | dirs | files 2189 * Linux Layer| dirs | dirs 2190 */ 2191static void loader_get_manifest_files(const struct loader_instance *inst, 2192 const char *env_override, bool is_layer, 2193 const char *location, 2194 const char *home_location, 2195 struct loader_manifest_files *out_files) { 2196 char *override = NULL; 2197 char *loc; 2198 char *file, *next_file, *name; 2199 size_t alloced_count = 64; 2200 char full_path[2048]; 2201 DIR *sysdir = NULL; 2202 bool list_is_dirs = false; 2203 struct dirent *dent; 2204 2205 out_files->count = 0; 2206 out_files->filename_list = NULL; 2207 2208 if (env_override != NULL && (override = loader_getenv(env_override))) { 2209#if !defined(_WIN32) 2210 if (geteuid() != getuid()) { 2211 /* Don't allow setuid apps to use the env var: */ 2212 loader_free_getenv(override); 2213 override = NULL; 2214 } 2215#endif 2216 } 2217 2218#if !defined(_WIN32) 2219 if (location == NULL && home_location == NULL) { 2220#else 2221 home_location = NULL; 2222 if (location == NULL) { 2223#endif 2224 loader_log( 2225 inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2226 "Can't get manifest files with NULL location, env_override=%s", 2227 env_override); 2228 return; 2229 } 2230 2231#if defined(_WIN32) 2232 list_is_dirs = (is_layer && override != NULL) ? true : false; 2233#else 2234 list_is_dirs = (override == NULL || is_layer) ? true : false; 2235#endif 2236 // Make a copy of the input we are using so it is not modified 2237 // Also handle getting the location(s) from registry on Windows 2238 if (override == NULL) { 2239 loc = loader_stack_alloc(strlen(location) + 1); 2240 if (loc == NULL) { 2241 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2242 "Out of memory can't get manifest files"); 2243 return; 2244 } 2245 strcpy(loc, location); 2246#if defined(_WIN32) 2247 loc = loader_get_registry_files(inst, loc); 2248 if (loc == NULL) { 2249 if (!is_layer) { 2250 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2251 "Registry lookup failed can't get ICD manifest " 2252 "files, do you have a Vulkan driver installed"); 2253 } else { 2254 // warning only for layers 2255 loader_log( 2256 inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, 2257 "Registry lookup failed can't get layer manifest files"); 2258 } 2259 return; 2260 } 2261#endif 2262 } else { 2263 loc = loader_stack_alloc(strlen(override) + 1); 2264 if (loc == NULL) { 2265 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2266 "Out of memory can't get manifest files"); 2267 return; 2268 } 2269 strcpy(loc, override); 2270 loader_free_getenv(override); 2271 } 2272 2273 // Print out the paths being searched if debugging is enabled 2274 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 2275 "Searching the following paths for manifest files: %s\n", loc); 2276 2277 file = loc; 2278 while (*file) { 2279 next_file = loader_get_next_path(file); 2280 if (list_is_dirs) { 2281 sysdir = opendir(file); 2282 name = NULL; 2283 if (sysdir) { 2284 dent = readdir(sysdir); 2285 if (dent == NULL) 2286 break; 2287 name = &(dent->d_name[0]); 2288 loader_get_fullpath(name, file, sizeof(full_path), full_path); 2289 name = full_path; 2290 } 2291 } else { 2292#if defined(_WIN32) 2293 name = file; 2294#else 2295 // only Linux has relative paths 2296 char *dir; 2297 // make a copy of location so it isn't modified 2298 dir = 
loader_stack_alloc(strlen(loc) + 1); 2299 if (dir == NULL) { 2300 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2301 "Out of memory can't get manifest files"); 2302 return; 2303 } 2304 strcpy(dir, loc); 2305 2306 loader_get_fullpath(file, dir, sizeof(full_path), full_path); 2307 2308 name = full_path; 2309#endif 2310 } 2311 while (name) { 2312 /* Look for files ending with ".json" suffix */ 2313 uint32_t nlen = (uint32_t)strlen(name); 2314 const char *suf = name + nlen - 5; 2315 if ((nlen > 5) && !strncmp(suf, ".json", 5)) { 2316 if (out_files->count == 0) { 2317 out_files->filename_list = 2318 loader_heap_alloc(inst, alloced_count * sizeof(char *), 2319 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 2320 } else if (out_files->count == alloced_count) { 2321 out_files->filename_list = 2322 loader_heap_realloc(inst, out_files->filename_list, 2323 alloced_count * sizeof(char *), 2324 alloced_count * sizeof(char *) * 2, 2325 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 2326 alloced_count *= 2; 2327 } 2328 if (out_files->filename_list == NULL) { 2329 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2330 "Out of memory can't alloc manifest file list"); 2331 return; 2332 } 2333 out_files->filename_list[out_files->count] = loader_heap_alloc( 2334 inst, strlen(name) + 1, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 2335 if (out_files->filename_list[out_files->count] == NULL) { 2336 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2337 "Out of memory can't get manifest files"); 2338 return; 2339 } 2340 strcpy(out_files->filename_list[out_files->count], name); 2341 out_files->count++; 2342 } else if (!list_is_dirs) { 2343 loader_log( 2344 inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, 2345 "Skipping manifest file %s, file name must end in .json", 2346 name); 2347 } 2348 if (list_is_dirs) { 2349 dent = readdir(sysdir); 2350 if (dent == NULL) 2351 break; 2352 name = &(dent->d_name[0]); 2353 loader_get_fullpath(name, file, sizeof(full_path), full_path); 2354 name = full_path; 2355 } else { 2356 break; 2357 } 2358 } 2359 if (sysdir) 2360 closedir(sysdir); 2361 file = next_file; 2362#if !defined(_WIN32) 2363 if (home_location != NULL && (next_file == NULL || *next_file == '\0') 2364 && override == NULL) { 2365 char *home = secure_getenv("HOME"); 2366 if (home != NULL) { 2367 size_t len; 2368 char *home_loc = loader_stack_alloc(strlen(home) + 2 + 2369 strlen(home_location)); 2370 if (home_loc == NULL) { 2371 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2372 "Out of memory can't get manifest files"); 2373 return; 2374 } 2375 strcpy(home_loc, home); 2376 // Add directory separator if needed 2377 if (home_location[0] != DIRECTORY_SYMBOL) { 2378 len = strlen(home_loc); 2379 home_loc[len] = DIRECTORY_SYMBOL; 2380 home_loc[len+1] = '\0'; 2381 } 2382 strcat(home_loc, home_location); 2383 file = home_loc; 2384 next_file = loader_get_next_path(file); 2385 home_location = NULL; 2386 2387 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 2388 "Searching the following paths for manifest files: %s\n", 2389 home_loc); 2390 list_is_dirs = true; 2391 } 2392 } 2393#endif 2394 } 2395 return; 2396} 2397 2398void loader_init_icd_lib_list() {} 2399 2400void loader_destroy_icd_lib_list() {} 2401/** 2402 * Try to find the Vulkan ICD driver(s). 2403 * 2404 * This function scans the default system loader path(s) or path 2405 * specified by the \c VK_ICD_FILENAMES environment variable in 2406 * order to find loadable VK ICDs manifest files. From these 2407 * manifest files it finds the ICD libraries. 
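 *
 * For reference, the general shape of an ICD manifest this function accepts
 * (contents illustrative; only file_format_version, ICD.library_path and
 * ICD.api_version are consumed below):
 *
 *     {
 *         "file_format_version": "1.0.0",
 *         "ICD": {
 *             "library_path": "path/to/icd/library",
 *             "api_version": "1.0.3"
 *         }
 *     }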
2408 * 2409 * \returns 2410 * a list of icds that were discovered 2411 */ 2412void loader_icd_scan(const struct loader_instance *inst, 2413 struct loader_icd_libs *icds) { 2414 char *file_str; 2415 struct loader_manifest_files manifest_files; 2416 2417 loader_scanned_icd_init(inst, icds); 2418 // Get a list of manifest files for ICDs 2419 loader_get_manifest_files(inst, "VK_ICD_FILENAMES", false, 2420 DEFAULT_VK_DRIVERS_INFO, HOME_VK_DRIVERS_INFO, 2421 &manifest_files); 2422 if (manifest_files.count == 0) 2423 return; 2424 loader_platform_thread_lock_mutex(&loader_json_lock); 2425 for (uint32_t i = 0; i < manifest_files.count; i++) { 2426 file_str = manifest_files.filename_list[i]; 2427 if (file_str == NULL) 2428 continue; 2429 2430 cJSON *json; 2431 json = loader_get_json(inst, file_str); 2432 if (!json) 2433 continue; 2434 cJSON *item, *itemICD; 2435 item = cJSON_GetObjectItem(json, "file_format_version"); 2436 if (item == NULL) { 2437 loader_platform_thread_unlock_mutex(&loader_json_lock); 2438 return; 2439 } 2440 char *file_vers = cJSON_Print(item); 2441 loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, 2442 "Found manifest file %s, version %s", file_str, file_vers); 2443 if (strcmp(file_vers, "\"1.0.0\"") != 0) 2444 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, 2445 "Unexpected manifest file version (expected 1.0.0), may " 2446 "cause errors"); 2447 loader_tls_heap_free(file_vers); 2448 itemICD = cJSON_GetObjectItem(json, "ICD"); 2449 if (itemICD != NULL) { 2450 item = cJSON_GetObjectItem(itemICD, "library_path"); 2451 if (item != NULL) { 2452 char *temp = cJSON_Print(item); 2453 if (!temp || strlen(temp) == 0) { 2454 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, 2455 "Can't find \"library_path\" in ICD JSON file " 2456 "%s, skipping", 2457 file_str); 2458 loader_tls_heap_free(temp); 2459 loader_heap_free(inst, file_str); 2460 cJSON_Delete(json); 2461 continue; 2462 } 2463 // strip out extra quotes 2464 temp[strlen(temp) - 1] = '\0'; 2465 char *library_path = loader_stack_alloc(strlen(temp) + 1); 2466 strcpy(library_path, &temp[1]); 2467 loader_tls_heap_free(temp); 2468 if (!library_path || strlen(library_path) == 0) { 2469 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, 2470 "Can't find \"library_path\" in ICD JSON file " 2471 "%s, skipping", 2472 file_str); 2473 loader_heap_free(inst, file_str); 2474 cJSON_Delete(json); 2475 continue; 2476 } 2477 char fullpath[MAX_STRING_SIZE]; 2478 // Print out the paths being searched if debugging is enabled 2479 loader_log( 2480 inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 2481 "Searching for ICD drivers named %s default dir %s\n", 2482 library_path, DEFAULT_VK_DRIVERS_PATH); 2483 if (loader_platform_is_path(library_path)) { 2484 // a relative or absolute path 2485 char *name_copy = loader_stack_alloc(strlen(file_str) + 1); 2486 char *rel_base; 2487 strcpy(name_copy, file_str); 2488 rel_base = loader_platform_dirname(name_copy); 2489 loader_expand_path(library_path, rel_base, sizeof(fullpath), 2490 fullpath); 2491 } else { 2492 // a filename which is assumed in a system directory 2493 loader_get_fullpath(library_path, DEFAULT_VK_DRIVERS_PATH, 2494 sizeof(fullpath), fullpath); 2495 } 2496 2497 uint32_t vers = 0; 2498 item = cJSON_GetObjectItem(itemICD, "api_version"); 2499 if (item != NULL) { 2500 temp = cJSON_Print(item); 2501 vers = loader_make_version(temp); 2502 loader_tls_heap_free(temp); 2503 } 2504 loader_scanned_icd_add(inst, icds, fullpath, vers); 2505 } else 2506 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, 
2507 "Can't find \"library_path\" object in ICD JSON " 2508 "file %s, skipping", 2509 file_str); 2510 } else 2511 loader_log( 2512 inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, 2513 "Can't find \"ICD\" object in ICD JSON file %s, skipping", 2514 file_str); 2515 2516 loader_heap_free(inst, file_str); 2517 cJSON_Delete(json); 2518 } 2519 loader_heap_free(inst, manifest_files.filename_list); 2520 loader_platform_thread_unlock_mutex(&loader_json_lock); 2521} 2522 2523void loader_layer_scan(const struct loader_instance *inst, 2524 struct loader_layer_list *instance_layers, 2525 struct loader_layer_list *device_layers) { 2526 char *file_str; 2527 struct loader_manifest_files 2528 manifest_files[2]; // [0] = explicit, [1] = implicit 2529 cJSON *json; 2530 uint32_t i; 2531 uint32_t implicit; 2532 2533 // Get a list of manifest files for explicit layers 2534 loader_get_manifest_files(inst, LAYERS_PATH_ENV, true, 2535 DEFAULT_VK_ELAYERS_INFO, HOME_VK_ELAYERS_INFO, 2536 &manifest_files[0]); 2537 // Pass NULL for environment variable override - implicit layers are not 2538 // overridden by LAYERS_PATH_ENV 2539 loader_get_manifest_files(inst, NULL, true, DEFAULT_VK_ILAYERS_INFO, 2540 HOME_VK_ILAYERS_INFO, &manifest_files[1]); 2541 if (manifest_files[0].count == 0 && manifest_files[1].count == 0) 2542 return; 2543 2544#if 0 // TODO 2545 /** 2546 * We need a list of the layer libraries, not just a list of 2547 * the layer properties (a layer library could expose more than 2548 * one layer property). This list of scanned layers would be 2549 * used to check for global and physicaldevice layer properties. 2550 */ 2551 if (!loader_init_layer_library_list(&loader.scanned_layer_libraries)) { 2552 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2553 "Alloc for layer list failed: %s line: %d", __FILE__, __LINE__); 2554 return; 2555 } 2556#endif 2557 2558 /* cleanup any previously scanned libraries */ 2559 loader_delete_layer_properties(inst, instance_layers); 2560 loader_delete_layer_properties(inst, device_layers); 2561 2562 loader_platform_thread_lock_mutex(&loader_json_lock); 2563 for (implicit = 0; implicit < 2; implicit++) { 2564 for (i = 0; i < manifest_files[implicit].count; i++) { 2565 file_str = manifest_files[implicit].filename_list[i]; 2566 if (file_str == NULL) 2567 continue; 2568 2569 // parse file into JSON struct 2570 json = loader_get_json(inst, file_str); 2571 if (!json) { 2572 continue; 2573 } 2574 2575 // TODO error if device layers expose instance_extensions 2576 // TODO error if instance layers expose device extensions 2577 loader_add_layer_properties(inst, instance_layers, device_layers, 2578 json, (implicit == 1), file_str); 2579 2580 loader_heap_free(inst, file_str); 2581 cJSON_Delete(json); 2582 } 2583 } 2584 if (manifest_files[0].count != 0) 2585 loader_heap_free(inst, manifest_files[0].filename_list); 2586 2587 if (manifest_files[1].count != 0) 2588 loader_heap_free(inst, manifest_files[1].filename_list); 2589 2590 // add a meta layer for validation if the validation layers are all present 2591 loader_add_layer_property_meta( 2592 inst, sizeof(std_validation_names) / sizeof(std_validation_names[0]), 2593 std_validation_names, instance_layers, device_layers); 2594 2595 loader_platform_thread_unlock_mutex(&loader_json_lock); 2596} 2597 2598static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL 2599loader_gpa_instance_internal(VkInstance inst, const char *pName) { 2600 if (!strcmp(pName, "vkGetInstanceProcAddr")) 2601 return (void *)loader_gpa_instance_internal; 2602 if (!strcmp(pName, 
"vkCreateInstance")) 2603 return (void *)loader_CreateInstance; 2604 if (!strcmp(pName, "vkCreateDevice")) 2605 return (void *)loader_create_device_terminator; 2606 2607 // inst is not wrapped 2608 if (inst == VK_NULL_HANDLE) { 2609 return NULL; 2610 } 2611 VkLayerInstanceDispatchTable *disp_table = 2612 *(VkLayerInstanceDispatchTable **)inst; 2613 void *addr; 2614 2615 if (disp_table == NULL) 2616 return NULL; 2617 2618 addr = loader_lookup_instance_dispatch_table(disp_table, pName); 2619 if (addr) { 2620 return addr; 2621 } 2622 2623 if (disp_table->GetInstanceProcAddr == NULL) { 2624 return NULL; 2625 } 2626 return disp_table->GetInstanceProcAddr(inst, pName); 2627} 2628 2629/** 2630 * Initialize device_ext dispatch table entry as follows: 2631 * If dev == NULL find all logical devices created within this instance and 2632 * init the entry (given by idx) in the ext dispatch table. 2633 * If dev != NULL only initialize the entry in the given dev's dispatch table. 2634 * The initialization value is gotten by calling down the device chain with 2635 * GDPA. 2636 * If GDPA returns NULL then don't initialize the dispatch table entry. 2637 */ 2638static void loader_init_dispatch_dev_ext_entry(struct loader_instance *inst, 2639 struct loader_device *dev, 2640 uint32_t idx, 2641 const char *funcName) 2642 2643{ 2644 void *gdpa_value; 2645 if (dev != NULL) { 2646 gdpa_value = dev->loader_dispatch.core_dispatch.GetDeviceProcAddr( 2647 dev->device, funcName); 2648 if (gdpa_value != NULL) 2649 dev->loader_dispatch.ext_dispatch.DevExt[idx] = 2650 (PFN_vkDevExt)gdpa_value; 2651 } else { 2652 for (uint32_t i = 0; i < inst->total_icd_count; i++) { 2653 struct loader_icd *icd = &inst->icds[i]; 2654 struct loader_device *ldev = icd->logical_device_list; 2655 while (ldev) { 2656 gdpa_value = 2657 ldev->loader_dispatch.core_dispatch.GetDeviceProcAddr( 2658 ldev->device, funcName); 2659 if (gdpa_value != NULL) 2660 ldev->loader_dispatch.ext_dispatch.DevExt[idx] = 2661 (PFN_vkDevExt)gdpa_value; 2662 ldev = ldev->next; 2663 } 2664 } 2665 } 2666} 2667 2668/** 2669 * Find all dev extension in the hash table and initialize the dispatch table 2670 * for dev for each of those extension entrypoints found in hash table. 
2671 2672 */ 2673static void loader_init_dispatch_dev_ext(struct loader_instance *inst, 2674 struct loader_device *dev) { 2675 for (uint32_t i = 0; i < MAX_NUM_DEV_EXTS; i++) { 2676 if (inst->disp_hash[i].func_name != NULL) 2677 loader_init_dispatch_dev_ext_entry(inst, dev, i, 2678 inst->disp_hash[i].func_name); 2679 } 2680} 2681 2682static bool loader_check_icds_for_address(struct loader_instance *inst, 2683 const char *funcName) { 2684 struct loader_icd *icd; 2685 icd = inst->icds; 2686 while (icd) { 2687 if (icd->this_icd_lib->GetInstanceProcAddr(icd->instance, funcName)) 2688 // this icd supports funcName 2689 return true; 2690 icd = icd->next; 2691 } 2692 2693 return false; 2694} 2695 2696static void loader_free_dev_ext_table(struct loader_instance *inst) { 2697 for (uint32_t i = 0; i < MAX_NUM_DEV_EXTS; i++) { 2698 loader_heap_free(inst, inst->disp_hash[i].func_name); 2699 loader_heap_free(inst, inst->disp_hash[i].list.index); 2700 } 2701 memset(inst->disp_hash, 0, sizeof(inst->disp_hash)); 2702} 2703 2704static bool loader_add_dev_ext_table(struct loader_instance *inst, 2705 uint32_t *ptr_idx, const char *funcName) { 2706 uint32_t i; 2707 uint32_t idx = *ptr_idx; 2708 struct loader_dispatch_hash_list *list = &inst->disp_hash[idx].list; 2709 2710 if (!inst->disp_hash[idx].func_name) { 2711 // no entry here at this idx, so use it 2712 assert(list->capacity == 0); 2713 inst->disp_hash[idx].func_name = (char *)loader_heap_alloc( 2714 inst, strlen(funcName) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2715 if (inst->disp_hash[idx].func_name == NULL) { 2716 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2717 "loader_add_dev_ext_table() can't allocate memory for " 2718 "func_name"); 2719 return false; 2720 } 2721 strncpy(inst->disp_hash[idx].func_name, funcName, strlen(funcName) + 1); 2722 return true; 2723 } 2724 2725 // check for enough capacity 2726 if (list->capacity == 0) { 2727 list->index = loader_heap_alloc(inst, 8 * sizeof(*(list->index)), 2728 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2729 if (list->index == NULL) { 2730 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2731 "loader_add_dev_ext_table() can't allocate list memory"); 2732 return false; 2733 } 2734 list->capacity = 8 * sizeof(*(list->index)); 2735 } else if (list->capacity < (list->count + 1) * sizeof(*(list->index))) { 2736 list->index = loader_heap_realloc(inst, list->index, list->capacity, 2737 list->capacity * 2, 2738 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2739 if (list->index == NULL) { 2740 loader_log( 2741 inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2742 "loader_add_dev_ext_table() can't reallocate list memory"); 2743 return false; 2744 } 2745 list->capacity *= 2; 2746 } 2747 2748 // find an unused index in the hash table and use it 2749 i = (idx + 1) % MAX_NUM_DEV_EXTS; 2750 do { 2751 if (!inst->disp_hash[i].func_name) { 2752 assert(inst->disp_hash[i].list.capacity == 0); 2753 inst->disp_hash[i].func_name = 2754 (char *)loader_heap_alloc(inst, strlen(funcName) + 1, 2755 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2756 if (inst->disp_hash[i].func_name == NULL) { 2757 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2758 "loader_add_dev_ext_table() can't rallocate " 2759 "func_name memory"); 2760 return false; 2761 } 2762 strncpy(inst->disp_hash[i].func_name, funcName, 2763 strlen(funcName) + 1); 2764 list->index[list->count] = i; 2765 list->count++; 2766 *ptr_idx = i; 2767 return true; 2768 } 2769 i = (i + 1) % MAX_NUM_DEV_EXTS; 2770 } while (i != idx); 2771 2772 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2773 
"loader_add_dev_ext_table() couldn't insert into hash table; is " 2774 "it full?"); 2775 return false; 2776} 2777 2778static bool loader_name_in_dev_ext_table(struct loader_instance *inst, 2779 uint32_t *idx, const char *funcName) { 2780 uint32_t alt_idx; 2781 if (inst->disp_hash[*idx].func_name && 2782 !strcmp(inst->disp_hash[*idx].func_name, funcName)) 2783 return true; 2784 2785 // funcName wasn't at the primary spot in the hash table 2786 // search the list of secondary locations (shallow search, not deep search) 2787 for (uint32_t i = 0; i < inst->disp_hash[*idx].list.count; i++) { 2788 alt_idx = inst->disp_hash[*idx].list.index[i]; 2789 if (!strcmp(inst->disp_hash[*idx].func_name, funcName)) { 2790 *idx = alt_idx; 2791 return true; 2792 } 2793 } 2794 2795 return false; 2796} 2797 2798/** 2799 * This function returns generic trampoline code address for unknown entry 2800 * points. 2801 * Presumably, these unknown entry points (as given by funcName) are device 2802 * extension entrypoints. A hash table is used to keep a list of unknown entry 2803 * points and their mapping to the device extension dispatch table 2804 * (struct loader_dev_ext_dispatch_table). 2805 * \returns 2806 * For a given entry point string (funcName), if an existing mapping is found 2807 * the 2808 * trampoline address for that mapping is returned. Otherwise, this unknown 2809 * entry point 2810 * has not been seen yet. Next check if a layer or ICD supports it. If so then 2811 * a 2812 * new entry in the hash table is initialized and that trampoline address for 2813 * the new entry is returned. Null is returned if the hash table is full or 2814 * if no discovered layer or ICD returns a non-NULL GetProcAddr for it. 2815 */ 2816void *loader_dev_ext_gpa(struct loader_instance *inst, const char *funcName) { 2817 uint32_t idx; 2818 uint32_t seed = 0; 2819 2820 idx = murmurhash(funcName, strlen(funcName), seed) % MAX_NUM_DEV_EXTS; 2821 2822 if (loader_name_in_dev_ext_table(inst, &idx, funcName)) 2823 // found funcName already in hash 2824 return loader_get_dev_ext_trampoline(idx); 2825 2826 // Check if funcName is supported in either ICDs or a layer library 2827 if (!loader_check_icds_for_address(inst, funcName)) { 2828 // TODO Add check in layer libraries for support of address 2829 // if support found in layers continue on 2830 return NULL; 2831 } 2832 2833 if (loader_add_dev_ext_table(inst, &idx, funcName)) { 2834 // successfully added new table entry 2835 // init any dev dispatch table entrys as needed 2836 loader_init_dispatch_dev_ext_entry(inst, NULL, idx, funcName); 2837 return loader_get_dev_ext_trampoline(idx); 2838 } 2839 2840 return NULL; 2841} 2842 2843struct loader_instance *loader_get_instance(const VkInstance instance) { 2844 /* look up the loader_instance in our list by comparing dispatch tables, as 2845 * there is no guarantee the instance is still a loader_instance* after any 2846 * layers which wrap the instance object. 
2847 */ 2848 const VkLayerInstanceDispatchTable *disp; 2849 struct loader_instance *ptr_instance = NULL; 2850 disp = loader_get_instance_dispatch(instance); 2851 for (struct loader_instance *inst = loader.instances; inst; 2852 inst = inst->next) { 2853 if (inst->disp == disp) { 2854 ptr_instance = inst; 2855 break; 2856 } 2857 } 2858 return ptr_instance; 2859} 2860 2861static loader_platform_dl_handle 2862loader_add_layer_lib(const struct loader_instance *inst, const char *chain_type, 2863 struct loader_layer_properties *layer_prop) { 2864 struct loader_lib_info *new_layer_lib_list, *my_lib; 2865 size_t new_alloc_size; 2866 /* 2867 * TODO: We can now track this information in the 2868 * scanned_layer_libraries list. 2869 */ 2870 for (uint32_t i = 0; i < loader.loaded_layer_lib_count; i++) { 2871 if (strcmp(loader.loaded_layer_lib_list[i].lib_name, 2872 layer_prop->lib_name) == 0) { 2873 /* Have already loaded this library, just increment ref count */ 2874 loader.loaded_layer_lib_list[i].ref_count++; 2875 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 2876 "%s Chain: Increment layer reference count for layer " 2877 "library %s", 2878 chain_type, layer_prop->lib_name); 2879 return loader.loaded_layer_lib_list[i].lib_handle; 2880 } 2881 } 2882 2883 /* Haven't seen this library so load it */ 2884 new_alloc_size = 0; 2885 if (loader.loaded_layer_lib_capacity == 0) 2886 new_alloc_size = 8 * sizeof(struct loader_lib_info); 2887 else if (loader.loaded_layer_lib_capacity <= 2888 loader.loaded_layer_lib_count * sizeof(struct loader_lib_info)) 2889 new_alloc_size = loader.loaded_layer_lib_capacity * 2; 2890 2891 if (new_alloc_size) { 2892 new_layer_lib_list = loader_heap_realloc( 2893 inst, loader.loaded_layer_lib_list, 2894 loader.loaded_layer_lib_capacity, new_alloc_size, 2895 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2896 if (!new_layer_lib_list) { 2897 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2898 "loader: realloc failed in loader_add_layer_lib"); 2899 return NULL; 2900 } 2901 loader.loaded_layer_lib_capacity = new_alloc_size; 2902 loader.loaded_layer_lib_list = new_layer_lib_list; 2903 } else 2904 new_layer_lib_list = loader.loaded_layer_lib_list; 2905 my_lib = &new_layer_lib_list[loader.loaded_layer_lib_count]; 2906 2907 strncpy(my_lib->lib_name, layer_prop->lib_name, sizeof(my_lib->lib_name)); 2908 my_lib->lib_name[sizeof(my_lib->lib_name) - 1] = '\0'; 2909 my_lib->ref_count = 0; 2910 my_lib->lib_handle = NULL; 2911 2912 if ((my_lib->lib_handle = loader_platform_open_library(my_lib->lib_name)) == 2913 NULL) { 2914 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2915 loader_platform_open_library_error(my_lib->lib_name)); 2916 return NULL; 2917 } else { 2918 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 2919 "Chain: %s: Loading layer library %s", chain_type, 2920 layer_prop->lib_name); 2921 } 2922 loader.loaded_layer_lib_count++; 2923 my_lib->ref_count++; 2924 2925 return my_lib->lib_handle; 2926} 2927 2928static void 2929loader_remove_layer_lib(struct loader_instance *inst, 2930 struct loader_layer_properties *layer_prop) { 2931 uint32_t idx = loader.loaded_layer_lib_count; 2932 struct loader_lib_info *new_layer_lib_list, *my_lib = NULL; 2933 2934 for (uint32_t i = 0; i < loader.loaded_layer_lib_count; i++) { 2935 if (strcmp(loader.loaded_layer_lib_list[i].lib_name, 2936 layer_prop->lib_name) == 0) { 2937 /* found matching library */ 2938 idx = i; 2939 my_lib = &loader.loaded_layer_lib_list[i]; 2940 break; 2941 } 2942 } 2943 2944 if (idx == loader.loaded_layer_lib_count) { 
2945 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2946 "Unable to unref library %s", layer_prop->lib_name); 2947 return; 2948 } 2949 2950 if (my_lib) { 2951 my_lib->ref_count--; 2952 if (my_lib->ref_count > 0) { 2953 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 2954 "Decrement reference count for layer library %s", 2955 layer_prop->lib_name); 2956 return; 2957 } 2958 } 2959 loader_platform_close_library(my_lib->lib_handle); 2960 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 2961 "Unloading layer library %s", layer_prop->lib_name); 2962 2963 /* Need to remove unused library from list */ 2964 new_layer_lib_list = 2965 loader_heap_alloc(inst, loader.loaded_layer_lib_capacity, 2966 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2967 if (!new_layer_lib_list) { 2968 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2969 "loader: heap alloc failed loader_remove_layer_library"); 2970 return; 2971 } 2972 2973 if (idx > 0) { 2974 /* Copy records before idx */ 2975 memcpy(new_layer_lib_list, &loader.loaded_layer_lib_list[0], 2976 sizeof(struct loader_lib_info) * idx); 2977 } 2978 if (idx < (loader.loaded_layer_lib_count - 1)) { 2979 /* Copy records after idx */ 2980 memcpy(&new_layer_lib_list[idx], &loader.loaded_layer_lib_list[idx + 1], 2981 sizeof(struct loader_lib_info) * 2982 (loader.loaded_layer_lib_count - idx - 1)); 2983 } 2984 2985 loader_heap_free(inst, loader.loaded_layer_lib_list); 2986 loader.loaded_layer_lib_count--; 2987 loader.loaded_layer_lib_list = new_layer_lib_list; 2988} 2989 2990/** 2991 * Go through the search_list and find any layers which match type. If layer 2992 * type match is found in then add it to ext_list. 2993 */ 2994static void 2995loader_add_layer_implicit(const struct loader_instance *inst, 2996 const enum layer_type type, 2997 struct loader_layer_list *list, 2998 const struct loader_layer_list *search_list) { 2999 bool enable; 3000 char *env_value; 3001 uint32_t i; 3002 for (i = 0; i < search_list->count; i++) { 3003 const struct loader_layer_properties *prop = &search_list->list[i]; 3004 if (prop->type & type) { 3005 /* Found an implicit layer, see if it should be enabled */ 3006 enable = false; 3007 3008 // if no enable_environment variable is specified, this implicit 3009 // layer 3010 // should always be enabled. Otherwise check if the variable is set 3011 if (prop->enable_env_var.name[0] == 0) { 3012 enable = true; 3013 } else { 3014 env_value = loader_getenv(prop->enable_env_var.name); 3015 if (env_value && !strcmp(prop->enable_env_var.value, env_value)) 3016 enable = true; 3017 loader_free_getenv(env_value); 3018 } 3019 3020 // disable_environment has priority, i.e. if both enable and disable 3021 // environment variables are set, the layer is disabled. Implicit 3022 // layers 3023 // are required to have a disable_environment variables 3024 env_value = loader_getenv(prop->disable_env_var.name); 3025 if (env_value) 3026 enable = false; 3027 loader_free_getenv(env_value); 3028 3029 if (enable) 3030 loader_add_to_layer_list(inst, list, 1, prop); 3031 } 3032 } 3033} 3034 3035/** 3036 * Get the layer name(s) from the env_name environment variable. If layer 3037 * is found in search_list then add it to layer_list. But only add it to 3038 * layer_list if type matches. 
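 *
 * Example environment setting (the first layer name is hypothetical; the
 * names are split with loader_get_next_path(), the same routine used for
 * manifest search paths):
 *
 *     VK_INSTANCE_LAYERS=VK_LAYER_hypothetical_example:VK_LAYER_LUNARG_standard_validation
 *
 * Each name is looked up in search_list via loader_find_layer_name_add_list();
 * the standard_validation name is treated as a meta layer and expanded into
 * the individual std_validation_names entries.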
3039 */ 3040static void loader_add_layer_env(const struct loader_instance *inst, 3041 const enum layer_type type, 3042 const char *env_name, 3043 struct loader_layer_list *layer_list, 3044 const struct loader_layer_list *search_list) { 3045 char *layerEnv; 3046 char *next, *name; 3047 3048 layerEnv = loader_getenv(env_name); 3049 if (layerEnv == NULL) { 3050 return; 3051 } 3052 name = loader_stack_alloc(strlen(layerEnv) + 1); 3053 if (name == NULL) { 3054 return; 3055 } 3056 strcpy(name, layerEnv); 3057 3058 loader_free_getenv(layerEnv); 3059 3060 while (name && *name) { 3061 next = loader_get_next_path(name); 3062 if (!strcmp(std_validation_str, name)) { 3063 /* add meta list of layers 3064 don't attempt to remove duplicate layers already added by app or 3065 env var 3066 */ 3067 loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, 3068 "Expanding meta layer %s found in environment variable", 3069 std_validation_str); 3070 for (uint32_t i = 0; i < sizeof(std_validation_names) / 3071 sizeof(std_validation_names[0]); 3072 i++) { 3073 loader_find_layer_name_add_list(inst, std_validation_names[i], 3074 type, search_list, layer_list); 3075 } 3076 } else { 3077 loader_find_layer_name_add_list(inst, name, type, search_list, 3078 layer_list); 3079 } 3080 name = next; 3081 } 3082 3083 return; 3084} 3085 3086void loader_deactivate_instance_layers(struct loader_instance *instance) { 3087 /* Create instance chain of enabled layers */ 3088 for (uint32_t i = 0; i < instance->activated_layer_list.count; i++) { 3089 struct loader_layer_properties *layer_prop = 3090 &instance->activated_layer_list.list[i]; 3091 3092 loader_remove_layer_lib(instance, layer_prop); 3093 } 3094 loader_destroy_layer_list(instance, &instance->activated_layer_list); 3095} 3096 3097VkResult 3098loader_enable_instance_layers(struct loader_instance *inst, 3099 const VkInstanceCreateInfo *pCreateInfo, 3100 const struct loader_layer_list *instance_layers) { 3101 VkResult err; 3102 3103 assert(inst && "Cannot have null instance"); 3104 3105 if (!loader_init_layer_list(inst, &inst->activated_layer_list)) { 3106 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 3107 "Failed to alloc Instance activated layer list"); 3108 return VK_ERROR_OUT_OF_HOST_MEMORY; 3109 } 3110 3111 /* Add any implicit layers first */ 3112 loader_add_layer_implicit(inst, VK_LAYER_TYPE_INSTANCE_IMPLICIT, 3113 &inst->activated_layer_list, instance_layers); 3114 3115 /* Add any layers specified via environment variable next */ 3116 loader_add_layer_env(inst, VK_LAYER_TYPE_INSTANCE_EXPLICIT, 3117 "VK_INSTANCE_LAYERS", &inst->activated_layer_list, 3118 instance_layers); 3119 3120 /* Add layers specified by the application */ 3121 err = loader_add_layer_names_to_list( 3122 inst, &inst->activated_layer_list, pCreateInfo->enabledLayerCount, 3123 pCreateInfo->ppEnabledLayerNames, instance_layers); 3124 3125 return err; 3126} 3127 3128/* 3129 * Given the list of layers to activate in the loader_instance 3130 * structure. This function will add a VkLayerInstanceCreateInfo 3131 * structure to the VkInstanceCreateInfo.pNext pointer. 3132 * Each activated layer will have it's own VkLayerInstanceLink 3133 * structure that tells the layer what Get*ProcAddr to call to 3134 * get function pointers to the next layer down. 3135 * Once the chain info has been created this function will 3136 * execute the CreateInstance call chain. 
Each layer will 3137 * then have an opportunity in it's CreateInstance function 3138 * to setup it's dispatch table when the lower layer returns 3139 * successfully. 3140 * Each layer can wrap or not-wrap the returned VkInstance object 3141 * as it sees fit. 3142 * The instance chain is terminated by a loader function 3143 * that will call CreateInstance on all available ICD's and 3144 * cache those VkInstance objects for future use. 3145 */ 3146VkResult loader_create_instance_chain(const VkInstanceCreateInfo *pCreateInfo, 3147 const VkAllocationCallbacks *pAllocator, 3148 struct loader_instance *inst, 3149 VkInstance *created_instance) { 3150 uint32_t activated_layers = 0; 3151 VkLayerInstanceCreateInfo chain_info; 3152 VkLayerInstanceLink *layer_instance_link_info = NULL; 3153 VkInstanceCreateInfo loader_create_info; 3154 VkResult res; 3155 3156 PFN_vkGetInstanceProcAddr nextGIPA = loader_gpa_instance_internal; 3157 PFN_vkGetInstanceProcAddr fpGIPA = loader_gpa_instance_internal; 3158 3159 memcpy(&loader_create_info, pCreateInfo, sizeof(VkInstanceCreateInfo)); 3160 3161 if (inst->activated_layer_list.count > 0) { 3162 3163 chain_info.u.pLayerInfo = NULL; 3164 chain_info.pNext = pCreateInfo->pNext; 3165 chain_info.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO; 3166 chain_info.function = VK_LAYER_LINK_INFO; 3167 loader_create_info.pNext = &chain_info; 3168 3169 layer_instance_link_info = loader_stack_alloc( 3170 sizeof(VkLayerInstanceLink) * inst->activated_layer_list.count); 3171 if (!layer_instance_link_info) { 3172 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 3173 "Failed to alloc Instance objects for layer"); 3174 return VK_ERROR_OUT_OF_HOST_MEMORY; 3175 } 3176 3177 /* Create instance chain of enabled layers */ 3178 for (int32_t i = inst->activated_layer_list.count - 1; i >= 0; i--) { 3179 struct loader_layer_properties *layer_prop = 3180 &inst->activated_layer_list.list[i]; 3181 loader_platform_dl_handle lib_handle; 3182 3183 lib_handle = loader_add_layer_lib(inst, "instance", layer_prop); 3184 if (!lib_handle) 3185 continue; 3186 if ((fpGIPA = layer_prop->functions.get_instance_proc_addr) == 3187 NULL) { 3188 if (layer_prop->functions.str_gipa == NULL || 3189 strlen(layer_prop->functions.str_gipa) == 0) { 3190 fpGIPA = (PFN_vkGetInstanceProcAddr) 3191 loader_platform_get_proc_address( 3192 lib_handle, "vkGetInstanceProcAddr"); 3193 layer_prop->functions.get_instance_proc_addr = fpGIPA; 3194 } else 3195 fpGIPA = (PFN_vkGetInstanceProcAddr) 3196 loader_platform_get_proc_address( 3197 lib_handle, layer_prop->functions.str_gipa); 3198 if (!fpGIPA) { 3199 loader_log( 3200 inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 3201 "Failed to find vkGetInstanceProcAddr in layer %s", 3202 layer_prop->lib_name); 3203 continue; 3204 } 3205 } 3206 3207 layer_instance_link_info[activated_layers].pNext = 3208 chain_info.u.pLayerInfo; 3209 layer_instance_link_info[activated_layers] 3210 .pfnNextGetInstanceProcAddr = nextGIPA; 3211 chain_info.u.pLayerInfo = 3212 &layer_instance_link_info[activated_layers]; 3213 nextGIPA = fpGIPA; 3214 3215 loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, 3216 "Insert instance layer %s (%s)", 3217 layer_prop->info.layerName, layer_prop->lib_name); 3218 3219 activated_layers++; 3220 } 3221 } 3222 3223 PFN_vkCreateInstance fpCreateInstance = 3224 (PFN_vkCreateInstance)nextGIPA(*created_instance, "vkCreateInstance"); 3225 if (fpCreateInstance) { 3226 VkLayerInstanceCreateInfo instance_create_info; 3227 3228 instance_create_info.sType = 3229 
VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO; 3230 instance_create_info.function = VK_LAYER_INSTANCE_INFO; 3231 3232 instance_create_info.u.instanceInfo.instance_info = inst; 3233 instance_create_info.u.instanceInfo.pfnNextGetInstanceProcAddr = 3234 nextGIPA; 3235 3236 instance_create_info.pNext = loader_create_info.pNext; 3237 loader_create_info.pNext = &instance_create_info; 3238 3239 res = 3240 fpCreateInstance(&loader_create_info, pAllocator, created_instance); 3241 } else { 3242 // Couldn't find CreateInstance function! 3243 res = VK_ERROR_INITIALIZATION_FAILED; 3244 } 3245 3246 if (res != VK_SUCCESS) { 3247 // TODO: Need to clean up here 3248 } else { 3249 loader_init_instance_core_dispatch_table(inst->disp, nextGIPA, 3250 *created_instance); 3251 } 3252 3253 return res; 3254} 3255 3256void loader_activate_instance_layer_extensions(struct loader_instance *inst, 3257 VkInstance created_inst) { 3258 3259 loader_init_instance_extension_dispatch_table( 3260 inst->disp, inst->disp->GetInstanceProcAddr, created_inst); 3261} 3262 3263static VkResult 3264loader_enable_device_layers(const struct loader_instance *inst, 3265 struct loader_icd *icd, 3266 struct loader_layer_list *activated_layer_list, 3267 const VkDeviceCreateInfo *pCreateInfo, 3268 const struct loader_layer_list *device_layers) 3269 3270{ 3271 VkResult err; 3272 3273 assert(activated_layer_list && "Cannot have null output layer list"); 3274 3275 if (activated_layer_list->list == NULL || 3276 activated_layer_list->capacity == 0) { 3277 loader_init_layer_list(inst, activated_layer_list); 3278 } 3279 3280 if (activated_layer_list->list == NULL) { 3281 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 3282 "Failed to alloc device activated layer list"); 3283 return VK_ERROR_OUT_OF_HOST_MEMORY; 3284 } 3285 3286 /* Add any implicit layers first */ 3287 loader_add_layer_implicit(inst, VK_LAYER_TYPE_DEVICE_IMPLICIT, 3288 activated_layer_list, device_layers); 3289 3290 /* Add any layers specified via environment variable next */ 3291 loader_add_layer_env(inst, VK_LAYER_TYPE_DEVICE_EXPLICIT, 3292 "VK_DEVICE_LAYERS", activated_layer_list, 3293 device_layers); 3294 3295 /* Add layers specified by the application */ 3296 err = loader_add_layer_names_to_list( 3297 inst, activated_layer_list, pCreateInfo->enabledLayerCount, 3298 pCreateInfo->ppEnabledLayerNames, device_layers); 3299 3300 return err; 3301} 3302 3303VKAPI_ATTR VkResult VKAPI_CALL 3304loader_create_device_terminator(VkPhysicalDevice physicalDevice, 3305 const VkDeviceCreateInfo *pCreateInfo, 3306 const VkAllocationCallbacks *pAllocator, 3307 VkDevice *pDevice) { 3308 struct loader_physical_device *phys_dev; 3309 phys_dev = loader_get_physical_device(physicalDevice); 3310 3311 VkLayerDeviceCreateInfo *chain_info = 3312 (VkLayerDeviceCreateInfo *)pCreateInfo->pNext; 3313 while (chain_info && 3314 !(chain_info->sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO && 3315 chain_info->function == VK_LAYER_DEVICE_INFO)) { 3316 chain_info = (VkLayerDeviceCreateInfo *)chain_info->pNext; 3317 } 3318 assert(chain_info != NULL); 3319 3320 struct loader_device *dev = 3321 (struct loader_device *)chain_info->u.deviceInfo.device_info; 3322 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = 3323 chain_info->u.deviceInfo.pfnNextGetInstanceProcAddr; 3324 PFN_vkCreateDevice fpCreateDevice = 3325 (PFN_vkCreateDevice)fpGetInstanceProcAddr(phys_dev->this_icd->instance, 3326 "vkCreateDevice"); 3327 if (fpCreateDevice == NULL) { 3328 return VK_ERROR_INITIALIZATION_FAILED; 3329 } 3330 3331 
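    /*
     * For reference, the create info reaching this terminator carries a pNext
     * chain built by loader_create_device_chain() below, roughly:
     *
     *     VkDeviceCreateInfo
     *       -> VkLayerDeviceCreateInfo (VK_LAYER_DEVICE_INFO: loader_device,
     *              next GetInstanceProcAddr)
     *       -> VkLayerDeviceCreateInfo (VK_LAYER_LINK_INFO: per-layer links)
     *       -> application's own pNext structures
     *
     * The loader/layer structures are stripped out again before the ICD sees
     * the create info (loader_strip_create_extensions() just below).
     */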
VkDeviceCreateInfo localCreateInfo; 3332 memcpy(&localCreateInfo, pCreateInfo, sizeof(localCreateInfo)); 3333 localCreateInfo.pNext = loader_strip_create_extensions(pCreateInfo->pNext); 3334 3335 /* 3336 * NOTE: Need to filter the extensions to only those 3337 * supported by the ICD. 3338 * No ICD will advertise support for layers. An ICD 3339 * library could support a layer, but it would be 3340 * independent of the actual ICD, just in the same library. 3341 */ 3342 char **filtered_extension_names = NULL; 3343 filtered_extension_names = 3344 loader_stack_alloc(pCreateInfo->enabledExtensionCount * sizeof(char *)); 3345 if (!filtered_extension_names) { 3346 return VK_ERROR_OUT_OF_HOST_MEMORY; 3347 } 3348 3349 localCreateInfo.enabledLayerCount = 0; 3350 localCreateInfo.ppEnabledLayerNames = NULL; 3351 3352 localCreateInfo.enabledExtensionCount = 0; 3353 localCreateInfo.ppEnabledExtensionNames = 3354 (const char *const *)filtered_extension_names; 3355 3356 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { 3357 const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i]; 3358 VkExtensionProperties *prop = get_extension_property( 3359 extension_name, &phys_dev->device_extension_cache); 3360 if (prop) { 3361 filtered_extension_names[localCreateInfo.enabledExtensionCount] = 3362 (char *)extension_name; 3363 localCreateInfo.enabledExtensionCount++; 3364 } 3365 } 3366 3367 VkDevice localDevice; 3368 // TODO: Why does fpCreateDevice behave differently than 3369 // this_icd->CreateDevice? 3370 // VkResult res = fpCreateDevice(phys_dev->phys_dev, &localCreateInfo, 3371 // pAllocator, &localDevice); 3372 VkResult res = phys_dev->this_icd->CreateDevice( 3373 phys_dev->phys_dev, &localCreateInfo, pAllocator, &localDevice); 3374 3375 if (res != VK_SUCCESS) { 3376 return res; 3377 } 3378 3379 *pDevice = localDevice; 3380 3381 /* Init dispatch pointer in new device object */ 3382 loader_init_dispatch(*pDevice, &dev->loader_dispatch); 3383 3384 return res; 3385} 3386 3387VkResult loader_create_device_chain(VkPhysicalDevice physicalDevice, 3388 const VkDeviceCreateInfo *pCreateInfo, 3389 const VkAllocationCallbacks *pAllocator, 3390 struct loader_instance *inst, 3391 struct loader_icd *icd, 3392 struct loader_device *dev) { 3393 uint32_t activated_layers = 0; 3394 VkLayerDeviceLink *layer_device_link_info; 3395 VkLayerDeviceCreateInfo chain_info; 3396 VkLayerDeviceCreateInfo device_info; 3397 VkDeviceCreateInfo loader_create_info; 3398 VkResult res; 3399 3400 PFN_vkGetDeviceProcAddr fpGDPA, nextGDPA = icd->GetDeviceProcAddr; 3401 PFN_vkGetInstanceProcAddr fpGIPA, nextGIPA = loader_gpa_instance_internal; 3402 3403 memcpy(&loader_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo)); 3404 3405 chain_info.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO; 3406 chain_info.function = VK_LAYER_LINK_INFO; 3407 chain_info.u.pLayerInfo = NULL; 3408 chain_info.pNext = pCreateInfo->pNext; 3409 3410 layer_device_link_info = loader_stack_alloc( 3411 sizeof(VkLayerDeviceLink) * dev->activated_layer_list.count); 3412 if (!layer_device_link_info) { 3413 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 3414 "Failed to alloc Device objects for layer"); 3415 return VK_ERROR_OUT_OF_HOST_MEMORY; 3416 } 3417 3418 /* 3419 * This structure is used by loader_create_device_terminator 3420 * so that it can intialize the device dispatch table pointer 3421 * in the device object returned by the ICD. 
Without this 3422 * structure the code wouldn't know where the loader's device_info 3423 * structure is located. 3424 */ 3425 device_info.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO; 3426 device_info.function = VK_LAYER_DEVICE_INFO; 3427 device_info.pNext = &chain_info; 3428 device_info.u.deviceInfo.device_info = dev; 3429 device_info.u.deviceInfo.pfnNextGetInstanceProcAddr = 3430 icd->this_icd_lib->GetInstanceProcAddr; 3431 3432 loader_create_info.pNext = &device_info; 3433 3434 if (dev->activated_layer_list.count > 0) { 3435 /* Create instance chain of enabled layers */ 3436 for (int32_t i = dev->activated_layer_list.count - 1; i >= 0; i--) { 3437 struct loader_layer_properties *layer_prop = 3438 &dev->activated_layer_list.list[i]; 3439 loader_platform_dl_handle lib_handle; 3440 3441 lib_handle = loader_add_layer_lib(inst, "device", layer_prop); 3442 if (!lib_handle) 3443 continue; 3444 if ((fpGIPA = layer_prop->functions.get_instance_proc_addr) == 3445 NULL) { 3446 if (layer_prop->functions.str_gipa == NULL || 3447 strlen(layer_prop->functions.str_gipa) == 0) { 3448 fpGIPA = (PFN_vkGetInstanceProcAddr) 3449 loader_platform_get_proc_address( 3450 lib_handle, "vkGetInstanceProcAddr"); 3451 layer_prop->functions.get_instance_proc_addr = fpGIPA; 3452 } else 3453 fpGIPA = (PFN_vkGetInstanceProcAddr) 3454 loader_platform_get_proc_address( 3455 lib_handle, layer_prop->functions.str_gipa); 3456 if (!fpGIPA) { 3457 loader_log( 3458 inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 3459 "Failed to find vkGetInstanceProcAddr in layer %s", 3460 layer_prop->lib_name); 3461 continue; 3462 } 3463 } 3464 if ((fpGDPA = layer_prop->functions.get_device_proc_addr) == NULL) { 3465 if (layer_prop->functions.str_gdpa == NULL || 3466 strlen(layer_prop->functions.str_gdpa) == 0) { 3467 fpGDPA = (PFN_vkGetDeviceProcAddr) 3468 loader_platform_get_proc_address(lib_handle, 3469 "vkGetDeviceProcAddr"); 3470 layer_prop->functions.get_device_proc_addr = fpGDPA; 3471 } else 3472 fpGDPA = (PFN_vkGetDeviceProcAddr) 3473 loader_platform_get_proc_address( 3474 lib_handle, layer_prop->functions.str_gdpa); 3475 if (!fpGDPA) { 3476 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 3477 "Failed to find vkGetDeviceProcAddr in layer %s", 3478 layer_prop->lib_name); 3479 continue; 3480 } 3481 } 3482 3483 layer_device_link_info[activated_layers].pNext = 3484 chain_info.u.pLayerInfo; 3485 layer_device_link_info[activated_layers] 3486 .pfnNextGetInstanceProcAddr = nextGIPA; 3487 layer_device_link_info[activated_layers].pfnNextGetDeviceProcAddr = 3488 nextGDPA; 3489 chain_info.u.pLayerInfo = &layer_device_link_info[activated_layers]; 3490 nextGIPA = fpGIPA; 3491 nextGDPA = fpGDPA; 3492 3493 loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, 3494 "Insert device layer %s (%s)", 3495 layer_prop->info.layerName, layer_prop->lib_name); 3496 3497 activated_layers++; 3498 } 3499 } 3500 3501 PFN_vkCreateDevice fpCreateDevice = 3502 (PFN_vkCreateDevice)nextGIPA((VkInstance)inst, "vkCreateDevice"); 3503 if (fpCreateDevice) { 3504 res = fpCreateDevice(physicalDevice, &loader_create_info, pAllocator, 3505 &dev->device); 3506 } else { 3507 // Couldn't find CreateDevice function! 
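        // Without a vkCreateDevice that can be resolved through the chain's
        // GetInstanceProcAddr there is nothing for the loader to terminate
        // the device chain with, so device creation cannot proceed.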
3508         return VK_ERROR_INITIALIZATION_FAILED;
3509     }
3510
3511     /* Initialize device dispatch table */
3512     loader_init_device_dispatch_table(&dev->loader_dispatch, nextGDPA,
3513                                       dev->device);
3514
3515     return res;
3516 }
3517
3518 VkResult loader_validate_layers(const struct loader_instance *inst,
3519                                 const uint32_t layer_count,
3520                                 const char *const *ppEnabledLayerNames,
3521                                 const struct loader_layer_list *list) {
3522     struct loader_layer_properties *prop;
3523
3524     for (uint32_t i = 0; i < layer_count; i++) {
3525         VkStringErrorFlags result =
3526             vk_string_validate(MaxLoaderStringLength, ppEnabledLayerNames[i]);
3527         if (result != VK_STRING_ERROR_NONE) {
3528             loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
3529                        "Loader: Device ppEnabledLayerNames contains string "
3530                        "that is too long or is badly formed");
3531             return VK_ERROR_LAYER_NOT_PRESENT;
3532         }
3533
3534         prop = loader_get_layer_property(ppEnabledLayerNames[i], list);
3535         if (!prop) {
3536             return VK_ERROR_LAYER_NOT_PRESENT;
3537         }
3538     }
3539     return VK_SUCCESS;
3540 }
3541
3542 VkResult loader_validate_instance_extensions(
3543     const struct loader_instance *inst,
3544     const struct loader_extension_list *icd_exts,
3545     const struct loader_layer_list *instance_layer,
3546     const VkInstanceCreateInfo *pCreateInfo) {
3547
3548     VkExtensionProperties *extension_prop;
3549     struct loader_layer_properties *layer_prop;
3550
3551     for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
3552         VkStringErrorFlags result = vk_string_validate(
3553             MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]);
3554         if (result != VK_STRING_ERROR_NONE) {
3555             loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
3556                        "Loader: Instance ppEnabledExtensionNames contains "
3557                        "string that is too long or is badly formed");
3558             return VK_ERROR_EXTENSION_NOT_PRESENT;
3559         }
3560
3561         extension_prop = get_extension_property(
3562             pCreateInfo->ppEnabledExtensionNames[i], icd_exts);
3563
3564         if (extension_prop) {
3565             continue;
3566         }
3567
3568         extension_prop = NULL;
3569
3570         /* Not in global list, search layer extension lists */
3571         for (uint32_t j = 0; j < pCreateInfo->enabledLayerCount; j++) {
3572             layer_prop = loader_get_layer_property(
3573                 pCreateInfo->ppEnabledLayerNames[j], instance_layer);
3574             if (!layer_prop) {
3575                 /* Should NOT get here, loader_validate_layers
3576                  * should have already filtered this case out.
3577                  */
3578                 continue;
3579             }
3580
3581             extension_prop =
3582                 get_extension_property(pCreateInfo->ppEnabledExtensionNames[i],
3583                                        &layer_prop->instance_extension_list);
3584             if (extension_prop) {
3585                 /* Found the extension in one of the layers enabled by the app.
3586 */ 3587 break; 3588 } 3589 } 3590 3591 if (!extension_prop) { 3592 /* Didn't find extension name in any of the global layers, error out 3593 */ 3594 return VK_ERROR_EXTENSION_NOT_PRESENT; 3595 } 3596 } 3597 return VK_SUCCESS; 3598} 3599 3600VkResult loader_validate_device_extensions( 3601 struct loader_physical_device *phys_dev, 3602 const struct loader_layer_list *activated_device_layers, 3603 const VkDeviceCreateInfo *pCreateInfo) { 3604 VkExtensionProperties *extension_prop; 3605 struct loader_layer_properties *layer_prop; 3606 3607 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { 3608 3609 VkStringErrorFlags result = vk_string_validate( 3610 MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]); 3611 if (result != VK_STRING_ERROR_NONE) { 3612 loader_log(phys_dev->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 3613 0, "Loader: Device ppEnabledExtensionNames contains " 3614 "string that is too long or is badly formed"); 3615 return VK_ERROR_EXTENSION_NOT_PRESENT; 3616 } 3617 3618 const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i]; 3619 extension_prop = get_extension_property( 3620 extension_name, &phys_dev->device_extension_cache); 3621 3622 if (extension_prop) { 3623 continue; 3624 } 3625 3626 /* Not in global list, search activated layer extension lists */ 3627 for (uint32_t j = 0; j < activated_device_layers->count; j++) { 3628 layer_prop = &activated_device_layers->list[j]; 3629 3630 extension_prop = get_dev_extension_property( 3631 extension_name, &layer_prop->device_extension_list); 3632 if (extension_prop) { 3633 /* Found the extension in one of the layers enabled by the app. 3634 */ 3635 break; 3636 } 3637 } 3638 3639 if (!extension_prop) { 3640 /* Didn't find extension name in any of the device layers, error out 3641 */ 3642 return VK_ERROR_EXTENSION_NOT_PRESENT; 3643 } 3644 } 3645 return VK_SUCCESS; 3646} 3647 3648VKAPI_ATTR VkResult VKAPI_CALL 3649loader_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, 3650 const VkAllocationCallbacks *pAllocator, 3651 VkInstance *pInstance) { 3652 struct loader_icd *icd; 3653 VkExtensionProperties *prop; 3654 char **filtered_extension_names = NULL; 3655 VkInstanceCreateInfo icd_create_info; 3656 VkResult res = VK_SUCCESS; 3657 bool success = false; 3658 3659 VkLayerInstanceCreateInfo *chain_info = 3660 (VkLayerInstanceCreateInfo *)pCreateInfo->pNext; 3661 while ( 3662 chain_info && 3663 !(chain_info->sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO && 3664 chain_info->function == VK_LAYER_INSTANCE_INFO)) { 3665 chain_info = (VkLayerInstanceCreateInfo *)chain_info->pNext; 3666 } 3667 assert(chain_info != NULL); 3668 3669 struct loader_instance *ptr_instance = 3670 (struct loader_instance *)chain_info->u.instanceInfo.instance_info; 3671 memcpy(&icd_create_info, pCreateInfo, sizeof(icd_create_info)); 3672 3673 icd_create_info.enabledLayerCount = 0; 3674 icd_create_info.ppEnabledLayerNames = NULL; 3675 3676 // strip off the VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO entries 3677 icd_create_info.pNext = loader_strip_create_extensions(pCreateInfo->pNext); 3678 3679 /* 3680 * NOTE: Need to filter the extensions to only those 3681 * supported by the ICD. 3682 * No ICD will advertise support for layers. An ICD 3683 * library could support a layer, but it would be 3684 * independent of the actual ICD, just in the same library. 
3685      */
3686     filtered_extension_names =
3687         loader_stack_alloc(pCreateInfo->enabledExtensionCount * sizeof(char *));
3688     if (!filtered_extension_names) {
3689         return VK_ERROR_OUT_OF_HOST_MEMORY;
3690     }
3691     icd_create_info.ppEnabledExtensionNames =
3692         (const char *const *)filtered_extension_names;
3693
3694     for (uint32_t i = 0; i < ptr_instance->icd_libs.count; i++) {
3695         icd = loader_icd_add(ptr_instance, &ptr_instance->icd_libs.list[i]);
3696         if (icd) {
3697             icd_create_info.enabledExtensionCount = 0;
3698             struct loader_extension_list icd_exts;
3699
3700             loader_log(ptr_instance, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
3701                        "Build ICD instance extension list");
3702             // traverse scanned icd list adding non-duplicate extensions to the
3703             // list
3704             loader_init_generic_list(ptr_instance,
3705                                      (struct loader_generic_list *)&icd_exts,
3706                                      sizeof(VkExtensionProperties));
3707             loader_add_instance_extensions(
3708                 ptr_instance,
3709                 icd->this_icd_lib->EnumerateInstanceExtensionProperties,
3710                 icd->this_icd_lib->lib_name, &icd_exts);
3711
3712             for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) {
3713                 prop = get_extension_property(
3714                     pCreateInfo->ppEnabledExtensionNames[j], &icd_exts);
3715                 if (prop) {
3716                     filtered_extension_names[icd_create_info
3717                                                  .enabledExtensionCount] =
3718                         (char *)pCreateInfo->ppEnabledExtensionNames[j];
3719                     icd_create_info.enabledExtensionCount++;
3720                 }
3721             }
3722
3723             loader_destroy_generic_list(
3724                 ptr_instance, (struct loader_generic_list *)&icd_exts);
3725
3726             res = ptr_instance->icd_libs.list[i].CreateInstance(
3727                 &icd_create_info, pAllocator, &(icd->instance));
3728             if (res == VK_SUCCESS)
3729                 success = loader_icd_init_entrys(
3730                     icd, icd->instance,
3731                     ptr_instance->icd_libs.list[i].GetInstanceProcAddr);
3732
3733             if (res != VK_SUCCESS || !success) {
3734                 ptr_instance->icds = ptr_instance->icds->next;
3735                 loader_icd_destroy(ptr_instance, icd);
3736                 loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
3737                            "ICD ignored: failed to CreateInstance and find "
3738                            "entrypoints with ICD");
3739             }
3740         }
3741     }
3742
3743     /*
3744      * If no ICDs were added to instance list and res is unchanged
3745      * from its initial value, the loader was unable to find
3746      * a suitable ICD.
3747 */ 3748 if (ptr_instance->icds == NULL) { 3749 if (res == VK_SUCCESS) { 3750 return VK_ERROR_INCOMPATIBLE_DRIVER; 3751 } else { 3752 return res; 3753 } 3754 } 3755 3756 return VK_SUCCESS; 3757} 3758 3759VKAPI_ATTR void VKAPI_CALL 3760loader_DestroyInstance(VkInstance instance, 3761 const VkAllocationCallbacks *pAllocator) { 3762 struct loader_instance *ptr_instance = loader_instance(instance); 3763 struct loader_icd *icds = ptr_instance->icds; 3764 struct loader_icd *next_icd; 3765 3766 // Remove this instance from the list of instances: 3767 struct loader_instance *prev = NULL; 3768 struct loader_instance *next = loader.instances; 3769 while (next != NULL) { 3770 if (next == ptr_instance) { 3771 // Remove this instance from the list: 3772 if (prev) 3773 prev->next = next->next; 3774 else 3775 loader.instances = next->next; 3776 break; 3777 } 3778 prev = next; 3779 next = next->next; 3780 } 3781 3782 while (icds) { 3783 if (icds->instance) { 3784 icds->DestroyInstance(icds->instance, pAllocator); 3785 } 3786 next_icd = icds->next; 3787 icds->instance = VK_NULL_HANDLE; 3788 loader_icd_destroy(ptr_instance, icds); 3789 3790 icds = next_icd; 3791 } 3792 loader_delete_layer_properties(ptr_instance, 3793 &ptr_instance->device_layer_list); 3794 loader_delete_layer_properties(ptr_instance, 3795 &ptr_instance->instance_layer_list); 3796 loader_scanned_icd_clear(ptr_instance, &ptr_instance->icd_libs); 3797 loader_destroy_generic_list( 3798 ptr_instance, (struct loader_generic_list *)&ptr_instance->ext_list); 3799 for (uint32_t i = 0; i < ptr_instance->total_gpu_count; i++) 3800 loader_destroy_generic_list( 3801 ptr_instance, 3802 (struct loader_generic_list *)&ptr_instance->phys_devs[i] 3803 .device_extension_cache); 3804 loader_heap_free(ptr_instance, ptr_instance->phys_devs); 3805 loader_free_dev_ext_table(ptr_instance); 3806} 3807 3808VkResult 3809loader_init_physical_device_info(struct loader_instance *ptr_instance) { 3810 struct loader_icd *icd; 3811 uint32_t i, j, idx, count = 0; 3812 VkResult res; 3813 struct loader_phys_dev_per_icd *phys_devs; 3814 3815 ptr_instance->total_gpu_count = 0; 3816 phys_devs = (struct loader_phys_dev_per_icd *)loader_stack_alloc( 3817 sizeof(struct loader_phys_dev_per_icd) * ptr_instance->total_icd_count); 3818 if (!phys_devs) 3819 return VK_ERROR_OUT_OF_HOST_MEMORY; 3820 3821 icd = ptr_instance->icds; 3822 for (i = 0; i < ptr_instance->total_icd_count; i++) { 3823 assert(icd); 3824 res = icd->EnumeratePhysicalDevices(icd->instance, &phys_devs[i].count, 3825 NULL); 3826 if (res != VK_SUCCESS) 3827 return res; 3828 count += phys_devs[i].count; 3829 icd = icd->next; 3830 } 3831 3832 ptr_instance->phys_devs = 3833 (struct loader_physical_device *)loader_heap_alloc( 3834 ptr_instance, count * sizeof(struct loader_physical_device), 3835 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 3836 if (!ptr_instance->phys_devs) 3837 return VK_ERROR_OUT_OF_HOST_MEMORY; 3838 3839 icd = ptr_instance->icds; 3840 3841 struct loader_physical_device *inst_phys_devs = ptr_instance->phys_devs; 3842 idx = 0; 3843 for (i = 0; i < ptr_instance->total_icd_count; i++) { 3844 assert(icd); 3845 3846 phys_devs[i].phys_devs = (VkPhysicalDevice *)loader_stack_alloc( 3847 phys_devs[i].count * sizeof(VkPhysicalDevice)); 3848 if (!phys_devs[i].phys_devs) { 3849 loader_heap_free(ptr_instance, ptr_instance->phys_devs); 3850 ptr_instance->phys_devs = NULL; 3851 return VK_ERROR_OUT_OF_HOST_MEMORY; 3852 } 3853 res = icd->EnumeratePhysicalDevices( 3854 icd->instance, &(phys_devs[i].count), 
phys_devs[i].phys_devs); 3855 if ((res == VK_SUCCESS)) { 3856 ptr_instance->total_gpu_count += phys_devs[i].count; 3857 for (j = 0; j < phys_devs[i].count; j++) { 3858 3859 // initialize the loader's physicalDevice object 3860 loader_set_dispatch((void *)&inst_phys_devs[idx], 3861 ptr_instance->disp); 3862 inst_phys_devs[idx].this_instance = ptr_instance; 3863 inst_phys_devs[idx].this_icd = icd; 3864 inst_phys_devs[idx].phys_dev = phys_devs[i].phys_devs[j]; 3865 memset(&inst_phys_devs[idx].device_extension_cache, 0, 3866 sizeof(struct loader_extension_list)); 3867 3868 idx++; 3869 } 3870 } else { 3871 loader_heap_free(ptr_instance, ptr_instance->phys_devs); 3872 ptr_instance->phys_devs = NULL; 3873 return res; 3874 } 3875 3876 icd = icd->next; 3877 } 3878 3879 return VK_SUCCESS; 3880} 3881 3882VKAPI_ATTR VkResult VKAPI_CALL 3883loader_EnumeratePhysicalDevices(VkInstance instance, 3884 uint32_t *pPhysicalDeviceCount, 3885 VkPhysicalDevice *pPhysicalDevices) { 3886 uint32_t i; 3887 uint32_t copy_count = 0; 3888 struct loader_instance *ptr_instance = (struct loader_instance *)instance; 3889 VkResult res = VK_SUCCESS; 3890 3891 if (ptr_instance->total_gpu_count == 0) { 3892 res = loader_init_physical_device_info(ptr_instance); 3893 } 3894 3895 *pPhysicalDeviceCount = ptr_instance->total_gpu_count; 3896 if (!pPhysicalDevices) { 3897 return res; 3898 } 3899 3900 copy_count = (ptr_instance->total_gpu_count < *pPhysicalDeviceCount) 3901 ? ptr_instance->total_gpu_count 3902 : *pPhysicalDeviceCount; 3903 for (i = 0; i < copy_count; i++) { 3904 pPhysicalDevices[i] = (VkPhysicalDevice)&ptr_instance->phys_devs[i]; 3905 } 3906 *pPhysicalDeviceCount = copy_count; 3907 3908 if (copy_count < ptr_instance->total_gpu_count) { 3909 return VK_INCOMPLETE; 3910 } 3911 3912 return res; 3913} 3914 3915VKAPI_ATTR void VKAPI_CALL 3916loader_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, 3917 VkPhysicalDeviceProperties *pProperties) { 3918 struct loader_physical_device *phys_dev = 3919 (struct loader_physical_device *)physicalDevice; 3920 struct loader_icd *icd = phys_dev->this_icd; 3921 3922 if (icd->GetPhysicalDeviceProperties) 3923 icd->GetPhysicalDeviceProperties(phys_dev->phys_dev, pProperties); 3924} 3925 3926VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceQueueFamilyProperties( 3927 VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, 3928 VkQueueFamilyProperties *pProperties) { 3929 struct loader_physical_device *phys_dev = 3930 (struct loader_physical_device *)physicalDevice; 3931 struct loader_icd *icd = phys_dev->this_icd; 3932 3933 if (icd->GetPhysicalDeviceQueueFamilyProperties) 3934 icd->GetPhysicalDeviceQueueFamilyProperties( 3935 phys_dev->phys_dev, pQueueFamilyPropertyCount, pProperties); 3936} 3937 3938VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceMemoryProperties( 3939 VkPhysicalDevice physicalDevice, 3940 VkPhysicalDeviceMemoryProperties *pProperties) { 3941 struct loader_physical_device *phys_dev = 3942 (struct loader_physical_device *)physicalDevice; 3943 struct loader_icd *icd = phys_dev->this_icd; 3944 3945 if (icd->GetPhysicalDeviceMemoryProperties) 3946 icd->GetPhysicalDeviceMemoryProperties(phys_dev->phys_dev, pProperties); 3947} 3948 3949VKAPI_ATTR void VKAPI_CALL 3950loader_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, 3951 VkPhysicalDeviceFeatures *pFeatures) { 3952 struct loader_physical_device *phys_dev = 3953 (struct loader_physical_device *)physicalDevice; 3954 struct loader_icd *icd = phys_dev->this_icd; 3955 3956 if 
(icd->GetPhysicalDeviceFeatures) 3957 icd->GetPhysicalDeviceFeatures(phys_dev->phys_dev, pFeatures); 3958} 3959 3960VKAPI_ATTR void VKAPI_CALL 3961loader_GetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, 3962 VkFormat format, 3963 VkFormatProperties *pFormatInfo) { 3964 struct loader_physical_device *phys_dev = 3965 (struct loader_physical_device *)physicalDevice; 3966 struct loader_icd *icd = phys_dev->this_icd; 3967 3968 if (icd->GetPhysicalDeviceFormatProperties) 3969 icd->GetPhysicalDeviceFormatProperties(phys_dev->phys_dev, format, 3970 pFormatInfo); 3971} 3972 3973VKAPI_ATTR VkResult VKAPI_CALL loader_GetPhysicalDeviceImageFormatProperties( 3974 VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, 3975 VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, 3976 VkImageFormatProperties *pImageFormatProperties) { 3977 struct loader_physical_device *phys_dev = 3978 (struct loader_physical_device *)physicalDevice; 3979 struct loader_icd *icd = phys_dev->this_icd; 3980 3981 if (!icd->GetPhysicalDeviceImageFormatProperties) 3982 return VK_ERROR_INITIALIZATION_FAILED; 3983 3984 return icd->GetPhysicalDeviceImageFormatProperties( 3985 phys_dev->phys_dev, format, type, tiling, usage, flags, 3986 pImageFormatProperties); 3987} 3988 3989VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceSparseImageFormatProperties( 3990 VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, 3991 VkSampleCountFlagBits samples, VkImageUsageFlags usage, 3992 VkImageTiling tiling, uint32_t *pNumProperties, 3993 VkSparseImageFormatProperties *pProperties) { 3994 struct loader_physical_device *phys_dev = 3995 (struct loader_physical_device *)physicalDevice; 3996 struct loader_icd *icd = phys_dev->this_icd; 3997 3998 if (icd->GetPhysicalDeviceSparseImageFormatProperties) 3999 icd->GetPhysicalDeviceSparseImageFormatProperties( 4000 phys_dev->phys_dev, format, type, samples, usage, tiling, 4001 pNumProperties, pProperties); 4002} 4003 4004VKAPI_ATTR VkResult VKAPI_CALL 4005loader_CreateDevice(VkPhysicalDevice physicalDevice, 4006 const VkDeviceCreateInfo *pCreateInfo, 4007 const VkAllocationCallbacks *pAllocator, 4008 VkDevice *pDevice) { 4009 struct loader_physical_device *phys_dev; 4010 struct loader_icd *icd; 4011 struct loader_device *dev; 4012 struct loader_instance *inst; 4013 struct loader_layer_list activated_layer_list = {0}; 4014 VkResult res; 4015 4016 assert(pCreateInfo->queueCreateInfoCount >= 1); 4017 4018 // TODO this only works for one physical device per instance 4019 // once CreateDevice layer bootstrapping is done via DeviceCreateInfo 4020 // hopefully don't need this anymore in trampoline code 4021 phys_dev = loader_get_physical_device(physicalDevice); 4022 icd = phys_dev->this_icd; 4023 if (!icd) 4024 return VK_ERROR_INITIALIZATION_FAILED; 4025 4026 inst = phys_dev->this_instance; 4027 4028 if (!icd->CreateDevice) { 4029 return VK_ERROR_INITIALIZATION_FAILED; 4030 } 4031 4032 /* validate any app enabled layers are available */ 4033 if (pCreateInfo->enabledLayerCount > 0) { 4034 res = loader_validate_layers(inst, pCreateInfo->enabledLayerCount, 4035 pCreateInfo->ppEnabledLayerNames, 4036 &inst->device_layer_list); 4037 if (res != VK_SUCCESS) { 4038 return res; 4039 } 4040 } 4041 4042 /* Get the physical device extensions if they haven't been retrieved yet */ 4043 if (phys_dev->device_extension_cache.capacity == 0) { 4044 if (!loader_init_generic_list( 4045 inst, 4046 (struct loader_generic_list *)&phys_dev->device_extension_cache, 4047 
                sizeof(VkExtensionProperties))) {
4048             return VK_ERROR_OUT_OF_HOST_MEMORY;
4049         }
4050
4051         res = loader_add_device_extensions(
4052             inst, icd, phys_dev->phys_dev,
4053             phys_dev->this_icd->this_icd_lib->lib_name,
4054             &phys_dev->device_extension_cache);
4055         if (res != VK_SUCCESS) {
4056             return res;
4057         }
4058     }
4059
4060     /* convert any meta layers to the actual layers; this makes a copy of the layer names */
4061     uint32_t saved_layer_count = pCreateInfo->enabledLayerCount;
4062     char **saved_layer_names;
4063     char **saved_layer_ptr;
4064     saved_layer_names =
4065         loader_stack_alloc(sizeof(char *) * pCreateInfo->enabledLayerCount);
4066     for (uint32_t i = 0; i < saved_layer_count; i++) {
4067         saved_layer_names[i] = (char *)pCreateInfo->ppEnabledLayerNames[i];
4068     }
4069     saved_layer_ptr = (char **)pCreateInfo->ppEnabledLayerNames;
4070
4071     loader_expand_layer_names(
4072         inst, std_validation_str,
4073         sizeof(std_validation_names) / sizeof(std_validation_names[0]),
4074         std_validation_names, (uint32_t *)&pCreateInfo->enabledLayerCount,
4075         (char ***)&pCreateInfo->ppEnabledLayerNames);
4076
4077     /* fetch a list of all layers activated, explicit and implicit */
4078     res = loader_enable_device_layers(inst, icd, &activated_layer_list,
4079                                       pCreateInfo, &inst->device_layer_list);
4080     if (res != VK_SUCCESS) {
4081         loader_unexpand_dev_layer_names(inst, saved_layer_count,
4082                                         saved_layer_names, saved_layer_ptr,
4083                                         pCreateInfo);
4084         return res;
4085     }
4086
4087     /* make sure requested extensions to be enabled are supported */
4088     res = loader_validate_device_extensions(phys_dev, &activated_layer_list,
4089                                             pCreateInfo);
4090     if (res != VK_SUCCESS) {
4091         loader_unexpand_dev_layer_names(inst, saved_layer_count,
4092                                         saved_layer_names, saved_layer_ptr,
4093                                         pCreateInfo);
4094         loader_destroy_generic_list(
4095             inst, (struct loader_generic_list *)&activated_layer_list);
4096         return res;
4097     }
4098
4099     dev = loader_add_logical_device(inst, &icd->logical_device_list);
4100     if (dev == NULL) {
4101         loader_unexpand_dev_layer_names(inst, saved_layer_count,
4102                                         saved_layer_names, saved_layer_ptr,
4103                                         pCreateInfo);
4104         loader_destroy_generic_list(
4105             inst, (struct loader_generic_list *)&activated_layer_list);
4106         return VK_ERROR_OUT_OF_HOST_MEMORY;
4107     }
4108
4109     /* move the locally filled layer list into the device, and pass ownership of
4110      * the memory */
4111     dev->activated_layer_list.capacity = activated_layer_list.capacity;
4112     dev->activated_layer_list.count = activated_layer_list.count;
4113     dev->activated_layer_list.list = activated_layer_list.list;
4114     memset(&activated_layer_list, 0, sizeof(activated_layer_list));
4115
4116     /* activate any layers on the device chain; the chain terminates with the device */
4117     res = loader_enable_device_layers(inst, icd, &dev->activated_layer_list,
4118                                       pCreateInfo, &inst->device_layer_list);
4119     if (res != VK_SUCCESS) {
4120         loader_unexpand_dev_layer_names(inst, saved_layer_count,
4121                                         saved_layer_names, saved_layer_ptr,
4122                                         pCreateInfo);
4123         loader_remove_logical_device(inst, icd, dev);
4124         return res;
4125     }
4126
4127     res = loader_create_device_chain(physicalDevice, pCreateInfo, pAllocator,
4128                                      inst, icd, dev);
4129     if (res != VK_SUCCESS) {
4130         loader_unexpand_dev_layer_names(inst, saved_layer_count,
4131                                         saved_layer_names, saved_layer_ptr,
4132                                         pCreateInfo);
4133         loader_remove_logical_device(inst, icd, dev);
4134         return res;
4135     }
4136
4137     *pDevice = dev->device;
4138
4139     /* initialize any device extension dispatch entries from the instance list */
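    /* loader_init_dispatch_dev_ext (defined elsewhere in the loader) is
     * expected to fill in dispatch entries for device-extension functions
     * handed out through the instance-level GetProcAddr path, so that those
     * entry points still dispatch down this device's chain. */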
4140     loader_init_dispatch_dev_ext(inst, dev);
4141
4142     /* initialize WSI device extensions as part of core dispatch since loader
4143      * has
4144      * dedicated trampoline code for these*/
4145     loader_init_device_extension_dispatch_table(
4146         &dev->loader_dispatch,
4147         dev->loader_dispatch.core_dispatch.GetDeviceProcAddr, *pDevice);
4148
4149     loader_unexpand_dev_layer_names(inst, saved_layer_count, saved_layer_names,
4150                                     saved_layer_ptr, pCreateInfo);
4151     return res;
4152 }
4153
4154 /**
4155  * Get an instance level or global level entry point address.
4156  * @param instance
4157  * @param pName
4158  * @return
4159  * If instance == NULL returns global level functions only
4160  * If instance is valid returns a trampoline entry point for all dispatchable
4161  * Vulkan
4162  * functions both core and extensions.
4163  */
4164 LOADER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
4165 vkGetInstanceProcAddr(VkInstance instance, const char *pName) {
4166
4167     void *addr;
4168
4169     addr = globalGetProcAddr(pName);
4170     if (instance == VK_NULL_HANDLE) {
4171         // get entrypoint addresses that are global (no dispatchable object)
4172
4173         return addr;
4174     } else {
4175         // if a global entrypoint return NULL
4176         if (addr)
4177             return NULL;
4178     }
4179
4180     struct loader_instance *ptr_instance = loader_get_instance(instance);
4181     if (ptr_instance == NULL)
4182         return NULL;
4183     // Return trampoline code for non-global entrypoints including any
4184     // extensions.
4185     // Device extensions are returned if a layer or ICD supports the extension.
4186     // Instance extensions are returned if the extension is enabled and the
4187     // loader
4188     // or someone else supports the extension
4189     return trampolineGetProcAddr(ptr_instance, pName);
4190 }
4191
4192 /**
4193  * Get a device level or global level entry point address.
4194  * @param device
4195  * @param pName
4196  * @return
4197  * If device is valid, returns a device relative entry point for device level
4198  * entry points both core and extensions.
4199  * Device relative means call down the device chain.
4200  */
4201 LOADER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
4202 vkGetDeviceProcAddr(VkDevice device, const char *pName) {
4203     void *addr;
4204
4205     /* for entrypoints that loader must handle (i.e. non-dispatchable or create
4206        object)
4207        make sure the loader entrypoint is returned */
4208     addr = loader_non_passthrough_gdpa(pName);
4209     if (addr) {
4210         return addr;
4211     }
4212
4213     /* Although CreateDevice is on device chain its dispatchable object isn't
4214      * a VkDevice or child of VkDevice so return NULL.
4215 */ 4216 if (!strcmp(pName, "CreateDevice")) 4217 return NULL; 4218 4219 /* return the dispatch table entrypoint for the fastest case */ 4220 const VkLayerDispatchTable *disp_table = *(VkLayerDispatchTable **)device; 4221 if (disp_table == NULL) 4222 return NULL; 4223 4224 addr = loader_lookup_device_dispatch_table(disp_table, pName); 4225 if (addr) 4226 return addr; 4227 4228 if (disp_table->GetDeviceProcAddr == NULL) 4229 return NULL; 4230 return disp_table->GetDeviceProcAddr(device, pName); 4231} 4232 4233LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 4234vkEnumerateInstanceExtensionProperties(const char *pLayerName, 4235 uint32_t *pPropertyCount, 4236 VkExtensionProperties *pProperties) { 4237 struct loader_extension_list *global_ext_list = NULL; 4238 struct loader_layer_list instance_layers; 4239 struct loader_extension_list icd_extensions; 4240 struct loader_icd_libs icd_libs; 4241 uint32_t copy_size; 4242 4243 tls_instance = NULL; 4244 memset(&icd_extensions, 0, sizeof(icd_extensions)); 4245 memset(&instance_layers, 0, sizeof(instance_layers)); 4246 loader_platform_thread_once(&once_init, loader_initialize); 4247 4248 /* get layer libraries if needed */ 4249 if (pLayerName && strlen(pLayerName) != 0) { 4250 if (vk_string_validate(MaxLoaderStringLength, pLayerName) == 4251 VK_STRING_ERROR_NONE) { 4252 loader_layer_scan(NULL, &instance_layers, NULL); 4253 for (uint32_t i = 0; i < instance_layers.count; i++) { 4254 struct loader_layer_properties *props = 4255 &instance_layers.list[i]; 4256 if (strcmp(props->info.layerName, pLayerName) == 0) { 4257 global_ext_list = &props->instance_extension_list; 4258 } 4259 } 4260 } else { 4261 assert(VK_FALSE && "vkEnumerateInstanceExtensionProperties: " 4262 "pLayerName is too long or is badly formed"); 4263 return VK_ERROR_EXTENSION_NOT_PRESENT; 4264 } 4265 } else { 4266 /* Scan/discover all ICD libraries */ 4267 memset(&icd_libs, 0, sizeof(struct loader_icd_libs)); 4268 loader_icd_scan(NULL, &icd_libs); 4269 /* get extensions from all ICD's, merge so no duplicates */ 4270 loader_get_icd_loader_instance_extensions(NULL, &icd_libs, 4271 &icd_extensions); 4272 loader_scanned_icd_clear(NULL, &icd_libs); 4273 global_ext_list = &icd_extensions; 4274 } 4275 4276 if (global_ext_list == NULL) { 4277 loader_destroy_layer_list(NULL, &instance_layers); 4278 return VK_ERROR_LAYER_NOT_PRESENT; 4279 } 4280 4281 if (pProperties == NULL) { 4282 *pPropertyCount = global_ext_list->count; 4283 loader_destroy_layer_list(NULL, &instance_layers); 4284 loader_destroy_generic_list( 4285 NULL, (struct loader_generic_list *)&icd_extensions); 4286 return VK_SUCCESS; 4287 } 4288 4289 copy_size = *pPropertyCount < global_ext_list->count 4290 ? 
*pPropertyCount 4291 : global_ext_list->count; 4292 for (uint32_t i = 0; i < copy_size; i++) { 4293 memcpy(&pProperties[i], &global_ext_list->list[i], 4294 sizeof(VkExtensionProperties)); 4295 } 4296 *pPropertyCount = copy_size; 4297 loader_destroy_generic_list(NULL, 4298 (struct loader_generic_list *)&icd_extensions); 4299 4300 if (copy_size < global_ext_list->count) { 4301 loader_destroy_layer_list(NULL, &instance_layers); 4302 return VK_INCOMPLETE; 4303 } 4304 4305 loader_destroy_layer_list(NULL, &instance_layers); 4306 return VK_SUCCESS; 4307} 4308 4309LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 4310vkEnumerateInstanceLayerProperties(uint32_t *pPropertyCount, 4311 VkLayerProperties *pProperties) { 4312 4313 struct loader_layer_list instance_layer_list; 4314 tls_instance = NULL; 4315 4316 loader_platform_thread_once(&once_init, loader_initialize); 4317 4318 uint32_t copy_size; 4319 4320 /* get layer libraries */ 4321 memset(&instance_layer_list, 0, sizeof(instance_layer_list)); 4322 loader_layer_scan(NULL, &instance_layer_list, NULL); 4323 4324 if (pProperties == NULL) { 4325 *pPropertyCount = instance_layer_list.count; 4326 loader_destroy_layer_list(NULL, &instance_layer_list); 4327 return VK_SUCCESS; 4328 } 4329 4330 copy_size = (*pPropertyCount < instance_layer_list.count) 4331 ? *pPropertyCount 4332 : instance_layer_list.count; 4333 for (uint32_t i = 0; i < copy_size; i++) { 4334 memcpy(&pProperties[i], &instance_layer_list.list[i].info, 4335 sizeof(VkLayerProperties)); 4336 } 4337 4338 *pPropertyCount = copy_size; 4339 loader_destroy_layer_list(NULL, &instance_layer_list); 4340 4341 if (copy_size < instance_layer_list.count) { 4342 return VK_INCOMPLETE; 4343 } 4344 4345 return VK_SUCCESS; 4346} 4347 4348VKAPI_ATTR VkResult VKAPI_CALL 4349loader_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, 4350 const char *pLayerName, 4351 uint32_t *pPropertyCount, 4352 VkExtensionProperties *pProperties) { 4353 struct loader_physical_device *phys_dev; 4354 uint32_t copy_size; 4355 4356 uint32_t count; 4357 struct loader_device_extension_list *dev_ext_list = NULL; 4358 struct loader_layer_list implicit_layer_list; 4359 4360 // TODO fix this aliases physical devices 4361 phys_dev = loader_get_physical_device(physicalDevice); 4362 4363 /* get layer libraries if needed */ 4364 if (pLayerName && strlen(pLayerName) != 0) { 4365 if (vk_string_validate(MaxLoaderStringLength, pLayerName) == 4366 VK_STRING_ERROR_NONE) { 4367 for (uint32_t i = 0; 4368 i < phys_dev->this_instance->device_layer_list.count; i++) { 4369 struct loader_layer_properties *props = 4370 &phys_dev->this_instance->device_layer_list.list[i]; 4371 if (strcmp(props->info.layerName, pLayerName) == 0) { 4372 dev_ext_list = &props->device_extension_list; 4373 } 4374 } 4375 count = (dev_ext_list == NULL) ? 0 : dev_ext_list->count; 4376 if (pProperties == NULL) { 4377 *pPropertyCount = count; 4378 return VK_SUCCESS; 4379 } 4380 4381 copy_size = *pPropertyCount < count ? 
*pPropertyCount : count; 4382 for (uint32_t i = 0; i < copy_size; i++) { 4383 memcpy(&pProperties[i], &dev_ext_list->list[i].props, 4384 sizeof(VkExtensionProperties)); 4385 } 4386 *pPropertyCount = copy_size; 4387 4388 if (copy_size < count) { 4389 return VK_INCOMPLETE; 4390 } 4391 } else { 4392 loader_log(phys_dev->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 4393 0, "vkEnumerateDeviceExtensionProperties: pLayerName " 4394 "is too long or is badly formed"); 4395 return VK_ERROR_EXTENSION_NOT_PRESENT; 4396 } 4397 return VK_SUCCESS; 4398 } else { 4399 /* this case is during the call down the instance chain with pLayerName 4400 * == NULL*/ 4401 struct loader_icd *icd = phys_dev->this_icd; 4402 uint32_t icd_ext_count = *pPropertyCount; 4403 VkResult res; 4404 4405 /* get device extensions */ 4406 res = icd->EnumerateDeviceExtensionProperties( 4407 phys_dev->phys_dev, NULL, &icd_ext_count, pProperties); 4408 if (res != VK_SUCCESS) 4409 return res; 4410 4411 loader_init_layer_list(phys_dev->this_instance, &implicit_layer_list); 4412 4413 loader_add_layer_implicit( 4414 phys_dev->this_instance, VK_LAYER_TYPE_INSTANCE_IMPLICIT, 4415 &implicit_layer_list, 4416 &phys_dev->this_instance->instance_layer_list); 4417 /* we need to determine which implicit layers are active, 4418 * and then add their extensions. This can't be cached as 4419 * it depends on results of environment variables (which can change). 4420 */ 4421 if (pProperties != NULL) { 4422 /* initialize dev_extension list within the physicalDevice object */ 4423 res = loader_init_device_extensions( 4424 phys_dev->this_instance, phys_dev, icd_ext_count, pProperties, 4425 &phys_dev->device_extension_cache); 4426 if (res != VK_SUCCESS) 4427 return res; 4428 4429 /* we need to determine which implicit layers are active, 4430 * and then add their extensions. This can't be cached as 4431 * it depends on results of environment variables (which can 4432 * change). 
4433 */ 4434 struct loader_extension_list all_exts = {0}; 4435 loader_add_to_ext_list(phys_dev->this_instance, &all_exts, 4436 phys_dev->device_extension_cache.count, 4437 phys_dev->device_extension_cache.list); 4438 4439 loader_init_layer_list(phys_dev->this_instance, 4440 &implicit_layer_list); 4441 4442 loader_add_layer_implicit( 4443 phys_dev->this_instance, VK_LAYER_TYPE_INSTANCE_IMPLICIT, 4444 &implicit_layer_list, 4445 &phys_dev->this_instance->instance_layer_list); 4446 4447 for (uint32_t i = 0; i < implicit_layer_list.count; i++) { 4448 for ( 4449 uint32_t j = 0; 4450 j < implicit_layer_list.list[i].device_extension_list.count; 4451 j++) { 4452 loader_add_to_ext_list(phys_dev->this_instance, &all_exts, 4453 1, 4454 &implicit_layer_list.list[i] 4455 .device_extension_list.list[j] 4456 .props); 4457 } 4458 } 4459 uint32_t capacity = *pPropertyCount; 4460 VkExtensionProperties *props = pProperties; 4461 4462 for (uint32_t i = 0; i < all_exts.count && i < capacity; i++) { 4463 props[i] = all_exts.list[i]; 4464 } 4465 /* wasn't enough space for the extensions, we did partial copy now 4466 * return VK_INCOMPLETE */ 4467 if (capacity < all_exts.count) { 4468 res = VK_INCOMPLETE; 4469 } else { 4470 *pPropertyCount = all_exts.count; 4471 } 4472 loader_destroy_generic_list( 4473 phys_dev->this_instance, 4474 (struct loader_generic_list *)&all_exts); 4475 } else { 4476 /* just return the count; need to add in the count of implicit layer 4477 * extensions 4478 * don't worry about duplicates being added in the count */ 4479 *pPropertyCount = icd_ext_count; 4480 4481 for (uint32_t i = 0; i < implicit_layer_list.count; i++) { 4482 *pPropertyCount += 4483 implicit_layer_list.list[i].device_extension_list.count; 4484 } 4485 res = VK_SUCCESS; 4486 } 4487 4488 loader_destroy_generic_list( 4489 phys_dev->this_instance, 4490 (struct loader_generic_list *)&implicit_layer_list); 4491 return res; 4492 } 4493} 4494 4495VKAPI_ATTR VkResult VKAPI_CALL 4496loader_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, 4497 uint32_t *pPropertyCount, 4498 VkLayerProperties *pProperties) { 4499 uint32_t copy_size; 4500 struct loader_physical_device *phys_dev; 4501 // TODO fix this, aliases physical devices 4502 phys_dev = loader_get_physical_device(physicalDevice); 4503 uint32_t count = phys_dev->this_instance->device_layer_list.count; 4504 4505 if (pProperties == NULL) { 4506 *pPropertyCount = count; 4507 return VK_SUCCESS; 4508 } 4509 4510 copy_size = (*pPropertyCount < count) ? 
*pPropertyCount : count; 4511 for (uint32_t i = 0; i < copy_size; i++) { 4512 memcpy(&pProperties[i], 4513 &(phys_dev->this_instance->device_layer_list.list[i].info), 4514 sizeof(VkLayerProperties)); 4515 } 4516 *pPropertyCount = copy_size; 4517 4518 if (copy_size < count) { 4519 return VK_INCOMPLETE; 4520 } 4521 4522 return VK_SUCCESS; 4523} 4524 4525VkStringErrorFlags vk_string_validate(const int max_length, const char *utf8) { 4526 VkStringErrorFlags result = VK_STRING_ERROR_NONE; 4527 int num_char_bytes = 0; 4528 int i, j; 4529 4530 for (i = 0; i < max_length; i++) { 4531 if (utf8[i] == 0) { 4532 break; 4533 } else if ((utf8[i] >= 0x20) && (utf8[i] < 0x7f)) { 4534 num_char_bytes = 0; 4535 } else if ((utf8[i] & UTF8_ONE_BYTE_MASK) == UTF8_ONE_BYTE_CODE) { 4536 num_char_bytes = 1; 4537 } else if ((utf8[i] & UTF8_TWO_BYTE_MASK) == UTF8_TWO_BYTE_CODE) { 4538 num_char_bytes = 2; 4539 } else if ((utf8[i] & UTF8_THREE_BYTE_MASK) == UTF8_THREE_BYTE_CODE) { 4540 num_char_bytes = 3; 4541 } else { 4542 result = VK_STRING_ERROR_BAD_DATA; 4543 } 4544 4545 // Validate the following num_char_bytes of data 4546 for (j = 0; (j < num_char_bytes) && (i < max_length); j++) { 4547 if (++i == max_length) { 4548 result |= VK_STRING_ERROR_LENGTH; 4549 break; 4550 } 4551 if ((utf8[i] & UTF8_DATA_BYTE_MASK) != UTF8_DATA_BYTE_CODE) { 4552 result |= VK_STRING_ERROR_BAD_DATA; 4553 } 4554 } 4555 } 4556 return result; 4557} 4558
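/*
 * Illustrative sketch of the validation above, assuming the conventional
 * UTF-8 mask/code values (these macros are defined outside this file, e.g.
 * UTF8_ONE_BYTE_MASK == 0xE0, UTF8_ONE_BYTE_CODE == 0xC0,
 * UTF8_DATA_BYTE_MASK == 0xC0, UTF8_DATA_BYTE_CODE == 0x80):
 *
 *   // "caf\xC3\xA9" is "cafe" with an accented e, encoded as 0xC3 0xA9
 *   // 'c', 'a', 'f' fall in the printable ASCII range 0x20-0x7e
 *   // 0xC3 & 0xE0 == 0xC0  -> lead byte expecting one continuation byte
 *   // 0xA9 & 0xC0 == 0x80  -> valid continuation byte
 *   VkStringErrorFlags r =
 *       vk_string_validate(MaxLoaderStringLength, "caf\xC3\xA9");
 *   // r == VK_STRING_ERROR_NONE
 *
 * A lead byte such as 0xC3 followed by a plain ASCII byte fails the
 * continuation-byte check and VK_STRING_ERROR_BAD_DATA is reported instead.
 */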