loader.c revision acb1359c8e1528e5d67bb0101c94c48a07785098
/*
 *
 * Copyright (C) 2015 Valve Corporation
 * Copyright (C) 2015 Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Chia-I Wu <olvaffe@gmail.com>
 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
 * Author: Ian Elliott <ian@LunarG.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 *
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <string.h>

#include <sys/types.h>
#if defined(_WIN32)
#include "dirent_on_windows.h"
#else // _WIN32
#include <dirent.h>
#endif // _WIN32
#include "vk_loader_platform.h"
#include "loader.h"
#include "gpa_helper.h"
#include "table_ops.h"
#include "debug_report.h"
#include "wsi.h"
#include "vulkan/vk_icd.h"
#include "cJSON.h"
#include "murmurhash.h"

static loader_platform_dl_handle loader_add_layer_lib(
        const struct loader_instance *inst,
        const char *chain_type,
        struct loader_layer_properties *layer_prop);

static void loader_remove_layer_lib(
        struct loader_instance *inst,
        struct loader_layer_properties *layer_prop);

struct loader_struct loader = {0};
// TLS for instance for alloc/free callbacks
THREAD_LOCAL_DECL struct loader_instance *tls_instance;

static bool loader_init_generic_list(
        const struct loader_instance *inst,
        struct loader_generic_list *list_info,
        size_t element_size);

static int loader_platform_combine_path(char *dest, int len, ...);

struct loader_phys_dev_per_icd {
    uint32_t count;
    VkPhysicalDevice *phys_devs;
};

enum loader_debug {
    LOADER_INFO_BIT = 0x01,
    LOADER_WARN_BIT = 0x02,
    LOADER_PERF_BIT = 0x04,
    LOADER_ERROR_BIT = 0x08,
    LOADER_DEBUG_BIT = 0x10,
};

uint32_t g_loader_debug = 0;
uint32_t g_loader_log_msgs = 0;

// Thread-safety lock for accessing global data structures such as "loader".
// All entry points on the instance chain need to be locked except GPA;
// additionally, CreateDevice and DestroyDevice need to be locked.
loader_platform_thread_mutex loader_lock;
loader_platform_thread_mutex loader_json_lock;

// This table contains the loader's instance dispatch table, which contains
// default functions if no instance layers are activated. This contains
// pointers to "terminator functions".
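/*
 * Rough call flow for an instance-level entry point (a simplified sketch,
 * assuming at least one enabled layer; not a literal trace of the code):
 *
 *     application
 *       -> loader trampoline (exported vkFoo / address from vkGetInstanceProcAddr)
 *         -> enabled instance layers, in order
 *           -> terminator (the loader_Foo entries below)
 *             -> the relevant ICD(s)
 *
 * With no instance layers enabled, calls go straight from the trampoline to
 * the terminator entries in this table.
 */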
99const VkLayerInstanceDispatchTable instance_disp = { 100 .GetInstanceProcAddr = vkGetInstanceProcAddr, 101 .CreateInstance = loader_CreateInstance, 102 .DestroyInstance = loader_DestroyInstance, 103 .EnumeratePhysicalDevices = loader_EnumeratePhysicalDevices, 104 .GetPhysicalDeviceFeatures = loader_GetPhysicalDeviceFeatures, 105 .GetPhysicalDeviceFormatProperties = loader_GetPhysicalDeviceFormatProperties, 106 .GetPhysicalDeviceImageFormatProperties = loader_GetPhysicalDeviceImageFormatProperties, 107 .GetPhysicalDeviceProperties = loader_GetPhysicalDeviceProperties, 108 .GetPhysicalDeviceQueueFamilyProperties = loader_GetPhysicalDeviceQueueFamilyProperties, 109 .GetPhysicalDeviceMemoryProperties = loader_GetPhysicalDeviceMemoryProperties, 110 .EnumerateDeviceExtensionProperties = loader_EnumerateDeviceExtensionProperties, 111 .EnumerateDeviceLayerProperties = loader_EnumerateDeviceLayerProperties, 112 .GetPhysicalDeviceSparseImageFormatProperties = loader_GetPhysicalDeviceSparseImageFormatProperties, 113 .DestroySurfaceKHR = loader_DestroySurfaceKHR, 114 .GetPhysicalDeviceSurfaceSupportKHR = loader_GetPhysicalDeviceSurfaceSupportKHR, 115 .GetPhysicalDeviceSurfaceCapabilitiesKHR = loader_GetPhysicalDeviceSurfaceCapabilitiesKHR, 116 .GetPhysicalDeviceSurfaceFormatsKHR = loader_GetPhysicalDeviceSurfaceFormatsKHR, 117 .GetPhysicalDeviceSurfacePresentModesKHR = loader_GetPhysicalDeviceSurfacePresentModesKHR, 118 .CreateDebugReportCallbackEXT = loader_CreateDebugReportCallback, 119 .DestroyDebugReportCallbackEXT = loader_DestroyDebugReportCallback, 120 .DebugReportMessageEXT = loader_DebugReportMessage, 121#ifdef VK_USE_PLATFORM_MIR_KHR 122 .CreateMirSurfaceKHR = loader_CreateMirSurfaceKHR, 123 .GetPhysicalDeviceMirPresentationSupportKHR = loader_GetPhysicalDeviceMirPresentationSupportKHR, 124#endif 125#ifdef VK_USE_PLATFORM_WAYLAND_KHR 126 .CreateWaylandSurfaceKHR = loader_CreateWaylandSurfaceKHR, 127 .GetPhysicalDeviceWaylandPresentationSupportKHR = loader_GetPhysicalDeviceWaylandPresentationSupportKHR, 128#endif 129#ifdef VK_USE_PLATFORM_WIN32_KHR 130 .CreateWin32SurfaceKHR = loader_CreateWin32SurfaceKHR, 131 .GetPhysicalDeviceWin32PresentationSupportKHR = loader_GetPhysicalDeviceWin32PresentationSupportKHR, 132#endif 133#ifdef VK_USE_PLATFORM_XCB_KHR 134 .CreateXcbSurfaceKHR = loader_CreateXcbSurfaceKHR, 135 .GetPhysicalDeviceXcbPresentationSupportKHR = loader_GetPhysicalDeviceXcbPresentationSupportKHR, 136#endif 137#ifdef VK_USE_PLATFORM_XLIB_KHR 138 .CreateXlibSurfaceKHR = loader_CreateXlibSurfaceKHR, 139 .GetPhysicalDeviceXlibPresentationSupportKHR = loader_GetPhysicalDeviceXlibPresentationSupportKHR, 140#endif 141#ifdef VK_USE_PLATFORM_ANDROID_KHR 142 .CreateAndroidSurfaceKHR = loader_CreateAndroidSurfaceKHR, 143#endif 144}; 145 146LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init); 147 148void* loader_heap_alloc( 149 const struct loader_instance *instance, 150 size_t size, 151 VkSystemAllocationScope alloc_scope) 152{ 153 if (instance && instance->alloc_callbacks.pfnAllocation) { 154 /* TODO: What should default alignment be? 1, 4, 8, other? 
*/ 155 return instance->alloc_callbacks.pfnAllocation(instance->alloc_callbacks.pUserData, size, 4, alloc_scope); 156 } 157 return malloc(size); 158} 159 160void loader_heap_free( 161 const struct loader_instance *instance, 162 void *pMemory) 163{ 164 if (pMemory == NULL) return; 165 if (instance && instance->alloc_callbacks.pfnFree) { 166 instance->alloc_callbacks.pfnFree(instance->alloc_callbacks.pUserData, pMemory); 167 return; 168 } 169 free(pMemory); 170} 171 172void* loader_heap_realloc( 173 const struct loader_instance *instance, 174 void *pMemory, 175 size_t orig_size, 176 size_t size, 177 VkSystemAllocationScope alloc_scope) 178{ 179 if (pMemory == NULL || orig_size == 0) 180 return loader_heap_alloc(instance, size, alloc_scope); 181 if (size == 0) { 182 loader_heap_free(instance, pMemory); 183 return NULL; 184 } 185 if (instance && instance->alloc_callbacks.pfnAllocation) { 186 if (size <= orig_size) { 187 memset(((uint8_t *)pMemory) + size, 0, orig_size - size); 188 return pMemory; 189 } 190 void *new_ptr = instance->alloc_callbacks.pfnAllocation(instance->alloc_callbacks.pUserData, size, 4, alloc_scope); 191 if (!new_ptr) 192 return NULL; 193 memcpy(new_ptr, pMemory, orig_size); 194 instance->alloc_callbacks.pfnFree(instance->alloc_callbacks.pUserData, pMemory); 195 return new_ptr; 196 } 197 return realloc(pMemory, size); 198} 199 200void *loader_tls_heap_alloc(size_t size) 201{ 202 return loader_heap_alloc(tls_instance, size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 203} 204 205void loader_tls_heap_free(void *pMemory) 206{ 207 loader_heap_free(tls_instance, pMemory); 208} 209 210static void loader_log(const struct loader_instance *inst, VkFlags msg_type, int32_t msg_code, 211 const char *format, ...) 212{ 213 char msg[512]; 214 va_list ap; 215 int ret; 216 217 va_start(ap, format); 218 ret = vsnprintf(msg, sizeof(msg), format, ap); 219 if ((ret >= (int) sizeof(msg)) || ret < 0) { 220 msg[sizeof(msg)-1] = '\0'; 221 } 222 va_end(ap); 223 224 if (inst) { 225 util_DebugReportMessage(inst, msg_type, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, (uint64_t) inst, 0, msg_code, "loader", msg); 226 } 227 228 if (!(msg_type & g_loader_log_msgs)) { 229 return; 230 } 231 232#if defined(WIN32) 233 OutputDebugString(msg); 234 OutputDebugString("\n"); 235#endif 236 fputs(msg, stderr); 237 fputc('\n', stderr); 238} 239 240#if defined(WIN32) 241static char *loader_get_next_path(char *path); 242/** 243* Find the list of registry files (names within a key) in key "location". 244* 245* This function looks in the registry (hive = DEFAULT_VK_REGISTRY_HIVE) key as given in "location" 246* for a list or name/values which are added to a returned list (function return value). 247* The DWORD values within the key must be 0 or they are skipped. 248* Function return is a string with a ';' separated list of filenames. 249* Function return is NULL if no valid name/value pairs are found in the key, 250* or the key is not found. 251* 252* \returns 253* A string list of filenames as pointer. 254* When done using the returned string list, pointer should be freed. 
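*
* Illustrative example (key and file names below are hypothetical): if the
* "location" key contains the values
*     C:\drivers\foo_icd.json   (DWORD data 0)
*     C:\drivers\bar_icd.json   (DWORD data 0)
* the function returns "C:\drivers\foo_icd.json;C:\drivers\bar_icd.json",
* which the caller can walk with loader_get_next_path() and must eventually
* free.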
255*/ 256static char *loader_get_registry_files(const struct loader_instance *inst, char *location) 257{ 258 LONG rtn_value; 259 HKEY hive, key; 260 DWORD access_flags; 261 char name[2048]; 262 char *out = NULL; 263 char *loc = location; 264 char *next; 265 DWORD idx = 0; 266 DWORD name_size = sizeof(name); 267 DWORD value; 268 DWORD total_size = 4096; 269 DWORD value_size = sizeof(value); 270 271 while(*loc) 272 { 273 next = loader_get_next_path(loc); 274 hive = DEFAULT_VK_REGISTRY_HIVE; 275 access_flags = KEY_QUERY_VALUE; 276 rtn_value = RegOpenKeyEx(hive, loc, 0, access_flags, &key); 277 if (rtn_value != ERROR_SUCCESS) { 278 // We didn't find the key. Try the 32-bit hive (where we've seen the 279 // key end up on some people's systems): 280 access_flags |= KEY_WOW64_32KEY; 281 rtn_value = RegOpenKeyEx(hive, loc, 0, access_flags, &key); 282 if (rtn_value != ERROR_SUCCESS) { 283 // We still couldn't find the key, so give up: 284 loc = next; 285 continue; 286 } 287 } 288 289 while ((rtn_value = RegEnumValue(key, idx++, name, &name_size, NULL, NULL, (LPBYTE) &value, &value_size)) == ERROR_SUCCESS) { 290 if (value_size == sizeof(value) && value == 0) { 291 if (out == NULL) { 292 out = loader_heap_alloc(inst, total_size, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 293 out[0] = '\0'; 294 } 295 else if (strlen(out) + name_size + 1 > total_size) { 296 out = loader_heap_realloc(inst, out, total_size, total_size * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 297 total_size *= 2; 298 } 299 if (out == NULL) { 300 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Out of memory, failed loader_get_registry_files"); 301 return NULL; 302 } 303 if (strlen(out) == 0) 304 snprintf(out, name_size + 1, "%s", name); 305 else 306 snprintf(out + strlen(out), name_size + 2, "%c%s", PATH_SEPERATOR, name); 307 } 308 name_size = 2048; 309 } 310 loc = next; 311 } 312 313 return out; 314} 315 316#endif // WIN32 317 318/** 319 * Combine path elements, separating each element with the platform-specific 320 * directory separator, and save the combined string to a destination buffer, 321 * not exceeding the given length. Path elements are given as variadic args, 322 * with a NULL element terminating the list. 323 * 324 * \returns the total length of the combined string, not including an ASCII 325 * NUL termination character. This length may exceed the available storage: 326 * in this case, the written string will be truncated to avoid a buffer 327 * overrun, and the return value will greater than or equal to the storage 328 * size. A NULL argument may be provided as the destination buffer in order 329 * to determine the required string length without actually writing a string. 330 */ 331 332static int loader_platform_combine_path(char *dest, int len, ...) 333{ 334 int required_len = 0; 335 va_list ap; 336 const char *component; 337 338 va_start(ap, len); 339 340 while((component = va_arg(ap, const char *))) { 341 if (required_len > 0) { 342 // This path element is not the first non-empty element; prepend 343 // a directory separator if space allows 344 if (dest && required_len + 1 < len) { 345 snprintf(dest + required_len, len - required_len, "%c", 346 DIRECTORY_SYMBOL); 347 } 348 required_len++; 349 } 350 351 if (dest && required_len < len) { 352 strncpy(dest + required_len, component, len - required_len); 353 } 354 required_len += strlen(component); 355 } 356 357 va_end(ap); 358 359 // strncpy(3) won't add a NUL terminating byte in the event of truncation. 
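    /*
     * Illustrative use of the contract documented above (path components
     * here are hypothetical): pass NULL as the destination to size the
     * buffer, then combine for real.
     *
     *     int need = loader_platform_combine_path(NULL, 0,
     *                                             "/etc", "vulkan", "icd.d", NULL);
     *     char *buf = loader_stack_alloc(need + 1);
     *     loader_platform_combine_path(buf, need + 1,
     *                                  "/etc", "vulkan", "icd.d", NULL);
     *     // buf is "/etc/vulkan/icd.d" where DIRECTORY_SYMBOL is '/'
     */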
    if (dest && required_len >= len) {
        dest[len - 1] = '\0';
    }

    return required_len;
}


/**
 * Given a string of the three-part form "major.minor.patch", convert it to a
 * Vulkan version number.
 */
static uint32_t loader_make_version(const char *vers_str)
{
    uint32_t vers = 0, major = 0, minor = 0, patch = 0;
    char *minor_str = NULL;
    char *patch_str = NULL;
    char *cstr;
    char *str;

    if (!vers_str)
        return vers;
    cstr = loader_stack_alloc(strlen(vers_str) + 1);
    strcpy(cstr, vers_str);
    while ((str = strchr(cstr, '.')) != NULL) {
        if (minor_str == NULL) {
            minor_str = str + 1;
            *str = '\0';
            major = atoi(cstr);
        }
        else if (patch_str == NULL) {
            patch_str = str + 1;
            *str = '\0';
            minor = atoi(minor_str);
        }
        else {
            return vers;
        }
        cstr = str + 1;
    }
    // Tolerate strings with fewer than two '.' separators (e.g. "1.0" or
    // "1"): parse whatever components are present rather than dereferencing
    // a NULL pointer.
    if (patch_str != NULL)
        patch = atoi(patch_str);
    else if (minor_str != NULL)
        minor = atoi(minor_str);
    else
        major = atoi(cstr);

    return VK_MAKE_VERSION(major, minor, patch);

}

bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2)
{
    return strcmp(op1->extensionName, op2->extensionName) == 0 ? true : false;
}

/**
 * Search the given ext_array for an extension
 * matching the given vk_ext_prop
 */
bool has_vk_extension_property_array(
        const VkExtensionProperties *vk_ext_prop,
        const uint32_t count,
        const VkExtensionProperties *ext_array)
{
    for (uint32_t i = 0; i < count; i++) {
        if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i]))
            return true;
    }
    return false;
}

/**
 * Search the given ext_list for an extension
 * matching the given vk_ext_prop
 */
bool has_vk_extension_property(
        const VkExtensionProperties *vk_ext_prop,
        const struct loader_extension_list *ext_list)
{
    for (uint32_t i = 0; i < ext_list->count; i++) {
        if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop))
            return true;
    }
    return false;
}

static inline bool loader_is_layer_type_device(const enum layer_type type) {
    if ((type & VK_LAYER_TYPE_DEVICE_EXPLICIT) ||
        (type & VK_LAYER_TYPE_DEVICE_IMPLICIT))
        return true;
    return false;
}

/*
 * Search the given layer list for a layer matching the given layer name
 */
static struct loader_layer_properties *loader_get_layer_property(
        const char *name,
        const struct loader_layer_list *layer_list)
{
    for (uint32_t i = 0; i < layer_list->count; i++) {
        const VkLayerProperties *item = &layer_list->list[i].info;
        if (strcmp(name, item->layerName) == 0)
            return &layer_list->list[i];
    }
    return NULL;
}

/**
 * Get the next unused layer property in the list. Init the property to zero.
466 */ 467static struct loader_layer_properties *loader_get_next_layer_property( 468 const struct loader_instance *inst, 469 struct loader_layer_list *layer_list) 470{ 471 if (layer_list->capacity == 0) { 472 layer_list->list = loader_heap_alloc(inst, 473 sizeof(struct loader_layer_properties) * 64, 474 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 475 if (layer_list->list == NULL) { 476 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Out of memory can't add any layer properties to list"); 477 return NULL; 478 } 479 memset(layer_list->list, 0, sizeof(struct loader_layer_properties) * 64); 480 layer_list->capacity = sizeof(struct loader_layer_properties) * 64; 481 } 482 483 // ensure enough room to add an entry 484 if ((layer_list->count + 1) * sizeof (struct loader_layer_properties) 485 > layer_list->capacity) { 486 layer_list->list = loader_heap_realloc(inst, layer_list->list, 487 layer_list->capacity, 488 layer_list->capacity * 2, 489 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 490 if (layer_list->list == NULL) { 491 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 492 "realloc failed for layer list"); 493 } 494 layer_list->capacity *= 2; 495 } 496 497 layer_list->count++; 498 return &(layer_list->list[layer_list->count - 1]); 499} 500 501/** 502 * Remove all layer properties entrys from the list 503 */ 504void loader_delete_layer_properties( 505 const struct loader_instance *inst, 506 struct loader_layer_list *layer_list) 507{ 508 uint32_t i, j; 509 struct loader_device_extension_list *dev_ext_list; 510 if (!layer_list) 511 return; 512 513 for (i = 0; i < layer_list->count; i++) { 514 loader_destroy_generic_list(inst, (struct loader_generic_list *) 515 &layer_list->list[i].instance_extension_list); 516 dev_ext_list = &layer_list->list[i].device_extension_list; 517 if (dev_ext_list->capacity > 0 && dev_ext_list->list->entrypoint_count > 0) { 518 for (j= 0; j < dev_ext_list->list->entrypoint_count; j++) { 519 loader_heap_free(inst, dev_ext_list->list->entrypoints[j]); 520 } 521 loader_heap_free(inst, dev_ext_list->list->entrypoints); 522 } 523 loader_destroy_generic_list(inst, (struct loader_generic_list *) 524 dev_ext_list); 525 } 526 layer_list->count = 0; 527 528 if (layer_list->capacity > 0) { 529 layer_list->capacity = 0; 530 loader_heap_free(inst, layer_list->list); 531 } 532 533} 534 535static void loader_add_instance_extensions( 536 const struct loader_instance *inst, 537 const PFN_vkEnumerateInstanceExtensionProperties fp_get_props, 538 const char *lib_name, 539 struct loader_extension_list *ext_list) 540{ 541 uint32_t i, count; 542 VkExtensionProperties *ext_props; 543 VkResult res; 544 545 if (!fp_get_props) { 546 /* No EnumerateInstanceExtensionProperties defined */ 547 return; 548 } 549 550 res = fp_get_props(NULL, &count, NULL); 551 if (res != VK_SUCCESS) { 552 loader_log(inst, VK_DEBUG_REPORT_WARN_BIT_EXT, 0, "Error getting Instance extension count from %s", lib_name); 553 return; 554 } 555 556 if (count == 0) { 557 /* No ExtensionProperties to report */ 558 return; 559 } 560 561 ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties)); 562 563 res = fp_get_props(NULL, &count, ext_props); 564 if (res != VK_SUCCESS) { 565 loader_log(inst, VK_DEBUG_REPORT_WARN_BIT_EXT, 0, "Error getting Instance extensions from %s", lib_name); 566 return; 567 } 568 569 for (i = 0; i < count; i++) { 570 char spec_version[64]; 571 572 snprintf(spec_version, sizeof(spec_version), "%d.%d.%d", 573 VK_MAJOR(ext_props[i].specVersion), 574 VK_MINOR(ext_props[i].specVersion), 575 
VK_PATCH(ext_props[i].specVersion)); 576 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 577 "Instance Extension: %s (%s) version %s", 578 ext_props[i].extensionName, lib_name, spec_version); 579 loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]); 580 } 581 582 return; 583} 584 585/* 586 * Initialize ext_list with the physical device extensions. 587 * The extension properties are passed as inputs in count and ext_props. 588 */ 589static VkResult loader_init_device_extensions( 590 const struct loader_instance *inst, 591 struct loader_physical_device *phys_dev, 592 uint32_t count, 593 VkExtensionProperties *ext_props, 594 struct loader_extension_list *ext_list) 595{ 596 VkResult res; 597 uint32_t i; 598 599 if (!loader_init_generic_list(inst, (struct loader_generic_list *) ext_list, 600 sizeof(VkExtensionProperties))) { 601 return VK_ERROR_OUT_OF_HOST_MEMORY; 602 } 603 604 for (i = 0; i < count; i++) { 605 char spec_version[64]; 606 607 snprintf(spec_version, sizeof (spec_version), "%d.%d.%d", 608 VK_MAJOR(ext_props[i].specVersion), 609 VK_MINOR(ext_props[i].specVersion), 610 VK_PATCH(ext_props[i].specVersion)); 611 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 612 "Device Extension: %s (%s) version %s", 613 ext_props[i].extensionName, phys_dev->this_icd->this_icd_lib->lib_name, spec_version); 614 res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]); 615 if (res != VK_SUCCESS) 616 return res; 617 } 618 619 return VK_SUCCESS; 620} 621 622static VkResult loader_add_device_extensions( 623 const struct loader_instance *inst, 624 VkPhysicalDevice physical_device, 625 const char *lib_name, 626 struct loader_extension_list *ext_list) 627{ 628 uint32_t i, count; 629 VkResult res; 630 VkExtensionProperties *ext_props; 631 632 res = loader_EnumerateDeviceExtensionProperties(physical_device, NULL, &count, NULL); 633 if (res == VK_SUCCESS && count > 0) { 634 ext_props = loader_stack_alloc(count * sizeof (VkExtensionProperties)); 635 if (!ext_props) 636 return VK_ERROR_OUT_OF_HOST_MEMORY; 637 res = loader_EnumerateDeviceExtensionProperties(physical_device, NULL, &count, ext_props); 638 if (res != VK_SUCCESS) 639 return res; 640 for (i = 0; i < count; i++) { 641 char spec_version[64]; 642 643 snprintf(spec_version, sizeof (spec_version), "%d.%d.%d", 644 VK_MAJOR(ext_props[i].specVersion), 645 VK_MINOR(ext_props[i].specVersion), 646 VK_PATCH(ext_props[i].specVersion)); 647 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 648 "Device Extension: %s (%s) version %s", 649 ext_props[i].extensionName, lib_name, spec_version); 650 res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]); 651 if (res != VK_SUCCESS) 652 return res; 653 } 654 } else { 655 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Error getting physical device extension info count from library %s", lib_name); 656 return res; 657 } 658 659 return VK_SUCCESS; 660} 661 662static bool loader_init_generic_list(const struct loader_instance *inst, 663 struct loader_generic_list *list_info, 664 size_t element_size) 665{ 666 list_info->capacity = 32 * element_size; 667 list_info->list = loader_heap_alloc(inst, list_info->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 668 if (list_info->list == NULL) { 669 return false; 670 } 671 memset(list_info->list, 0, list_info->capacity); 672 list_info->count = 0; 673 return true; 674} 675 676void loader_destroy_generic_list(const struct loader_instance *inst, 677 struct loader_generic_list *list) 678{ 679 loader_heap_free(inst, list->list); 680 list->count = 0; 681 
list->capacity = 0; 682} 683 684/* 685 * Append non-duplicate extension properties defined in props 686 * to the given ext_list. 687 * Return 688 * Vk_SUCCESS on success 689 */ 690VkResult loader_add_to_ext_list( 691 const struct loader_instance *inst, 692 struct loader_extension_list *ext_list, 693 uint32_t prop_list_count, 694 const VkExtensionProperties *props) 695{ 696 uint32_t i; 697 const VkExtensionProperties *cur_ext; 698 699 if (ext_list->list == NULL || ext_list->capacity == 0) { 700 loader_init_generic_list(inst, (struct loader_generic_list *) ext_list, 701 sizeof(VkExtensionProperties)); 702 } 703 704 if (ext_list->list == NULL) 705 return VK_ERROR_OUT_OF_HOST_MEMORY; 706 707 for (i = 0; i < prop_list_count; i++) { 708 cur_ext = &props[i]; 709 710 // look for duplicates 711 if (has_vk_extension_property(cur_ext, ext_list)) { 712 continue; 713 } 714 715 // add to list at end 716 // check for enough capacity 717 if (ext_list->count * sizeof(VkExtensionProperties) 718 >= ext_list->capacity) { 719 720 ext_list->list = loader_heap_realloc(inst, 721 ext_list->list, 722 ext_list->capacity, 723 ext_list->capacity * 2, 724 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 725 726 if (ext_list->list == NULL) 727 return VK_ERROR_OUT_OF_HOST_MEMORY; 728 729 // double capacity 730 ext_list->capacity *= 2; 731 } 732 733 memcpy(&ext_list->list[ext_list->count], cur_ext, sizeof(VkExtensionProperties)); 734 ext_list->count++; 735 } 736 return VK_SUCCESS; 737} 738 739/* 740 * Append one extension property defined in props with entrypoints 741 * defined in entrys to the given ext_list. 742 * Return 743 * Vk_SUCCESS on success 744 */ 745VkResult loader_add_to_dev_ext_list( 746 const struct loader_instance *inst, 747 struct loader_device_extension_list *ext_list, 748 const VkExtensionProperties *props, 749 uint32_t entry_count, 750 char **entrys) 751{ 752 uint32_t idx; 753 if (ext_list->list == NULL || ext_list->capacity == 0) { 754 loader_init_generic_list(inst, (struct loader_generic_list *) ext_list, 755 sizeof(struct loader_dev_ext_props)); 756 } 757 758 if (ext_list->list == NULL) 759 return VK_ERROR_OUT_OF_HOST_MEMORY; 760 761 idx =ext_list->count; 762 // add to list at end 763 // check for enough capacity 764 if (idx * sizeof (struct loader_dev_ext_props) 765 >= ext_list->capacity) { 766 767 ext_list->list = loader_heap_realloc(inst, 768 ext_list->list, 769 ext_list->capacity, 770 ext_list->capacity * 2, 771 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 772 773 if (ext_list->list == NULL) 774 return VK_ERROR_OUT_OF_HOST_MEMORY; 775 776 // double capacity 777 ext_list->capacity *= 2; 778 } 779 780 memcpy(&ext_list->list[idx].props, props, sizeof(struct loader_dev_ext_props)); 781 ext_list->list[idx].entrypoint_count = entry_count; 782 ext_list->list[idx].entrypoints = loader_heap_alloc(inst, 783 sizeof(char *) * entry_count, 784 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 785 if (ext_list->list[idx].entrypoints == NULL) 786 return VK_ERROR_OUT_OF_HOST_MEMORY; 787 for (uint32_t i = 0; i < entry_count; i++) { 788 ext_list->list[idx].entrypoints[i] = loader_heap_alloc(inst, 789 strlen(entrys[i]) + 1, 790 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 791 if (ext_list->list[idx].entrypoints[i] == NULL) 792 return VK_ERROR_OUT_OF_HOST_MEMORY; 793 strcpy(ext_list->list[idx].entrypoints[i], entrys[i]); 794 } 795 ext_list->count++; 796 797 return VK_SUCCESS; 798} 799 800/** 801 * Search the given search_list for any layers in the props list. 802 * Add these to the output layer_list. Don't add duplicates to the output layer_list. 
 */
static VkResult loader_add_layer_names_to_list(
        const struct loader_instance *inst,
        struct loader_layer_list *output_list,
        uint32_t name_count,
        const char * const *names,
        const struct loader_layer_list *search_list)
{
    struct loader_layer_properties *layer_prop;
    VkResult err = VK_SUCCESS;

    for (uint32_t i = 0; i < name_count; i++) {
        const char *search_target = names[i];
        layer_prop = loader_get_layer_property(search_target, search_list);
        if (!layer_prop) {
            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Unable to find layer %s", search_target);
            err = VK_ERROR_LAYER_NOT_PRESENT;
            continue;
        }

        loader_add_to_layer_list(inst, output_list, 1, layer_prop);
    }

    return err;
}


/*
 * Manage lists of VkLayerProperties
 */
static bool loader_init_layer_list(const struct loader_instance *inst,
                                   struct loader_layer_list *list)
{
    list->capacity = 32 * sizeof(struct loader_layer_properties);
    list->list = loader_heap_alloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (list->list == NULL) {
        return false;
    }
    memset(list->list, 0, list->capacity);
    list->count = 0;
    return true;
}

void loader_destroy_layer_list(const struct loader_instance *inst,
                               struct loader_layer_list *layer_list)
{
    loader_heap_free(inst, layer_list->list);
    layer_list->count = 0;
    layer_list->capacity = 0;
}

/*
 * Manage list of layer libraries (loader_lib_info)
 */
static bool loader_init_layer_library_list(const struct loader_instance *inst,
                                           struct loader_layer_library_list *list)
{
    list->capacity = 32 * sizeof(struct loader_lib_info);
    list->list = loader_heap_alloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (list->list == NULL) {
        return false;
    }
    memset(list->list, 0, list->capacity);
    list->count = 0;
    return true;
}

void loader_destroy_layer_library_list(const struct loader_instance *inst,
                                       struct loader_layer_library_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        loader_heap_free(inst, list->list[i].lib_name);
    }
    loader_heap_free(inst, list->list);
    list->count = 0;
    list->capacity = 0;
}

void loader_add_to_layer_library_list(
        const struct loader_instance *inst,
        struct loader_layer_library_list *list,
        uint32_t item_count,
        const struct loader_lib_info *new_items)
{
    uint32_t i;
    struct loader_lib_info *item;

    if (list->list == NULL || list->capacity == 0) {
        loader_init_layer_library_list(inst, list);
    }

    if (list->list == NULL)
        return;

    for (i = 0; i < item_count; i++) {
        item = (struct loader_lib_info *) &new_items[i];

        // look for duplicates; skip items whose lib_name is already in the list
        bool found = false;
        for (uint32_t j = 0; j < list->count; j++) {
            if (strcmp(list->list[j].lib_name, item->lib_name) == 0) {
                found = true;
                break;
            }
        }
        if (found)
            continue;

        // add to list at end
        // check for enough capacity
        if (list->count * sizeof(struct loader_lib_info)
            >= list->capacity) {

            list->list = loader_heap_realloc(inst,
                                             list->list,
                                             list->capacity,
                                             list->capacity * 2,
                                             VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
            // double capacity
            list->capacity *= 2;
        }

        memcpy(&list->list[list->count], item, sizeof(struct loader_lib_info));
        list->count++;
    }
}


/*
 * Search the given layer list for a layer
 * matching the given VkLayerProperties
 */
bool
has_vk_layer_property( 932 const VkLayerProperties *vk_layer_prop, 933 const struct loader_layer_list *list) 934{ 935 for (uint32_t i = 0; i < list->count; i++) { 936 if (strcmp(vk_layer_prop->layerName, list->list[i].info.layerName) == 0) 937 return true; 938 } 939 return false; 940} 941 942/* 943 * Search the given layer list for a layer 944 * matching the given name 945 */ 946bool has_layer_name( 947 const char *name, 948 const struct loader_layer_list *list) 949{ 950 for (uint32_t i = 0; i < list->count; i++) { 951 if (strcmp(name, list->list[i].info.layerName) == 0) 952 return true; 953 } 954 return false; 955} 956 957/* 958 * Append non-duplicate layer properties defined in prop_list 959 * to the given layer_info list 960 */ 961void loader_add_to_layer_list( 962 const struct loader_instance *inst, 963 struct loader_layer_list *list, 964 uint32_t prop_list_count, 965 const struct loader_layer_properties *props) 966{ 967 uint32_t i; 968 struct loader_layer_properties *layer; 969 970 if (list->list == NULL || list->capacity == 0) { 971 loader_init_layer_list(inst, list); 972 } 973 974 if (list->list == NULL) 975 return; 976 977 for (i = 0; i < prop_list_count; i++) { 978 layer = (struct loader_layer_properties *) &props[i]; 979 980 // look for duplicates 981 if (has_vk_layer_property(&layer->info, list)) { 982 continue; 983 } 984 985 // add to list at end 986 // check for enough capacity 987 if (list->count * sizeof(struct loader_layer_properties) 988 >= list->capacity) { 989 990 list->list = loader_heap_realloc(inst, 991 list->list, 992 list->capacity, 993 list->capacity * 2, 994 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 995 // double capacity 996 list->capacity *= 2; 997 } 998 999 memcpy(&list->list[list->count], layer, sizeof(struct loader_layer_properties)); 1000 list->count++; 1001 } 1002} 1003 1004/** 1005 * Search the search_list for any layer with a name 1006 * that matches the given name and a type that matches the given type 1007 * Add all matching layers to the found_list 1008 * Do not add if found loader_layer_properties is already 1009 * on the found_list. 
1010 */ 1011static void loader_find_layer_name_add_list( 1012 const struct loader_instance *inst, 1013 const char *name, 1014 const enum layer_type type, 1015 const struct loader_layer_list *search_list, 1016 struct loader_layer_list *found_list) 1017{ 1018 bool found = false; 1019 for (uint32_t i = 0; i < search_list->count; i++) { 1020 struct loader_layer_properties *layer_prop = &search_list->list[i]; 1021 if (0 == strcmp(layer_prop->info.layerName, name) && 1022 (layer_prop->type & type)) { 1023 /* Found a layer with the same name, add to found_list */ 1024 loader_add_to_layer_list(inst, found_list, 1, layer_prop); 1025 found = true; 1026 } 1027 } 1028 if (!found) { 1029 loader_log(inst, VK_DEBUG_REPORT_WARN_BIT_EXT, 0, "Warning, couldn't find layer name %s to activate", name); 1030 } 1031} 1032 1033static VkExtensionProperties *get_extension_property( 1034 const char *name, 1035 const struct loader_extension_list *list) 1036{ 1037 for (uint32_t i = 0; i < list->count; i++) { 1038 if (strcmp(name, list->list[i].extensionName) == 0) 1039 return &list->list[i]; 1040 } 1041 return NULL; 1042} 1043 1044static VkExtensionProperties *get_dev_extension_property( 1045 const char *name, 1046 const struct loader_device_extension_list *list) 1047{ 1048 for (uint32_t i = 0; i < list->count; i++) { 1049 if (strcmp(name, list->list[i].props.extensionName) == 0) 1050 return &list->list[i].props; 1051 } 1052 return NULL; 1053} 1054 1055/* 1056 * For Instance extensions implemented within the loader (i.e. DEBUG_REPORT 1057 * the extension must provide two entry points for the loader to use: 1058 * - "trampoline" entry point - this is the address returned by GetProcAddr 1059 * and will always do what's necessary to support a global call. 1060 * - "terminator" function - this function will be put at the end of the 1061 * instance chain and will contain the necessary logic to call / process 1062 * the extension for the appropriate ICDs that are available. 1063 * There is no generic mechanism for including these functions, the references 1064 * must be placed into the appropriate loader entry points. 1065 * GetInstanceProcAddr: call extension GetInstanceProcAddr to check for GetProcAddr requests 1066 * loader_coalesce_extensions(void) - add extension records to the list of global 1067 * extension available to the app. 1068 * instance_disp - add function pointer for terminator function to this array. 1069 * The extension itself should be in a separate file that will be 1070 * linked directly with the loader. 
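 *
 * A rough sketch of the two pieces (the FooEXT names below are hypothetical;
 * this is not the actual debug_report or WSI code):
 *
 *     // trampoline: the address handed back by GetInstanceProcAddr
 *     VKAPI_ATTR void VKAPI_CALL vkFooEXT(VkInstance instance)
 *     {
 *         // enter the instance chain through the instance's dispatch table
 *         loader_get_instance_dispatch(instance)->FooEXT(instance);
 *     }
 *
 *     // terminator: reached at the end of the instance chain via instance_disp
 *     VKAPI_ATTR void VKAPI_CALL loader_FooEXT(VkInstance instance)
 *     {
 *         // fan the call out to each ICD that exposes the extension
 *     }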
1071 */ 1072 1073void loader_get_icd_loader_instance_extensions( 1074 const struct loader_instance *inst, 1075 struct loader_icd_libs *icd_libs, 1076 struct loader_extension_list *inst_exts) 1077{ 1078 struct loader_extension_list icd_exts; 1079 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Build ICD instance extension list"); 1080 // traverse scanned icd list adding non-duplicate extensions to the list 1081 for (uint32_t i = 0; i < icd_libs->count; i++) { 1082 loader_init_generic_list(inst, (struct loader_generic_list *) &icd_exts, 1083 sizeof(VkExtensionProperties)); 1084 loader_add_instance_extensions(inst, icd_libs->list[i].EnumerateInstanceExtensionProperties, 1085 icd_libs->list[i].lib_name, 1086 &icd_exts); 1087 loader_add_to_ext_list(inst, inst_exts, 1088 icd_exts.count, 1089 icd_exts.list); 1090 loader_destroy_generic_list(inst, (struct loader_generic_list *) &icd_exts); 1091 }; 1092 1093 // Traverse loader's extensions, adding non-duplicate extensions to the list 1094 wsi_add_instance_extensions(inst, inst_exts); 1095 debug_report_add_instance_extensions(inst, inst_exts); 1096} 1097 1098struct loader_physical_device *loader_get_physical_device(const VkPhysicalDevice physdev) 1099{ 1100 uint32_t i; 1101 for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) { 1102 for (i = 0; i < inst->total_gpu_count; i++) { 1103 //TODO this aliases physDevices within instances, need for this 1104 // function to go away 1105 if (inst->phys_devs[i].disp == loader_get_instance_dispatch(physdev)) { 1106 return &inst->phys_devs[i]; 1107 } 1108 } 1109 } 1110 return NULL; 1111} 1112 1113struct loader_icd *loader_get_icd_and_device(const VkDevice device, 1114 struct loader_device **found_dev) 1115{ 1116 *found_dev = NULL; 1117 for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) { 1118 for (struct loader_icd *icd = inst->icds; icd; icd = icd->next) { 1119 for (struct loader_device *dev = icd->logical_device_list; dev; dev = dev->next) 1120 /* Value comparison of device prevents object wrapping by layers */ 1121 if (loader_get_dispatch(dev->device) == loader_get_dispatch(device)) { 1122 *found_dev = dev; 1123 return icd; 1124 } 1125 } 1126 } 1127 return NULL; 1128} 1129 1130static void loader_destroy_logical_device(const struct loader_instance *inst, 1131 struct loader_device *dev) 1132{ 1133 loader_heap_free(inst, dev->app_extension_props); 1134 if (dev->activated_layer_list.count) 1135 loader_destroy_layer_list(inst, &dev->activated_layer_list); 1136 loader_heap_free(inst, dev); 1137} 1138 1139static struct loader_device *loader_add_logical_device( 1140 const struct loader_instance *inst, 1141 const VkDevice dev, 1142 struct loader_device **device_list) 1143{ 1144 struct loader_device *new_dev; 1145 1146 new_dev = loader_heap_alloc(inst, sizeof(struct loader_device), VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); 1147 if (!new_dev) { 1148 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Failed to alloc struct laoder-device"); 1149 return NULL; 1150 } 1151 1152 memset(new_dev, 0, sizeof(struct loader_device)); 1153 1154 new_dev->next = *device_list; 1155 new_dev->device = dev; 1156 *device_list = new_dev; 1157 return new_dev; 1158} 1159 1160void loader_remove_logical_device( 1161 const struct loader_instance *inst, 1162 struct loader_icd *icd, 1163 struct loader_device *found_dev) 1164{ 1165 struct loader_device *dev, *prev_dev; 1166 1167 if (!icd || !found_dev) 1168 return; 1169 1170 prev_dev = NULL; 1171 dev = icd->logical_device_list; 1172 while (dev && 
dev != found_dev) { 1173 prev_dev = dev; 1174 dev = dev->next; 1175 } 1176 1177 if (prev_dev) 1178 prev_dev->next = found_dev->next; 1179 else 1180 icd->logical_device_list = found_dev->next; 1181 loader_destroy_logical_device(inst, found_dev); 1182} 1183 1184 1185static void loader_icd_destroy( 1186 struct loader_instance *ptr_inst, 1187 struct loader_icd *icd) 1188{ 1189 ptr_inst->total_icd_count--; 1190 for (struct loader_device *dev = icd->logical_device_list; dev; ) { 1191 struct loader_device *next_dev = dev->next; 1192 loader_destroy_logical_device(ptr_inst, dev); 1193 dev = next_dev; 1194 } 1195 1196 loader_heap_free(ptr_inst, icd); 1197} 1198 1199static struct loader_icd * loader_icd_create(const struct loader_instance *inst) 1200{ 1201 struct loader_icd *icd; 1202 1203 icd = loader_heap_alloc(inst, sizeof(*icd), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1204 if (!icd) 1205 return NULL; 1206 1207 memset(icd, 0, sizeof(*icd)); 1208 1209 return icd; 1210} 1211 1212static struct loader_icd *loader_icd_add( 1213 struct loader_instance *ptr_inst, 1214 const struct loader_scanned_icds *icd_lib) 1215{ 1216 struct loader_icd *icd; 1217 1218 icd = loader_icd_create(ptr_inst); 1219 if (!icd) 1220 return NULL; 1221 1222 icd->this_icd_lib = icd_lib; 1223 icd->this_instance = ptr_inst; 1224 1225 /* prepend to the list */ 1226 icd->next = ptr_inst->icds; 1227 ptr_inst->icds = icd; 1228 ptr_inst->total_icd_count++; 1229 1230 return icd; 1231} 1232 1233void loader_scanned_icd_clear( 1234 const struct loader_instance *inst, 1235 struct loader_icd_libs *icd_libs) 1236{ 1237 if (icd_libs->capacity == 0) 1238 return; 1239 for (uint32_t i = 0; i < icd_libs->count; i++) { 1240 loader_platform_close_library(icd_libs->list[i].handle); 1241 loader_heap_free(inst, icd_libs->list[i].lib_name); 1242 } 1243 loader_heap_free(inst, icd_libs->list); 1244 icd_libs->capacity = 0; 1245 icd_libs->count = 0; 1246 icd_libs->list = NULL; 1247} 1248 1249static void loader_scanned_icd_init(const struct loader_instance *inst, 1250 struct loader_icd_libs *icd_libs) 1251{ 1252 loader_scanned_icd_clear(inst, icd_libs); 1253 icd_libs->capacity = 8 * sizeof(struct loader_scanned_icds); 1254 icd_libs->list = loader_heap_alloc(inst, icd_libs->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1255 1256} 1257 1258static void loader_scanned_icd_add( 1259 const struct loader_instance *inst, 1260 struct loader_icd_libs *icd_libs, 1261 const char *filename, 1262 uint32_t api_version) 1263{ 1264 loader_platform_dl_handle handle; 1265 PFN_vkCreateInstance fp_create_inst; 1266 PFN_vkEnumerateInstanceExtensionProperties fp_get_global_ext_props; 1267 PFN_vkGetInstanceProcAddr fp_get_proc_addr; 1268 struct loader_scanned_icds *new_node; 1269 1270 /* TODO implement ref counting of libraries, for now this function leaves 1271 libraries open and the scanned_icd_clear closes them */ 1272 // Used to call: dlopen(filename, RTLD_LAZY); 1273 handle = loader_platform_open_library(filename); 1274 if (!handle) { 1275 loader_log(inst, VK_DEBUG_REPORT_WARN_BIT_EXT, 0, loader_platform_open_library_error(filename)); 1276 return; 1277 } 1278 1279#define LOOKUP_LD(func_ptr, func) do { \ 1280 func_ptr = (PFN_vk ##func) loader_platform_get_proc_address(handle, "vk" #func); \ 1281 if (!func_ptr) { \ 1282 loader_log(inst, VK_DEBUG_REPORT_WARN_BIT_EXT, 0, loader_platform_get_proc_address_error("vk" #func)); \ 1283 return; \ 1284 } \ 1285} while (0) 1286 1287 LOOKUP_LD(fp_get_proc_addr, GetInstanceProcAddr); 1288 LOOKUP_LD(fp_create_inst, CreateInstance); 1289 
LOOKUP_LD(fp_get_global_ext_props, EnumerateInstanceExtensionProperties); 1290 1291#undef LOOKUP_LD 1292 1293 // check for enough capacity 1294 if ((icd_libs->count * sizeof(struct loader_scanned_icds)) >= icd_libs->capacity) { 1295 1296 icd_libs->list = loader_heap_realloc(inst, 1297 icd_libs->list, 1298 icd_libs->capacity, 1299 icd_libs->capacity * 2, 1300 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1301 // double capacity 1302 icd_libs->capacity *= 2; 1303 } 1304 new_node = &(icd_libs->list[icd_libs->count]); 1305 1306 new_node->handle = handle; 1307 new_node->api_version = api_version; 1308 new_node->GetInstanceProcAddr = fp_get_proc_addr; 1309 new_node->CreateInstance = fp_create_inst; 1310 new_node->EnumerateInstanceExtensionProperties = fp_get_global_ext_props; 1311 1312 new_node->lib_name = (char *) loader_heap_alloc(inst, 1313 strlen(filename) + 1, 1314 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1315 if (!new_node->lib_name) { 1316 loader_log(inst, VK_DEBUG_REPORT_WARN_BIT_EXT, 0, "Out of memory can't add icd"); 1317 return; 1318 } 1319 strcpy(new_node->lib_name, filename); 1320 icd_libs->count++; 1321} 1322 1323static bool loader_icd_init_entrys(struct loader_icd *icd, 1324 VkInstance inst, 1325 const PFN_vkGetInstanceProcAddr fp_gipa) 1326{ 1327 /* initialize entrypoint function pointers */ 1328 1329 #define LOOKUP_GIPA(func, required) do { \ 1330 icd->func = (PFN_vk ##func) fp_gipa(inst, "vk" #func); \ 1331 if (!icd->func && required) { \ 1332 loader_log((struct loader_instance *) inst, VK_DEBUG_REPORT_WARN_BIT_EXT, 0, \ 1333 loader_platform_get_proc_address_error("vk" #func)); \ 1334 return false; \ 1335 } \ 1336 } while (0) 1337 1338 LOOKUP_GIPA(GetDeviceProcAddr, true); 1339 LOOKUP_GIPA(DestroyInstance, true); 1340 LOOKUP_GIPA(EnumeratePhysicalDevices, true); 1341 LOOKUP_GIPA(GetPhysicalDeviceFeatures, true); 1342 LOOKUP_GIPA(GetPhysicalDeviceFormatProperties, true); 1343 LOOKUP_GIPA(GetPhysicalDeviceImageFormatProperties, true); 1344 LOOKUP_GIPA(CreateDevice, true); 1345 LOOKUP_GIPA(GetPhysicalDeviceProperties, true); 1346 LOOKUP_GIPA(GetPhysicalDeviceMemoryProperties, true); 1347 LOOKUP_GIPA(GetPhysicalDeviceQueueFamilyProperties, true); 1348 LOOKUP_GIPA(EnumerateDeviceExtensionProperties, true); 1349 LOOKUP_GIPA(GetPhysicalDeviceSparseImageFormatProperties, true); 1350 LOOKUP_GIPA(CreateDebugReportCallbackEXT, false); 1351 LOOKUP_GIPA(DestroyDebugReportCallbackEXT, false); 1352 LOOKUP_GIPA(GetPhysicalDeviceSurfaceSupportKHR, false); 1353 LOOKUP_GIPA(GetPhysicalDeviceSurfaceCapabilitiesKHR, false); 1354 LOOKUP_GIPA(GetPhysicalDeviceSurfaceFormatsKHR, false); 1355 LOOKUP_GIPA(GetPhysicalDeviceSurfacePresentModesKHR, false); 1356#ifdef VK_USE_PLATFORM_WIN32_KHR 1357 LOOKUP_GIPA(GetPhysicalDeviceWin32PresentationSupportKHR, false); 1358#endif 1359#ifdef VK_USE_PLATFORM_XCB_KHR 1360 LOOKUP_GIPA(GetPhysicalDeviceXcbPresentationSupportKHR, false); 1361#endif 1362 1363#undef LOOKUP_GIPA 1364 1365 return true; 1366} 1367 1368static void loader_debug_init(void) 1369{ 1370 const char *env; 1371 1372 if (g_loader_debug > 0) 1373 return; 1374 1375 g_loader_debug = 0; 1376 1377 /* parse comma-separated debug options */ 1378 env = getenv("VK_LOADER_DEBUG"); 1379 while (env) { 1380 const char *p = strchr(env, ','); 1381 size_t len; 1382 1383 if (p) 1384 len = p - env; 1385 else 1386 len = strlen(env); 1387 1388 if (len > 0) { 1389 if (strncmp(env, "all", len) == 0) { 1390 g_loader_debug = ~0u; 1391 g_loader_log_msgs = ~0u; 1392 } else if (strncmp(env, "warn", len) == 0) { 1393 g_loader_debug |= 
LOADER_WARN_BIT;
                g_loader_log_msgs |= VK_DEBUG_REPORT_WARN_BIT_EXT;
            } else if (strncmp(env, "info", len) == 0) {
                g_loader_debug |= LOADER_INFO_BIT;
                g_loader_log_msgs |= VK_DEBUG_REPORT_INFO_BIT_EXT;
            } else if (strncmp(env, "perf", len) == 0) {
                g_loader_debug |= LOADER_PERF_BIT;
                g_loader_log_msgs |= VK_DEBUG_REPORT_PERF_WARN_BIT_EXT;
            } else if (strncmp(env, "error", len) == 0) {
                g_loader_debug |= LOADER_ERROR_BIT;
                g_loader_log_msgs |= VK_DEBUG_REPORT_ERROR_BIT_EXT;
            } else if (strncmp(env, "debug", len) == 0) {
                g_loader_debug |= LOADER_DEBUG_BIT;
                g_loader_log_msgs |= VK_DEBUG_REPORT_DEBUG_BIT_EXT;
            }
        }

        if (!p)
            break;

        env = p + 1;
    }
}

void loader_initialize(void)
{
    // initialize mutexes
    loader_platform_thread_create_mutex(&loader_lock);
    loader_platform_thread_create_mutex(&loader_json_lock);

    // initialize logging
    loader_debug_init();

    // initialize cJSON to use the loader's allocation callbacks
    cJSON_Hooks alloc_fns = {
        .malloc_fn = loader_tls_heap_alloc,
        .free_fn = loader_tls_heap_free,
    };
    cJSON_InitHooks(&alloc_fns);
}

struct loader_manifest_files {
    uint32_t count;
    char **filename_list;
};

/**
 * Get the next file or directory name from a PATH_SEPERATOR-delimited string
 * list or registry key path.
 *
 * \returns
 * A pointer to the first character of the next path in the list (or to the
 * terminating NUL if there is none). The separator, if found, is overwritten
 * with '\0' so that the input pointer now names just the current element.
 * Note: the input string is modified in some cases. PASS IN A COPY!
 */
static char *loader_get_next_path(char *path)
{
    uint32_t len;
    char *next;

    if (path == NULL)
        return NULL;
    next = strchr(path, PATH_SEPERATOR);
    if (next == NULL) {
        len = (uint32_t) strlen(path);
        next = path + len;
    }
    else {
        *next = '\0';
        next++;
    }

    return next;
}

/**
 * Given a path which is absolute or relative, expand the path if relative or
 * leave the path unmodified if absolute. The base path to prepend to relative
 * paths is given in rel_base.
 *
 * \returns
 * A string in out_fullpath of the full absolute path
 */
static void loader_expand_path(const char *path,
                               const char *rel_base,
                               size_t out_size,
                               char *out_fullpath)
{
    if (loader_platform_is_path_absolute(path)) {
        // do not prepend a base to an absolute path
        rel_base = "";
    }

    loader_platform_combine_path(out_fullpath, out_size, rel_base, path, NULL);
}

/**
 * Given a filename (file) and a list of paths (dirs), try to find an existing
 * file in the paths. If the filename is already a path, the given paths are
 * not searched.
 *
 * \returns
 * A string in out_fullpath of either the full path or file.
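 *
 * Illustrative call (paths are hypothetical, assuming PATH_SEPERATOR is ':'):
 *
 *     char out[MAX_STRING_SIZE];
 *     loader_get_fullpath("libVkLayer_example.so",
 *                         "/usr/local/lib:/usr/lib", sizeof(out), out);
 *
 * writes the first existing candidate, e.g. "/usr/local/lib/libVkLayer_example.so",
 * to out; if no candidate exists, out simply receives "libVkLayer_example.so".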
1495 */ 1496static void loader_get_fullpath(const char *file, 1497 const char *dirs, 1498 size_t out_size, 1499 char *out_fullpath) 1500{ 1501 if (!loader_platform_is_path(file) && *dirs) { 1502 char *dirs_copy, *dir, *next_dir; 1503 1504 dirs_copy = loader_stack_alloc(strlen(dirs) + 1); 1505 strcpy(dirs_copy, dirs); 1506 1507 //find if file exists after prepending paths in given list 1508 for (dir = dirs_copy; 1509 *dir && (next_dir = loader_get_next_path(dir)); 1510 dir = next_dir) { 1511 loader_platform_combine_path(out_fullpath, out_size, dir, file, NULL); 1512 if (loader_platform_file_exists(out_fullpath)) { 1513 return; 1514 } 1515 } 1516 } 1517 1518 snprintf(out_fullpath, out_size, "%s", file); 1519} 1520 1521/** 1522 * Read a JSON file into a buffer. 1523 * 1524 * \returns 1525 * A pointer to a cJSON object representing the JSON parse tree. 1526 * This returned buffer should be freed by caller. 1527 */ 1528static cJSON *loader_get_json(const struct loader_instance *inst, const char *filename) 1529{ 1530 FILE *file; 1531 char *json_buf; 1532 cJSON *json; 1533 uint64_t len; 1534 file = fopen(filename,"rb"); 1535 if (!file) { 1536 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Couldn't open JSON file %s", filename); 1537 return NULL; 1538 } 1539 fseek(file, 0, SEEK_END); 1540 len = ftell(file); 1541 fseek(file, 0, SEEK_SET); 1542 json_buf = (char*) loader_stack_alloc(len+1); 1543 if (json_buf == NULL) { 1544 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Out of memory can't get JSON file"); 1545 fclose(file); 1546 return NULL; 1547 } 1548 if (fread(json_buf, sizeof(char), len, file) != len) { 1549 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "fread failed can't get JSON file"); 1550 fclose(file); 1551 return NULL; 1552 } 1553 fclose(file); 1554 json_buf[len] = '\0'; 1555 1556 //parse text from file 1557 json = cJSON_Parse(json_buf); 1558 if (json == NULL) 1559 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Can't parse JSON file %s", filename); 1560 return json; 1561} 1562 1563/** 1564 * Do a deep copy of the loader_layer_properties structure. 
1565 */ 1566static void loader_copy_layer_properties( 1567 const struct loader_instance *inst, 1568 struct loader_layer_properties *dst, 1569 struct loader_layer_properties *src) 1570{ 1571 uint32_t cnt, i; 1572 memcpy(dst, src, sizeof (*src)); 1573 dst->instance_extension_list.list = loader_heap_alloc( 1574 inst, 1575 sizeof(VkExtensionProperties) * 1576 src->instance_extension_list.count, 1577 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1578 dst->instance_extension_list.capacity = sizeof(VkExtensionProperties) * 1579 src->instance_extension_list.count; 1580 memcpy(dst->instance_extension_list.list, src->instance_extension_list.list, 1581 dst->instance_extension_list.capacity); 1582 dst->device_extension_list.list = loader_heap_alloc( 1583 inst, 1584 sizeof(struct loader_dev_ext_props) * 1585 src->device_extension_list.count, 1586 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1587 1588 dst->device_extension_list.capacity = sizeof(struct loader_dev_ext_props) * 1589 src->device_extension_list.count; 1590 memcpy(dst->device_extension_list.list, src->device_extension_list.list, 1591 dst->device_extension_list.capacity); 1592 if (src->device_extension_list.count > 0 && 1593 src->device_extension_list.list->entrypoint_count > 0) { 1594 cnt = src->device_extension_list.list->entrypoint_count; 1595 dst->device_extension_list.list->entrypoints = loader_heap_alloc( 1596 inst, 1597 sizeof(char *) * cnt, 1598 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1599 for (i = 0; i < cnt; i++) { 1600 dst->device_extension_list.list->entrypoints[i] = loader_heap_alloc( 1601 inst, 1602 strlen(src->device_extension_list.list->entrypoints[i]) + 1, 1603 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 1604 strcpy(dst->device_extension_list.list->entrypoints[i], 1605 src->device_extension_list.list->entrypoints[i]); 1606 } 1607 } 1608} 1609 1610/** 1611 * Given a cJSON struct (json) of the top level JSON object from layer manifest 1612 * file, add entry to the layer_list. 1613 * Fill out the layer_properties in this list entry from the input cJSON object. 1614 * 1615 * \returns 1616 * void 1617 * layer_list has a new entry and initialized accordingly. 1618 * If the json input object does not have all the required fields no entry 1619 * is added to the list. 
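 *
 * For reference, a minimal manifest that satisfies the required fields parsed
 * below might look like this (layer name, path and versions are illustrative
 * only):
 *
 *     {
 *         "file_format_version": "1.0.0",
 *         "layer": {
 *             "name": "VK_LAYER_example",
 *             "type": "GLOBAL",
 *             "library_path": "./libVkLayer_example.so",
 *             "api_version": "1.0.3",
 *             "implementation_version": "1",
 *             "description": "An example layer",
 *             "disable_environment": { "DISABLE_VK_LAYER_EXAMPLE": "1" }
 *         }
 *     }
 *
 * ("disable_environment" is only required for implicit layers.)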
1620 */ 1621static void loader_add_layer_properties(const struct loader_instance *inst, 1622 struct loader_layer_list *layer_instance_list, 1623 struct loader_layer_list *layer_device_list, 1624 cJSON *json, 1625 bool is_implicit, 1626 char *filename) 1627{ 1628 /* Fields in layer manifest file that are required: 1629 * (required) “file_format_version” 1630 * following are required in the "layer" object: 1631 * (required) "name" 1632 * (required) "type" 1633 * (required) “library_path” 1634 * (required) “api_version” 1635 * (required) “implementation_version” 1636 * (required) “description” 1637 * (required for implicit layers) “disable_environment” 1638 * 1639 * First get all required items and if any missing abort 1640 */ 1641 1642 cJSON *item, *layer_node, *ext_item; 1643 char *temp; 1644 char *name, *type, *library_path, *api_version; 1645 char *implementation_version, *description; 1646 cJSON *disable_environment; 1647 int i, j; 1648 VkExtensionProperties ext_prop; 1649 item = cJSON_GetObjectItem(json, "file_format_version"); 1650 if (item == NULL) { 1651 return; 1652 } 1653 char *file_vers = cJSON_PrintUnformatted(item); 1654 loader_log(inst, VK_DEBUG_REPORT_INFO_BIT_EXT, 0, "Found manifest file %s, version %s", 1655 filename, file_vers); 1656 if (strcmp(file_vers, "\"1.0.0\"") != 0) 1657 loader_log(inst, VK_DEBUG_REPORT_WARN_BIT_EXT, 0, "Unexpected manifest file version (expected 1.0.0), may cause errors"); 1658 loader_tls_heap_free(file_vers); 1659 1660 layer_node = cJSON_GetObjectItem(json, "layer"); 1661 if (layer_node == NULL) { 1662 loader_log(inst, VK_DEBUG_REPORT_WARN_BIT_EXT, 0, "Can't find \"layer\" object in manifest JSON file, skipping"); 1663 return; 1664 } 1665 1666 // loop through all "layer" objects in the file 1667 do { 1668#define GET_JSON_OBJECT(node, var) { \ 1669 var = cJSON_GetObjectItem(node, #var); \ 1670 if (var == NULL) { \ 1671 layer_node = layer_node->next; \ 1672 continue; \ 1673 } \ 1674 } 1675#define GET_JSON_ITEM(node, var) { \ 1676 item = cJSON_GetObjectItem(node, #var); \ 1677 if (item == NULL) { \ 1678 layer_node = layer_node->next; \ 1679 continue; \ 1680 } \ 1681 temp = cJSON_Print(item); \ 1682 temp[strlen(temp) - 1] = '\0'; \ 1683 var = loader_stack_alloc(strlen(temp) + 1); \ 1684 strcpy(var, &temp[1]); \ 1685 loader_tls_heap_free(temp); \ 1686 } 1687 GET_JSON_ITEM(layer_node, name) 1688 GET_JSON_ITEM(layer_node, type) 1689 GET_JSON_ITEM(layer_node, library_path) 1690 GET_JSON_ITEM(layer_node, api_version) 1691 GET_JSON_ITEM(layer_node, implementation_version) 1692 GET_JSON_ITEM(layer_node, description) 1693 if (is_implicit) { 1694 GET_JSON_OBJECT(layer_node, disable_environment) 1695 } 1696#undef GET_JSON_ITEM 1697#undef GET_JSON_OBJECT 1698 1699 // add list entry 1700 struct loader_layer_properties *props=NULL; 1701 if (!strcmp(type, "DEVICE")) { 1702 if (layer_device_list == NULL) { 1703 layer_node = layer_node->next; 1704 continue; 1705 } 1706 props = loader_get_next_layer_property(inst, layer_device_list); 1707 props->type = (is_implicit) ? VK_LAYER_TYPE_DEVICE_IMPLICIT : VK_LAYER_TYPE_DEVICE_EXPLICIT; 1708 } 1709 if (!strcmp(type, "INSTANCE")) { 1710 if (layer_instance_list == NULL) { 1711 layer_node = layer_node->next; 1712 continue; 1713 } 1714 props = loader_get_next_layer_property(inst, layer_instance_list); 1715 props->type = (is_implicit) ? 
VK_LAYER_TYPE_INSTANCE_IMPLICIT : VK_LAYER_TYPE_INSTANCE_EXPLICIT; 1716 } 1717 if (!strcmp(type, "GLOBAL")) { 1718 if (layer_instance_list != NULL) 1719 props = loader_get_next_layer_property(inst, layer_instance_list); 1720 else if (layer_device_list != NULL) 1721 props = loader_get_next_layer_property(inst, layer_device_list); 1722 else { 1723 layer_node = layer_node->next; 1724 continue; 1725 } 1726 props->type = (is_implicit) ? VK_LAYER_TYPE_GLOBAL_IMPLICIT : VK_LAYER_TYPE_GLOBAL_EXPLICIT; 1727 } 1728 1729 if (props == NULL) { 1730 layer_node = layer_node->next; 1731 continue; 1732 } 1733 1734 strncpy(props->info.layerName, name, sizeof (props->info.layerName)); 1735 props->info.layerName[sizeof (props->info.layerName) - 1] = '\0'; 1736 1737 char *fullpath = props->lib_name; 1738 char *rel_base; 1739 if (loader_platform_is_path(library_path)) { 1740 // a relative or absolute path 1741 char *name_copy = loader_stack_alloc(strlen(filename) + 1); 1742 strcpy(name_copy, filename); 1743 rel_base = loader_platform_dirname(name_copy); 1744 loader_expand_path(library_path, rel_base, MAX_STRING_SIZE, fullpath); 1745 } else { 1746 // a filename which is assumed in a system directory 1747 loader_get_fullpath(library_path, DEFAULT_VK_LAYERS_PATH, MAX_STRING_SIZE, fullpath); 1748 } 1749 props->info.specVersion = loader_make_version(api_version); 1750 props->info.implementationVersion = atoi(implementation_version); 1751 strncpy((char *) props->info.description, description, sizeof (props->info.description)); 1752 props->info.description[sizeof (props->info.description) - 1] = '\0'; 1753 if (is_implicit) { 1754 strncpy(props->disable_env_var.name, disable_environment->child->string, sizeof (props->disable_env_var.name)); 1755 props->disable_env_var.name[sizeof (props->disable_env_var.name) - 1] = '\0'; 1756 strncpy(props->disable_env_var.value, disable_environment->child->valuestring, sizeof (props->disable_env_var.value)); 1757 props->disable_env_var.value[sizeof (props->disable_env_var.value) - 1] = '\0'; 1758 } 1759 1760 /** 1761 * Now get all optional items and objects and put in list: 1762 * functions 1763 * instance_extensions 1764 * device_extensions 1765 * enable_environment (implicit layers only) 1766 * disable_environment (implicit_layers_only) 1767 */ 1768#define GET_JSON_OBJECT(node, var) { \ 1769 var = cJSON_GetObjectItem(node, #var); \ 1770 } 1771#define GET_JSON_ITEM(node, var) { \ 1772 item = cJSON_GetObjectItem(node, #var); \ 1773 if (item != NULL) { \ 1774 temp = cJSON_Print(item); \ 1775 temp[strlen(temp) - 1] = '\0'; \ 1776 var = loader_stack_alloc(strlen(temp) + 1);\ 1777 strcpy(var, &temp[1]); \ 1778 loader_tls_heap_free(temp); \ 1779 } \ 1780 } 1781 1782 cJSON *instance_extensions, *device_extensions, *functions, *enable_environment; 1783 cJSON *entrypoints; 1784 char *vkGetInstanceProcAddr, *vkGetDeviceProcAddr, *spec_version; 1785 char **entry_array; 1786 vkGetInstanceProcAddr = NULL; 1787 vkGetDeviceProcAddr = NULL; 1788 spec_version = NULL; 1789 entrypoints = NULL; 1790 entry_array = NULL; 1791 /** 1792 * functions 1793 * vkGetInstanceProcAddr 1794 * vkGetDeviceProcAddr 1795 */ 1796 GET_JSON_OBJECT(layer_node, functions) 1797 if (functions != NULL) { 1798 GET_JSON_ITEM(functions, vkGetInstanceProcAddr) 1799 GET_JSON_ITEM(functions, vkGetDeviceProcAddr) 1800 if (vkGetInstanceProcAddr != NULL) 1801 strncpy(props->functions.str_gipa, vkGetInstanceProcAddr, sizeof (props->functions.str_gipa)); 1802 props->functions.str_gipa[sizeof (props->functions.str_gipa) - 1] = '\0'; 1803 
if (vkGetDeviceProcAddr != NULL) 1804 strncpy(props->functions.str_gdpa, vkGetDeviceProcAddr, sizeof (props->functions.str_gdpa)); 1805 props->functions.str_gdpa[sizeof (props->functions.str_gdpa) - 1] = '\0'; 1806 } 1807 /** 1808 * instance_extensions 1809 * array of 1810 * name 1811 * spec_version 1812 */ 1813 GET_JSON_OBJECT(layer_node, instance_extensions) 1814 if (instance_extensions != NULL) { 1815 int count = cJSON_GetArraySize(instance_extensions); 1816 for (i = 0; i < count; i++) { 1817 ext_item = cJSON_GetArrayItem(instance_extensions, i); 1818 GET_JSON_ITEM(ext_item, name) 1819 GET_JSON_ITEM(ext_item, spec_version) 1820 if (name != NULL) { 1821 strncpy(ext_prop.extensionName, name, sizeof (ext_prop.extensionName)); 1822 ext_prop.extensionName[sizeof (ext_prop.extensionName) - 1] = '\0'; 1823 } 1824 ext_prop.specVersion = atoi(spec_version); 1825 loader_add_to_ext_list(inst, &props->instance_extension_list, 1, &ext_prop); 1826 } 1827 } 1828 /** 1829 * device_extensions 1830 * array of 1831 * name 1832 * spec_version 1833 * entrypoints 1834 */ 1835 GET_JSON_OBJECT(layer_node, device_extensions) 1836 if (device_extensions != NULL) { 1837 int count = cJSON_GetArraySize(device_extensions); 1838 for (i = 0; i < count; i++) { 1839 ext_item = cJSON_GetArrayItem(device_extensions, i); 1840 GET_JSON_ITEM(ext_item, name) 1841 GET_JSON_ITEM(ext_item, spec_version) 1842 if (name != NULL) { 1843 strncpy(ext_prop.extensionName, name, sizeof (ext_prop.extensionName)); 1844 ext_prop.extensionName[sizeof (ext_prop.extensionName) - 1] = '\0'; 1845 } 1846 ext_prop.specVersion = atoi(spec_version); 1847 //entrypoints = cJSON_GetObjectItem(ext_item, "entrypoints"); 1848 GET_JSON_OBJECT(ext_item, entrypoints) 1849 int entry_count; 1850 if (entrypoints == NULL) 1851 continue; 1852 entry_count = cJSON_GetArraySize(entrypoints); 1853 if (entry_count) 1854 entry_array = (char **) loader_stack_alloc(sizeof(char *) * entry_count); 1855 for (j = 0; j < entry_count; j++) { 1856 ext_item = cJSON_GetArrayItem(entrypoints, j); 1857 if (ext_item != NULL) { 1858 temp = cJSON_Print(ext_item); 1859 temp[strlen(temp) - 1] = '\0'; 1860 entry_array[j] = loader_stack_alloc(strlen(temp) + 1); 1861 strcpy(entry_array[j], &temp[1]); 1862 loader_tls_heap_free(temp); 1863 } 1864 } 1865 loader_add_to_dev_ext_list(inst, &props->device_extension_list, 1866 &ext_prop, entry_count, entry_array); 1867 } 1868 } 1869 if (is_implicit) { 1870 GET_JSON_OBJECT(layer_node, enable_environment) 1871 strncpy(props->enable_env_var.name, enable_environment->child->string, sizeof (props->enable_env_var.name)); 1872 props->enable_env_var.name[sizeof (props->enable_env_var.name) - 1] = '\0'; 1873 strncpy(props->enable_env_var.value, enable_environment->child->valuestring, sizeof (props->enable_env_var.value)); 1874 props->enable_env_var.value[sizeof (props->enable_env_var.value) - 1] = '\0'; 1875 //TODO add disable_environment for implicit layers 1876 } 1877#undef GET_JSON_ITEM 1878#undef GET_JSON_OBJECT 1879 // for global layers need to add them to both device and instance list 1880 if (!strcmp(type, "GLOBAL")) { 1881 struct loader_layer_properties *dev_props; 1882 if (layer_instance_list == NULL || layer_device_list == NULL) { 1883 layer_node = layer_node->next; 1884 continue; 1885 } 1886 dev_props = loader_get_next_layer_property(inst, layer_device_list); 1887 //copy into device layer list 1888 loader_copy_layer_properties(inst, dev_props, props); 1889 } 1890 layer_node = layer_node->next; 1891 } while (layer_node != NULL); 1892 return; 1893} 
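/*
 * For reference, a rough sketch of the two JSON manifest shapes this file
 * consumes. These are illustrative only: the layer, library, extension and
 * environment-variable names, paths, and version strings below are made-up
 * placeholders, not real files or real extensions.
 *
 * Layer manifest (fields read by loader_add_layer_properties above):
 * {
 *     "file_format_version": "1.0.0",
 *     "layer": {
 *         "name": "VK_LAYER_hypothetical_example",
 *         "type": "GLOBAL",
 *         "library_path": "./libVkLayer_hypothetical.so",
 *         "api_version": "1.0.0",
 *         "implementation_version": "1",
 *         "description": "hypothetical example layer",
 *         "functions": {
 *             "vkGetInstanceProcAddr": "hypothetical_GetInstanceProcAddr",
 *             "vkGetDeviceProcAddr": "hypothetical_GetDeviceProcAddr"
 *         },
 *         "instance_extensions": [
 *             { "name": "VK_EXT_hypothetical_instance_ext", "spec_version": "1" }
 *         ],
 *         "device_extensions": [
 *             { "name": "VK_EXT_hypothetical_device_ext", "spec_version": "1",
 *               "entrypoints": [ "vkHypotheticalEXT" ] }
 *         ],
 *         "enable_environment": { "ENABLE_HYPOTHETICAL_LAYER": "1" },
 *         "disable_environment": { "DISABLE_HYPOTHETICAL_LAYER": "1" }
 *     }
 * }
 *
 * ICD manifest (fields read by loader_icd_scan below):
 * {
 *     "file_format_version": "1.0.0",
 *     "ICD": {
 *         "library_path": "path/to/hypothetical_icd_library",
 *         "api_version": "1.0.0"
 *     }
 * }
 */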
1894 1895/** 1896 * Find the Vulkan library manifest files. 1897 * 1898 * This function scans the location or env_override directories/files 1899 * for a list of JSON manifest files. If env_override is non-NULL 1900 * and has a valid value, then the location is ignored; otherwise, 1901 * location is used to look for manifest files. The location 1902 * is interpreted as a Registry path on Windows and as directory path(s) 1903 * on Linux. 1904 * 1905 * \returns 1906 * A string list of manifest files to be opened in the out_files param. 1907 * The list has a pointer to a string for each manifest filename. 1908 * When done using the list in out_files, pointers should be freed. 1909 * Location or override string lists can be either files or directories as follows: 1910 * | location | override 1911 * -------------------------------- 1912 * Win ICD | files | files 1913 * Win Layer | files | dirs 1914 * Linux ICD | dirs | files 1915 * Linux Layer| dirs | dirs 1916 */ 1917static void loader_get_manifest_files(const struct loader_instance *inst, 1918 const char *env_override, 1919 bool is_layer, 1920 const char *location, 1921 struct loader_manifest_files *out_files) 1922{ 1923 char *override = NULL; 1924 char *loc; 1925 char *file, *next_file, *name; 1926 size_t alloced_count = 64; 1927 char full_path[2048]; 1928 DIR *sysdir = NULL; 1929 bool list_is_dirs = false; 1930 struct dirent *dent; 1931 1932 out_files->count = 0; 1933 out_files->filename_list = NULL; 1934 1935 if (env_override != NULL && (override = getenv(env_override))) { 1936#if !defined(_WIN32) 1937 if (geteuid() != getuid()) { 1938 /* Don't allow setuid apps to use the env var: */ 1939 override = NULL; 1940 } 1941#endif 1942 } 1943 1944 if (location == NULL) { 1945 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 1946 "Can't get manifest files with NULL location, env_override=%s", 1947 env_override); 1948 return; 1949 } 1950 1951#if defined(_WIN32) 1952 list_is_dirs = (is_layer && override != NULL) ? true : false; 1953#else 1954 list_is_dirs = (override == NULL || is_layer) ?
true : false; 1955#endif 1956 // Make a copy of the input we are using so it is not modified 1957 // Also handle getting the location(s) from registry on Windows 1958 if (override == NULL) { 1959 loc = loader_stack_alloc(strlen(location) + 1); 1960 if (loc == NULL) { 1961 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Out of memory can't get manifest files"); 1962 return; 1963 } 1964 strcpy(loc, location); 1965#if defined(_WIN32) 1966 loc = loader_get_registry_files(inst, loc); 1967 if (loc == NULL) { 1968 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Registry lookup failed can't get manifest files"); 1969 return; 1970 } 1971#endif 1972 } 1973 else { 1974 loc = loader_stack_alloc(strlen(override) + 1); 1975 if (loc == NULL) { 1976 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Out of memory can't get manifest files"); 1977 return; 1978 } 1979 strcpy(loc, override); 1980 } 1981 1982 // Print out the paths being searched if debugging is enabled 1983 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Searching the following paths for manifest files: %s\n", loc); 1984 1985 file = loc; 1986 while (*file) { 1987 next_file = loader_get_next_path(file); 1988 if (list_is_dirs) { 1989 sysdir = opendir(file); 1990 name = NULL; 1991 if (sysdir) { 1992 dent = readdir(sysdir); 1993 if (dent == NULL) 1994 break; 1995 name = &(dent->d_name[0]); 1996 loader_get_fullpath(name, file, sizeof(full_path), full_path); 1997 name = full_path; 1998 } 1999 } 2000 else { 2001#if defined(_WIN32) 2002 name = file; 2003#else 2004 // only Linux has relative paths 2005 char *dir; 2006 // make a copy of location so it isn't modified 2007 dir = loader_stack_alloc(strlen(loc) + 1); 2008 if (dir == NULL) { 2009 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Out of memory can't get manifest files"); 2010 return; 2011 } 2012 strcpy(dir, loc); 2013 2014 loader_get_fullpath(file, dir, sizeof(full_path), full_path); 2015 2016 name = full_path; 2017#endif 2018 } 2019 while (name) { 2020 /* Look for files ending with ".json" suffix */ 2021 uint32_t nlen = (uint32_t) strlen(name); 2022 const char *suf = name + nlen - 5; 2023 if ((nlen > 5) && !strncmp(suf, ".json", 5)) { 2024 if (out_files->count == 0) { 2025 out_files->filename_list = loader_heap_alloc(inst, 2026 alloced_count * sizeof(char *), 2027 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 2028 } 2029 else if (out_files->count == alloced_count) { 2030 out_files->filename_list = loader_heap_realloc(inst, 2031 out_files->filename_list, 2032 alloced_count * sizeof(char *), 2033 alloced_count * sizeof(char *) * 2, 2034 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 2035 alloced_count *= 2; 2036 } 2037 if (out_files->filename_list == NULL) { 2038 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Out of memory can't alloc manifest file list"); 2039 return; 2040 } 2041 out_files->filename_list[out_files->count] = loader_heap_alloc( 2042 inst, 2043 strlen(name) + 1, 2044 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); 2045 if (out_files->filename_list[out_files->count] == NULL) { 2046 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Out of memory can't get manifest files"); 2047 return; 2048 } 2049 strcpy(out_files->filename_list[out_files->count], name); 2050 out_files->count++; 2051 } else if (!list_is_dirs) { 2052 loader_log(inst, VK_DEBUG_REPORT_WARN_BIT_EXT, 0, "Skipping manifest file %s, file name must end in .json", name); 2053 } 2054 if (list_is_dirs) { 2055 dent = readdir(sysdir); 2056 if (dent == NULL) 2057 break; 2058 name = &(dent->d_name[0]); 2059 loader_get_fullpath(name, 
file, sizeof(full_path), full_path); 2060 name = full_path; 2061 } 2062 else { 2063 break; 2064 } 2065 } 2066 if (sysdir) 2067 closedir(sysdir); 2068 file = next_file; 2069 } 2070 return; 2071} 2072 2073void loader_init_icd_lib_list() 2074{ 2075 2076} 2077 2078void loader_destroy_icd_lib_list() 2079{ 2080 2081} 2082/** 2083 * Try to find the Vulkan ICD driver(s). 2084 * 2085 * This function scans the default system loader path(s) or path 2086 * specified by the \c VK_ICD_FILENAMES environment variable in 2087 * order to find loadable VK ICDs manifest files. From these 2088 * manifest files it finds the ICD libraries. 2089 * 2090 * \returns 2091 * a list of icds that were discovered 2092 */ 2093void loader_icd_scan( 2094 const struct loader_instance *inst, 2095 struct loader_icd_libs *icds) 2096{ 2097 char *file_str; 2098 struct loader_manifest_files manifest_files; 2099 2100 loader_scanned_icd_init(inst, icds); 2101 // Get a list of manifest files for ICDs 2102 loader_get_manifest_files(inst, "VK_ICD_FILENAMES", false, 2103 DEFAULT_VK_DRIVERS_INFO, &manifest_files); 2104 if (manifest_files.count == 0) 2105 return; 2106 loader_platform_thread_lock_mutex(&loader_json_lock); 2107 for (uint32_t i = 0; i < manifest_files.count; i++) { 2108 file_str = manifest_files.filename_list[i]; 2109 if (file_str == NULL) 2110 continue; 2111 2112 cJSON *json; 2113 json = loader_get_json(inst, file_str); 2114 if (!json) 2115 continue; 2116 cJSON *item, *itemICD; 2117 item = cJSON_GetObjectItem(json, "file_format_version"); 2118 if (item == NULL) { 2119 loader_platform_thread_unlock_mutex(&loader_json_lock); 2120 return; 2121 } 2122 char *file_vers = cJSON_Print(item); 2123 loader_log(inst, VK_DEBUG_REPORT_INFO_BIT_EXT, 0, "Found manifest file %s, version %s", 2124 file_str, file_vers); 2125 if (strcmp(file_vers, "\"1.0.0\"") != 0) 2126 loader_log(inst, VK_DEBUG_REPORT_WARN_BIT_EXT, 0, "Unexpected manifest file version (expected 1.0.0), may cause errors"); 2127 loader_tls_heap_free(file_vers); 2128 itemICD = cJSON_GetObjectItem(json, "ICD"); 2129 if (itemICD != NULL) { 2130 item = cJSON_GetObjectItem(itemICD, "library_path"); 2131 if (item != NULL) { 2132 char *temp= cJSON_Print(item); 2133 if (!temp || strlen(temp) == 0) { 2134 loader_log(inst, VK_DEBUG_REPORT_WARN_BIT_EXT, 0, "Can't find \"library_path\" in ICD JSON file %s, skipping", file_str); 2135 loader_tls_heap_free(temp); 2136 loader_heap_free(inst, file_str); 2137 cJSON_Delete(json); 2138 continue; 2139 } 2140 //strip out extra quotes 2141 temp[strlen(temp) - 1] = '\0'; 2142 char *library_path = loader_stack_alloc(strlen(temp) + 1); 2143 strcpy(library_path, &temp[1]); 2144 loader_tls_heap_free(temp); 2145 if (!library_path || strlen(library_path) == 0) { 2146 loader_log(inst, VK_DEBUG_REPORT_WARN_BIT_EXT, 0, "Can't find \"library_path\" in ICD JSON file %s, skipping", file_str); 2147 loader_heap_free(inst, file_str); 2148 cJSON_Delete(json); 2149 continue; 2150 } 2151 char fullpath[MAX_STRING_SIZE]; 2152 // Print out the paths being searched if debugging is enabled 2153 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Searching for ICD drivers named %s default dir %s\n", library_path, DEFAULT_VK_DRIVERS_PATH); 2154 if (loader_platform_is_path(library_path)) { 2155 // a relative or absolute path 2156 char *name_copy = loader_stack_alloc(strlen(file_str) + 1); 2157 char *rel_base; 2158 strcpy(name_copy, file_str); 2159 rel_base = loader_platform_dirname(name_copy); 2160 loader_expand_path(library_path, rel_base, sizeof(fullpath), fullpath); 2161 } 
else { 2162 // a filename which is assumed in a system directory 2163 loader_get_fullpath(library_path, DEFAULT_VK_DRIVERS_PATH, sizeof(fullpath), fullpath); 2164 } 2165 2166 uint32_t vers = 0; 2167 item = cJSON_GetObjectItem(itemICD, "api_version"); 2168 if (item != NULL) { 2169 temp= cJSON_Print(item); 2170 vers = loader_make_version(temp); 2171 loader_tls_heap_free(temp); 2172 } 2173 loader_scanned_icd_add(inst, icds, fullpath, vers); 2174 } 2175 else 2176 loader_log(inst, VK_DEBUG_REPORT_WARN_BIT_EXT, 0, "Can't find \"library_path\" object in ICD JSON file %s, skipping", file_str); 2177 } 2178 else 2179 loader_log(inst, VK_DEBUG_REPORT_WARN_BIT_EXT, 0, "Can't find \"ICD\" object in ICD JSON file %s, skipping", file_str); 2180 2181 loader_heap_free(inst, file_str); 2182 cJSON_Delete(json); 2183 } 2184 loader_heap_free(inst, manifest_files.filename_list); 2185 loader_platform_thread_unlock_mutex(&loader_json_lock); 2186} 2187 2188 2189void loader_layer_scan( 2190 const struct loader_instance *inst, 2191 struct loader_layer_list *instance_layers, 2192 struct loader_layer_list *device_layers) 2193{ 2194 char *file_str; 2195 struct loader_manifest_files manifest_files; 2196 cJSON *json; 2197 uint32_t i; 2198 2199 // Get a list of manifest files for layers 2200 loader_get_manifest_files(inst, LAYERS_PATH_ENV, true, DEFAULT_VK_LAYERS_INFO, 2201 &manifest_files); 2202 if (manifest_files.count == 0) 2203 return; 2204 2205#if 0 //TODO 2206 /** 2207 * We need a list of the layer libraries, not just a list of 2208 * the layer properties (a layer library could expose more than 2209 * one layer property). This list of scanned layers would be 2210 * used to check for global and physicaldevice layer properties. 2211 */ 2212 if (!loader_init_layer_library_list(&loader.scanned_layer_libraries)) { 2213 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2214 "Alloc for layer list failed: %s line: %d", __FILE__, __LINE__); 2215 return; 2216 } 2217#endif 2218 2219 /* cleanup any previously scanned libraries */ 2220 loader_delete_layer_properties(inst, instance_layers); 2221 loader_delete_layer_properties(inst, device_layers); 2222 2223 loader_platform_thread_lock_mutex(&loader_json_lock); 2224 for (i = 0; i < manifest_files.count; i++) { 2225 file_str = manifest_files.filename_list[i]; 2226 if (file_str == NULL) 2227 continue; 2228 2229 // parse file into JSON struct 2230 json = loader_get_json(inst, file_str); 2231 if (!json) { 2232 continue; 2233 } 2234 2235 //TODO pass in implicit versus explicit bool 2236 //TODO error if device layers expose instance_extensions 2237 //TODO error if instance layers expose device extensions 2238 loader_add_layer_properties(inst, 2239 instance_layers, 2240 device_layers, 2241 json, 2242 false, 2243 file_str); 2244 2245 loader_heap_free(inst, file_str); 2246 cJSON_Delete(json); 2247 } 2248 loader_heap_free(inst, manifest_files.filename_list); 2249 loader_platform_thread_unlock_mutex(&loader_json_lock); 2250} 2251 2252static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_instance_internal(VkInstance inst, const char * pName) 2253{ 2254 // inst is not wrapped 2255 if (inst == VK_NULL_HANDLE) { 2256 return NULL; 2257 } 2258 VkLayerInstanceDispatchTable* disp_table = * (VkLayerInstanceDispatchTable **) inst; 2259 void *addr; 2260 2261 if (!strcmp(pName, "vkGetInstanceProcAddr")) 2262 return (void *) loader_gpa_instance_internal; 2263 2264 if (disp_table == NULL) 2265 return NULL; 2266 2267 addr = loader_lookup_instance_dispatch_table(disp_table, pName); 2268 if (addr) { 
2269 return addr; 2270 } 2271 2272 if (disp_table->GetInstanceProcAddr == NULL) { 2273 return NULL; 2274 } 2275 return disp_table->GetInstanceProcAddr(inst, pName); 2276} 2277 2278/** 2279 * Initialize device_ext dispatch table entry as follows: 2280 * If dev == NULL find all logical devices created within this instance and 2281 * init the entry (given by idx) in the ext dispatch table. 2282 * If dev != NULL only initialize the entry in the given dev's dispatch table. 2283 * The initialization value is gotten by calling down the device chain with GDPA. 2284 * If GDPA returns NULL then don't initialize the dispatch table entry. 2285 */ 2286static void loader_init_dispatch_dev_ext_entry(struct loader_instance *inst, 2287 struct loader_device *dev, 2288 uint32_t idx, 2289 const char *funcName) 2290 2291 { 2292 void *gdpa_value; 2293 if (dev != NULL) { 2294 gdpa_value = dev->loader_dispatch.core_dispatch.GetDeviceProcAddr( 2295 dev->device, funcName); 2296 if (gdpa_value != NULL) 2297 dev->loader_dispatch.ext_dispatch.DevExt[idx] = (PFN_vkDevExt) gdpa_value; 2298 } else { 2299 for (uint32_t i = 0; i < inst->total_icd_count; i++) { 2300 struct loader_icd *icd = &inst->icds[i]; 2301 struct loader_device *dev = icd->logical_device_list; 2302 while (dev) { 2303 gdpa_value = dev->loader_dispatch.core_dispatch.GetDeviceProcAddr( 2304 dev->device, funcName); 2305 if (gdpa_value != NULL) 2306 dev->loader_dispatch.ext_dispatch.DevExt[idx] = 2307 (PFN_vkDevExt) gdpa_value; 2308 dev = dev->next; 2309 } 2310 } 2311 } 2312 2313} 2314 2315/** 2316 * Find all dev extension in the hash table and initialize the dispatch table 2317 * for dev for each of those extension entrypoints found in hash table. 2318 2319 */ 2320static void loader_init_dispatch_dev_ext(struct loader_instance *inst, 2321 struct loader_device *dev) 2322{ 2323 for (uint32_t i = 0; i < MAX_NUM_DEV_EXTS; i++) { 2324 if (inst->disp_hash[i].func_name != NULL) 2325 loader_init_dispatch_dev_ext_entry(inst, dev, i, 2326 inst->disp_hash[i].func_name); 2327 } 2328} 2329 2330static bool loader_check_icds_for_address(struct loader_instance *inst, 2331 const char *funcName) 2332{ 2333 struct loader_icd *icd; 2334 icd = inst->icds; 2335 while (icd) { 2336 if (icd->this_icd_lib->GetInstanceProcAddr(icd->instance, funcName)) 2337 // this icd supports funcName 2338 return true; 2339 icd = icd->next; 2340 } 2341 2342 return false; 2343} 2344 2345static void loader_free_dev_ext_table(struct loader_instance *inst) 2346{ 2347 for (uint32_t i = 0; i < MAX_NUM_DEV_EXTS; i++) { 2348 loader_heap_free(inst, inst->disp_hash[i].func_name); 2349 loader_heap_free(inst, inst->disp_hash[i].list.index); 2350 2351 } 2352 memset(inst->disp_hash, 0, sizeof(inst->disp_hash)); 2353} 2354 2355static bool loader_add_dev_ext_table(struct loader_instance *inst, 2356 uint32_t *ptr_idx, 2357 const char *funcName) 2358{ 2359 uint32_t i; 2360 uint32_t idx = *ptr_idx; 2361 struct loader_dispatch_hash_list *list = &inst->disp_hash[idx].list; 2362 2363 if (!inst->disp_hash[idx].func_name) { 2364 // no entry here at this idx, so use it 2365 assert(list->capacity == 0); 2366 inst->disp_hash[idx].func_name = (char *) loader_heap_alloc(inst, 2367 strlen(funcName) + 1, 2368 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2369 if (inst->disp_hash[idx].func_name == NULL) { 2370 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2371 "loader_add_dev_ext_table() can't allocate memory for func_name"); 2372 return false; 2373 } 2374 strncpy(inst->disp_hash[idx].func_name, funcName, strlen(funcName) + 1); 2375 
return true; 2376 } 2377 2378 // check for enough capacity 2379 if (list->capacity == 0) { 2380 list->index = loader_heap_alloc(inst, 8 * sizeof(*(list->index)), 2381 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2382 if (list->index == NULL) { 2383 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2384 "loader_add_dev_ext_table() can't allocate list memory"); 2385 return false; 2386 } 2387 list->capacity = 8 * sizeof(*(list->index)); 2388 } else if (list->capacity < (list->count + 1) * sizeof(*(list->index))) { 2389 list->index = loader_heap_realloc(inst, list->index, list->capacity, 2390 list->capacity * 2, 2391 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2392 if (list->index == NULL) { 2393 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2394 "loader_add_dev_ext_table() can't reallocate list memory"); 2395 return false; 2396 } 2397 list->capacity *= 2; 2398 } 2399 2400 //find an unused index in the hash table and use it 2401 i = (idx + 1) % MAX_NUM_DEV_EXTS; 2402 do { 2403 if (!inst->disp_hash[i].func_name) { 2404 assert(inst->disp_hash[i].list.capacity == 0); 2405 inst->disp_hash[i].func_name = (char *) loader_heap_alloc(inst, 2406 strlen(funcName) + 1, 2407 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2408 if (inst->disp_hash[i].func_name == NULL) { 2409 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2410 "loader_add_dev_ext_table() can't allocate func_name memory"); 2411 return false; 2412 } 2413 strncpy(inst->disp_hash[i].func_name, funcName, strlen(funcName) + 1); 2414 list->index[list->count] = i; 2415 list->count++; 2416 *ptr_idx = i; 2417 return true; 2418 } 2419 i = (i + 1) % MAX_NUM_DEV_EXTS; 2420 } while (i != idx); 2421 2422 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2423 "loader_add_dev_ext_table() couldn't insert into hash table; is it full?"); 2424 return false; 2425} 2426 2427static bool loader_name_in_dev_ext_table(struct loader_instance *inst, 2428 uint32_t *idx, 2429 const char *funcName) 2430{ 2431 uint32_t alt_idx; 2432 if (inst->disp_hash[*idx].func_name && !strcmp( 2433 inst->disp_hash[*idx].func_name, 2434 funcName)) 2435 return true; 2436 2437 // funcName wasn't at the primary spot in the hash table 2438 // search the list of secondary locations (shallow search, not deep search) 2439 for (uint32_t i = 0; i < inst->disp_hash[*idx].list.count; i++) { 2440 alt_idx = inst->disp_hash[*idx].list.index[i]; 2441 if (!strcmp(inst->disp_hash[alt_idx].func_name, funcName)) { 2442 *idx = alt_idx; 2443 return true; 2444 } 2445 } 2446 2447 return false; 2448} 2449 2450/** 2451 * This function returns a generic trampoline code address for unknown entry points. 2452 * Presumably, these unknown entry points (as given by funcName) are device 2453 * extension entrypoints. A hash table is used to keep a list of unknown entry 2454 * points and their mapping to the device extension dispatch table 2455 * (struct loader_dev_ext_dispatch_table). 2456 * \returns 2457 * For a given entry point string (funcName), if an existing mapping is found the 2458 * trampoline address for that mapping is returned. Otherwise, this unknown entry point 2459 * has not been seen yet. Next check if a layer or ICD supports it. If so then a 2460 * new entry in the hash table is initialized and that trampoline address for 2461 * the new entry is returned. Null is returned if the hash table is full or 2462 if no discovered layer or ICD returns a non-NULL GetProcAddr for it.
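 *
 * A minimal worked illustration (the entry point name is hypothetical): for
 * funcName "vkFooBarEXT", the primary slot is
 * idx = murmurhash("vkFooBarEXT", 11, 0) % MAX_NUM_DEV_EXTS. If that slot is
 * already occupied by a different name, loader_add_dev_ext_table() probes
 * forward for a free slot and records the chosen index in the primary slot's
 * collision list; later lookups of "vkFooBarEXT" hash to the same primary
 * slot and then follow that list to the alternate index.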
2463 */ 2464void *loader_dev_ext_gpa(struct loader_instance *inst, 2465 const char *funcName) 2466{ 2467 uint32_t idx; 2468 uint32_t seed = 0; 2469 2470 idx = murmurhash(funcName, strlen(funcName), seed) % MAX_NUM_DEV_EXTS; 2471 2472 if (loader_name_in_dev_ext_table(inst, &idx, funcName)) 2473 // found funcName already in hash 2474 return loader_get_dev_ext_trampoline(idx); 2475 2476 // Check if funcName is supported in either ICDs or a layer library 2477 if (!loader_check_icds_for_address(inst, funcName)) { 2478 // TODO Add check in layer libraries for support of address 2479 // if support found in layers continue on 2480 return NULL; 2481 } 2482 2483 if (loader_add_dev_ext_table(inst, &idx, funcName)) { 2484 // successfully added new table entry 2485 // init any dev dispatch table entrys as needed 2486 loader_init_dispatch_dev_ext_entry(inst, NULL, idx, funcName); 2487 return loader_get_dev_ext_trampoline(idx); 2488 } 2489 2490 return NULL; 2491} 2492 2493struct loader_instance *loader_get_instance(const VkInstance instance) 2494{ 2495 /* look up the loader_instance in our list by comparing dispatch tables, as 2496 * there is no guarantee the instance is still a loader_instance* after any 2497 * layers which wrap the instance object. 2498 */ 2499 const VkLayerInstanceDispatchTable *disp; 2500 struct loader_instance *ptr_instance = NULL; 2501 disp = loader_get_instance_dispatch(instance); 2502 for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) { 2503 if (inst->disp == disp) { 2504 ptr_instance = inst; 2505 break; 2506 } 2507 } 2508 return ptr_instance; 2509} 2510 2511static loader_platform_dl_handle loader_add_layer_lib( 2512 const struct loader_instance *inst, 2513 const char *chain_type, 2514 struct loader_layer_properties *layer_prop) 2515{ 2516 struct loader_lib_info *new_layer_lib_list, *my_lib; 2517 size_t new_alloc_size; 2518 /* 2519 * TODO: We can now track this information in the 2520 * scanned_layer_libraries list. 
2521 */ 2522 for (uint32_t i = 0; i < loader.loaded_layer_lib_count; i++) { 2523 if (strcmp(loader.loaded_layer_lib_list[i].lib_name, layer_prop->lib_name) == 0) { 2524 /* Have already loaded this library, just increment ref count */ 2525 loader.loaded_layer_lib_list[i].ref_count++; 2526 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 2527 "%s Chain: Increment layer reference count for layer library %s", 2528 chain_type, layer_prop->lib_name); 2529 return loader.loaded_layer_lib_list[i].lib_handle; 2530 } 2531 } 2532 2533 /* Haven't seen this library so load it */ 2534 new_alloc_size = 0; 2535 if (loader.loaded_layer_lib_capacity == 0) 2536 new_alloc_size = 8 * sizeof(struct loader_lib_info); 2537 else if (loader.loaded_layer_lib_capacity <= loader.loaded_layer_lib_count * 2538 sizeof(struct loader_lib_info)) 2539 new_alloc_size = loader.loaded_layer_lib_capacity * 2; 2540 2541 if (new_alloc_size) { 2542 new_layer_lib_list = loader_heap_realloc( 2543 inst, loader.loaded_layer_lib_list, 2544 loader.loaded_layer_lib_capacity, 2545 new_alloc_size, 2546 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2547 if (!new_layer_lib_list) { 2548 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loader: realloc failed in loader_add_layer_lib"); 2549 return NULL; 2550 } 2551 loader.loaded_layer_lib_capacity = new_alloc_size; 2552 loader.loaded_layer_lib_list = new_layer_lib_list; 2553 } else 2554 new_layer_lib_list = loader.loaded_layer_lib_list; 2555 my_lib = &new_layer_lib_list[loader.loaded_layer_lib_count]; 2556 2557 strncpy(my_lib->lib_name, layer_prop->lib_name, sizeof(my_lib->lib_name)); 2558 my_lib->lib_name[sizeof(my_lib->lib_name) - 1] = '\0'; 2559 my_lib->ref_count = 0; 2560 my_lib->lib_handle = NULL; 2561 2562 if ((my_lib->lib_handle = loader_platform_open_library(my_lib->lib_name)) == NULL) { 2563 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 2564 loader_platform_open_library_error(my_lib->lib_name)); 2565 return NULL; 2566 } else { 2567 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 2568 "Chain: %s: Loading layer library %s", 2569 chain_type, layer_prop->lib_name); 2570 } 2571 loader.loaded_layer_lib_count++; 2572 my_lib->ref_count++; 2573 2574 return my_lib->lib_handle; 2575} 2576 2577static void loader_remove_layer_lib( 2578 struct loader_instance *inst, 2579 struct loader_layer_properties *layer_prop) 2580{ 2581 uint32_t idx; 2582 struct loader_lib_info *new_layer_lib_list, *my_lib = NULL; 2583 2584 for (uint32_t i = 0; i < loader.loaded_layer_lib_count; i++) { 2585 if (strcmp(loader.loaded_layer_lib_list[i].lib_name, layer_prop->lib_name) == 0) { 2586 /* found matching library */ 2587 idx = i; 2588 my_lib = &loader.loaded_layer_lib_list[i]; 2589 break; 2590 } 2591 } 2592 2593 if (my_lib) { 2594 my_lib->ref_count--; 2595 if (my_lib->ref_count > 0) { 2596 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 2597 "Decrement reference count for layer library %s", layer_prop->lib_name); 2598 return; 2599 } 2600 } 2601 loader_platform_close_library(my_lib->lib_handle); 2602 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, 2603 "Unloading layer library %s", layer_prop->lib_name); 2604 2605 /* Need to remove unused library from list */ 2606 new_layer_lib_list = loader_heap_alloc(inst, 2607 loader.loaded_layer_lib_capacity, 2608 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2609 if (!new_layer_lib_list) { 2610 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loader: heap alloc failed loader_remove_layer_library"); 2611 return; 2612 } 2613 2614 if (idx > 0) { 2615 /* Copy records before idx */ 
2616 memcpy(new_layer_lib_list, &loader.loaded_layer_lib_list[0], 2617 sizeof(struct loader_lib_info) * idx); 2618 } 2619 if (idx < (loader.loaded_layer_lib_count - 1)) { 2620 /* Copy records after idx */ 2621 memcpy(&new_layer_lib_list[idx], &loader.loaded_layer_lib_list[idx+1], 2622 sizeof(struct loader_lib_info) * (loader.loaded_layer_lib_count - idx - 1)); 2623 } 2624 2625 loader_heap_free(inst, loader.loaded_layer_lib_list); 2626 loader.loaded_layer_lib_count--; 2627 loader.loaded_layer_lib_list = new_layer_lib_list; 2628} 2629 2630 2631/** 2632 * Go through the search_list and find any layers which match type. If layer 2633 * type match is found in then add it to ext_list. 2634 */ 2635//TODO need to handle implict layer enable env var and disable env var 2636static void loader_add_layer_implicit( 2637 const struct loader_instance *inst, 2638 const enum layer_type type, 2639 struct loader_layer_list *list, 2640 const struct loader_layer_list *search_list) 2641{ 2642 uint32_t i; 2643 for (i = 0; i < search_list->count; i++) { 2644 const struct loader_layer_properties *prop = &search_list->list[i]; 2645 if (prop->type & type) { 2646 /* Found an layer with the same type, add to layer_list */ 2647 loader_add_to_layer_list(inst, list, 1, prop); 2648 } 2649 } 2650 2651} 2652 2653/** 2654 * Get the layer name(s) from the env_name environment variable. If layer 2655 * is found in search_list then add it to layer_list. But only add it to 2656 * layer_list if type matches. 2657 */ 2658static void loader_add_layer_env( 2659 const struct loader_instance *inst, 2660 const enum layer_type type, 2661 const char *env_name, 2662 struct loader_layer_list *layer_list, 2663 const struct loader_layer_list *search_list) 2664{ 2665 char *layerEnv; 2666 char *next, *name; 2667 2668 layerEnv = getenv(env_name); 2669 if (layerEnv == NULL) { 2670 return; 2671 } 2672 name = loader_stack_alloc(strlen(layerEnv) + 1); 2673 if (name == NULL) { 2674 return; 2675 } 2676 strcpy(name, layerEnv); 2677 2678 while (name && *name ) { 2679 next = loader_get_next_path(name); 2680 loader_find_layer_name_add_list(inst, name, type, search_list, layer_list); 2681 name = next; 2682 } 2683 2684 return; 2685} 2686 2687void loader_deactivate_instance_layers(struct loader_instance *instance) 2688{ 2689 if (!instance->activated_layer_list.count) { 2690 return; 2691 } 2692 2693 /* Create instance chain of enabled layers */ 2694 for (uint32_t i = 0; i < instance->activated_layer_list.count; i++) { 2695 struct loader_layer_properties *layer_prop = &instance->activated_layer_list.list[i]; 2696 2697 loader_remove_layer_lib(instance, layer_prop); 2698 } 2699 loader_destroy_layer_list(instance, &instance->activated_layer_list); 2700} 2701 2702VkResult loader_enable_instance_layers( 2703 struct loader_instance *inst, 2704 const VkInstanceCreateInfo *pCreateInfo, 2705 const struct loader_layer_list *instance_layers) 2706{ 2707 VkResult err; 2708 2709 assert(inst && "Cannot have null instance"); 2710 2711 if (!loader_init_layer_list(inst, &inst->activated_layer_list)) { 2712 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Failed to alloc Instance activated layer list"); 2713 return VK_ERROR_OUT_OF_HOST_MEMORY; 2714 } 2715 2716 /* Add any implicit layers first */ 2717 loader_add_layer_implicit( 2718 inst, 2719 VK_LAYER_TYPE_INSTANCE_IMPLICIT, 2720 &inst->activated_layer_list, 2721 instance_layers); 2722 2723 /* Add any layers specified via environment variable next */ 2724 loader_add_layer_env( 2725 inst, 2726 VK_LAYER_TYPE_INSTANCE_EXPLICIT, 
2727 "VK_INSTANCE_LAYERS", 2728 &inst->activated_layer_list, 2729 instance_layers); 2730 2731 /* Add layers specified by the application */ 2732 err = loader_add_layer_names_to_list( 2733 inst, 2734 &inst->activated_layer_list, 2735 pCreateInfo->enabledLayerNameCount, 2736 pCreateInfo->ppEnabledLayerNames, 2737 instance_layers); 2738 2739 return err; 2740} 2741 2742uint32_t loader_activate_instance_layers(struct loader_instance *inst) 2743{ 2744 uint32_t layer_idx; 2745 VkBaseLayerObject *wrappedInstance; 2746 2747 if (inst == NULL) { 2748 return 0; 2749 } 2750 2751 // NOTE inst is unwrapped at this point in time 2752 void* baseObj = (void*) inst; 2753 void* nextObj = (void*) inst; 2754 VkBaseLayerObject *nextInstObj; 2755 PFN_vkGetInstanceProcAddr nextGPA = loader_gpa_instance_internal; 2756 2757 if (!inst->activated_layer_list.count) { 2758 loader_init_instance_core_dispatch_table(inst->disp, nextGPA, (VkInstance) nextObj, (VkInstance) baseObj); 2759 return 0; 2760 } 2761 2762 wrappedInstance = loader_stack_alloc(sizeof(VkBaseLayerObject) 2763 * inst->activated_layer_list.count); 2764 if (!wrappedInstance) { 2765 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Failed to alloc Instance objects for layer"); 2766 return 0; 2767 } 2768 2769 /* Create instance chain of enabled layers */ 2770 layer_idx = inst->activated_layer_list.count - 1; 2771 for (int32_t i = inst->activated_layer_list.count - 1; i >= 0; i--) { 2772 struct loader_layer_properties *layer_prop = &inst->activated_layer_list.list[i]; 2773 loader_platform_dl_handle lib_handle; 2774 2775 /* 2776 * Note: An extension's Get*ProcAddr should not return a function pointer for 2777 * any extension entry points until the extension has been enabled. 2778 * To do this requires a different behavior from Get*ProcAddr functions implemented 2779 * in layers. 2780 * The very first call to a layer will be it's Get*ProcAddr function requesting 2781 * the layer's vkGet*ProcAddr. The layer should initialize its internal dispatch table 2782 * with the wrapped object given (either Instance or Device) and return the layer's 2783 * Get*ProcAddr function. The layer should also use this opportunity to record the 2784 * baseObject so that it can find the correct local dispatch table on future calls. 2785 * Subsequent calls to Get*ProcAddr, CreateInstance, CreateDevice 2786 * will not use a wrapped object and must look up their local dispatch table from 2787 * the given baseObject. 2788 */ 2789 nextInstObj = (wrappedInstance + layer_idx); 2790 nextInstObj->pGPA = (PFN_vkGPA) nextGPA; 2791 nextInstObj->baseObject = baseObj; 2792 nextInstObj->nextObject = nextObj; 2793 nextObj = (void*) nextInstObj; 2794 2795 lib_handle = loader_add_layer_lib(inst, "instance", layer_prop); 2796 if (!lib_handle) 2797 continue; // TODO what should we do in this case 2798 if ((nextGPA = layer_prop->functions.get_instance_proc_addr) == NULL) { 2799 if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) { 2800 nextGPA = (PFN_vkGetInstanceProcAddr) loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr"); 2801 layer_prop->functions.get_instance_proc_addr = nextGPA; 2802 } else 2803 nextGPA = (PFN_vkGetInstanceProcAddr) loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gipa); 2804 if (!nextGPA) { 2805 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Failed to find vkGetInstanceProcAddr in layer %s", layer_prop->lib_name); 2806 2807 /* TODO: Should we return nextObj, nextGPA to previous? 
or decrement layer_list count*/ 2808 continue; 2809 } 2810 } 2811 2812 loader_log(inst, VK_DEBUG_REPORT_INFO_BIT_EXT, 0, 2813 "Insert instance layer %s (%s)", 2814 layer_prop->info.layerName, 2815 layer_prop->lib_name); 2816 2817 layer_idx--; 2818 } 2819 2820 loader_init_instance_core_dispatch_table(inst->disp, nextGPA, (VkInstance) nextObj, (VkInstance) baseObj); 2821 2822 return inst->activated_layer_list.count; 2823} 2824 2825void loader_activate_instance_layer_extensions(struct loader_instance *inst) 2826{ 2827 2828 loader_init_instance_extension_dispatch_table(inst->disp, 2829 inst->disp->GetInstanceProcAddr, 2830 (VkInstance) inst); 2831} 2832 2833static VkResult loader_enable_device_layers( 2834 const struct loader_instance *inst, 2835 struct loader_icd *icd, 2836 struct loader_device *dev, 2837 const VkDeviceCreateInfo *pCreateInfo, 2838 const struct loader_layer_list *device_layers) 2839 2840{ 2841 VkResult err; 2842 2843 assert(dev && "Cannot have null device"); 2844 2845 if (dev->activated_layer_list.list == NULL || dev->activated_layer_list.capacity == 0) { 2846 loader_init_layer_list(inst, &dev->activated_layer_list); 2847 } 2848 2849 if (dev->activated_layer_list.list == NULL) { 2850 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Failed to alloc device activated layer list"); 2851 return VK_ERROR_OUT_OF_HOST_MEMORY; 2852 } 2853 2854 /* Add any implicit layers first */ 2855 loader_add_layer_implicit( 2856 inst, 2857 VK_LAYER_TYPE_DEVICE_IMPLICIT, 2858 &dev->activated_layer_list, 2859 device_layers); 2860 2861 /* Add any layers specified via environment variable next */ 2862 loader_add_layer_env( 2863 inst, 2864 VK_LAYER_TYPE_DEVICE_EXPLICIT, 2865 "VK_DEVICE_LAYERS", 2866 &dev->activated_layer_list, 2867 device_layers); 2868 2869 /* Add layers specified by the application */ 2870 err = loader_add_layer_names_to_list( 2871 inst, 2872 &dev->activated_layer_list, 2873 pCreateInfo->enabledLayerNameCount, 2874 pCreateInfo->ppEnabledLayerNames, 2875 device_layers); 2876 2877 return err; 2878} 2879 2880/* 2881 * This function terminates the device chain for CreateDevice. 2882 * CreateDevice is a special case and so the loader call's 2883 * the ICD's CreateDevice before creating the chain. Since 2884 * we can't call CreateDevice twice we must terminate the 2885 * device chain with something else. 
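 * As a rough sketch of the resulting order (the layer names are hypothetical):
 * the loader first calls icd->CreateDevice() itself, then builds the layer
 * chain, e.g. loader -> layer_A -> layer_B -> scratch_vkCreateDevice, and the
 * subsequent CreateDevice call down that chain simply ends in this no-op
 * terminator.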
2886 */ 2887static VKAPI_ATTR VkResult VKAPI_CALL scratch_vkCreateDevice( 2888 VkPhysicalDevice physicalDevice, 2889 const VkDeviceCreateInfo *pCreateInfo, 2890 const VkAllocationCallbacks* pAllocator, 2891 VkDevice *pDevice) 2892{ 2893 return VK_SUCCESS; 2894} 2895 2896static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_GetDeviceChainProcAddr(VkDevice device, const char * name) 2897{ 2898 if (!strcmp(name, "vkGetDeviceProcAddr")) 2899 return (PFN_vkVoidFunction) loader_GetDeviceChainProcAddr; 2900 if (!strcmp(name, "vkCreateDevice")) 2901 return (PFN_vkVoidFunction) scratch_vkCreateDevice; 2902 2903 struct loader_device *found_dev; 2904 struct loader_icd *icd = loader_get_icd_and_device(device, &found_dev); 2905 return icd->GetDeviceProcAddr(device, name); 2906} 2907 2908static uint32_t loader_activate_device_layers( 2909 const struct loader_instance *inst, 2910 struct loader_device *dev, 2911 VkDevice device) 2912{ 2913 if (!dev) { 2914 return 0; 2915 } 2916 2917 /* activate any layer libraries */ 2918 void* nextObj = (void*) device; 2919 void* baseObj = nextObj; 2920 VkBaseLayerObject *nextGpuObj; 2921 PFN_vkGetDeviceProcAddr nextGPA = loader_GetDeviceChainProcAddr; 2922 VkBaseLayerObject *wrappedGpus; 2923 2924 if (!dev->activated_layer_list.count) { 2925 loader_init_device_dispatch_table(&dev->loader_dispatch, nextGPA, 2926 (VkDevice) nextObj, (VkDevice) baseObj); 2927 return 0; 2928 } 2929 2930 wrappedGpus = loader_heap_alloc(inst, 2931 sizeof (VkBaseLayerObject) * dev->activated_layer_list.count, 2932 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 2933 if (!wrappedGpus) { 2934 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Failed to alloc Gpu objects for layer"); 2935 return 0; 2936 } 2937 2938 for (int32_t i = dev->activated_layer_list.count - 1; i >= 0; i--) { 2939 2940 struct loader_layer_properties *layer_prop = &dev->activated_layer_list.list[i]; 2941 loader_platform_dl_handle lib_handle; 2942 2943 nextGpuObj = (wrappedGpus + i); 2944 nextGpuObj->pGPA = (PFN_vkGPA)nextGPA; 2945 nextGpuObj->baseObject = baseObj; 2946 nextGpuObj->nextObject = nextObj; 2947 nextObj = (void*) nextGpuObj; 2948 2949 lib_handle = loader_add_layer_lib(inst, "device", layer_prop); 2950 if ((nextGPA = layer_prop->functions.get_device_proc_addr) == NULL) { 2951 if (layer_prop->functions.str_gdpa == NULL || strlen(layer_prop->functions.str_gdpa) == 0) { 2952 nextGPA = (PFN_vkGetDeviceProcAddr) loader_platform_get_proc_address(lib_handle, "vkGetDeviceProcAddr"); 2953 layer_prop->functions.get_device_proc_addr = nextGPA; 2954 } else 2955 nextGPA = (PFN_vkGetDeviceProcAddr) loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gdpa); 2956 if (!nextGPA) { 2957 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Failed to find vkGetDeviceProcAddr in layer %s", layer_prop->lib_name); 2958 continue; 2959 } 2960 } 2961 2962 loader_log(inst, VK_DEBUG_REPORT_INFO_BIT_EXT, 0, 2963 "Insert device layer library %s (%s)", 2964 layer_prop->info.layerName, 2965 layer_prop->lib_name); 2966 2967 } 2968 2969 loader_init_device_dispatch_table(&dev->loader_dispatch, nextGPA, 2970 (VkDevice) nextObj, (VkDevice) baseObj); 2971 loader_heap_free(inst, wrappedGpus); 2972 2973 return dev->activated_layer_list.count; 2974} 2975 2976VkResult loader_validate_layers( 2977 const uint32_t layer_count, 2978 const char * const *ppEnabledLayerNames, 2979 const struct loader_layer_list *list) 2980{ 2981 struct loader_layer_properties *prop; 2982 2983 for (uint32_t i = 0; i < layer_count; i++) { 2984 prop = 
loader_get_layer_property(ppEnabledLayerNames[i], 2985 list); 2986 if (!prop) { 2987 return VK_ERROR_LAYER_NOT_PRESENT; 2988 } 2989 } 2990 2991 return VK_SUCCESS; 2992} 2993 2994VkResult loader_validate_instance_extensions( 2995 const struct loader_extension_list *icd_exts, 2996 const struct loader_layer_list *instance_layer, 2997 const VkInstanceCreateInfo *pCreateInfo) 2998{ 2999 VkExtensionProperties *extension_prop; 3000 struct loader_layer_properties *layer_prop; 3001 3002 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) { 3003 extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], 3004 icd_exts); 3005 3006 if (extension_prop) { 3007 continue; 3008 } 3009 3010 extension_prop = NULL; 3011 3012 /* Not in global list, search layer extension lists */ 3013 for (uint32_t j = 0; j < pCreateInfo->enabledLayerNameCount; j++) { 3014 layer_prop = loader_get_layer_property(pCreateInfo->ppEnabledLayerNames[j], 3015 instance_layer); 3016 if (!layer_prop) { 3017 /* Should NOT get here, loader_validate_layers 3018 * should have already filtered this case out. 3019 */ 3020 continue; 3021 } 3022 3023 extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], 3024 &layer_prop->instance_extension_list); 3025 if (extension_prop) { 3026 /* Found the extension in one of the layers enabled by the app. */ 3027 break; 3028 } 3029 } 3030 3031 if (!extension_prop) { 3032 /* Didn't find extension name in any of the global layers, error out */ 3033 return VK_ERROR_EXTENSION_NOT_PRESENT; 3034 } 3035 } 3036 return VK_SUCCESS; 3037} 3038 3039VkResult loader_validate_device_extensions( 3040 struct loader_physical_device *phys_dev, 3041 const struct loader_layer_list *device_layer, 3042 const VkDeviceCreateInfo *pCreateInfo) 3043{ 3044 VkExtensionProperties *extension_prop; 3045 struct loader_layer_properties *layer_prop; 3046 3047 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) { 3048 const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i]; 3049 extension_prop = get_extension_property(extension_name, 3050 &phys_dev->device_extension_cache); 3051 3052 if (extension_prop) { 3053 continue; 3054 } 3055 3056 /* Not in global list, search layer extension lists */ 3057 for (uint32_t j = 0; j < pCreateInfo->enabledLayerNameCount; j++) { 3058 const char *layer_name = pCreateInfo->ppEnabledLayerNames[j]; 3059 layer_prop = loader_get_layer_property(layer_name, 3060 device_layer); 3061 3062 if (!layer_prop) { 3063 /* Should NOT get here, loader_validate_layers 3064 * should have already filtered this case out. 3065 */ 3066 continue; 3067 } 3068 3069 extension_prop = get_dev_extension_property(extension_name, 3070 &layer_prop->device_extension_list); 3071 if (extension_prop) { 3072 /* Found the extension in one of the layers enabled by the app.
*/ 3073 break; 3074 } 3075 } 3076 3077 if (!extension_prop) { 3078 /* Didn't find extension name in any of the device layers, error out */ 3079 return VK_ERROR_EXTENSION_NOT_PRESENT; 3080 } 3081 } 3082 return VK_SUCCESS; 3083} 3084 3085VKAPI_ATTR VkResult VKAPI_CALL loader_CreateInstance( 3086 const VkInstanceCreateInfo* pCreateInfo, 3087 const VkAllocationCallbacks* pAllocator, 3088 VkInstance* pInstance) 3089{ 3090 struct loader_instance *ptr_instance = *(struct loader_instance **) pInstance; 3091 struct loader_icd *icd; 3092 VkExtensionProperties *prop; 3093 char **filtered_extension_names = NULL; 3094 VkInstanceCreateInfo icd_create_info; 3095 VkResult res = VK_SUCCESS; 3096 bool success = false; 3097 3098 memcpy(&icd_create_info, pCreateInfo, sizeof(icd_create_info)); 3099 3100 icd_create_info.enabledLayerNameCount = 0; 3101 icd_create_info.ppEnabledLayerNames = NULL; 3102 3103 /* 3104 * NOTE: Need to filter the extensions to only those 3105 * supported by the ICD. 3106 * No ICD will advertise support for layers. An ICD 3107 * library could support a layer, but it would be 3108 * independent of the actual ICD, just in the same library. 3109 */ 3110 filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionNameCount * sizeof(char *)); 3111 if (!filtered_extension_names) { 3112 return VK_ERROR_OUT_OF_HOST_MEMORY; 3113 } 3114 icd_create_info.ppEnabledExtensionNames = (const char * const *) filtered_extension_names; 3115 3116 for (uint32_t i = 0; i < ptr_instance->icd_libs.count; i++) { 3117 icd = loader_icd_add(ptr_instance, &ptr_instance->icd_libs.list[i]); 3118 if (icd) { 3119 icd_create_info.enabledExtensionNameCount = 0; 3120 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) { 3121 prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], 3122 &ptr_instance->ext_list); 3123 if (prop) { 3124 filtered_extension_names[icd_create_info.enabledExtensionNameCount] = (char *) pCreateInfo->ppEnabledExtensionNames[i]; 3125 icd_create_info.enabledExtensionNameCount++; 3126 } 3127 } 3128 3129 res = ptr_instance->icd_libs.list[i].CreateInstance(&icd_create_info, 3130 pAllocator, 3131 &(icd->instance)); 3132 if (res == VK_SUCCESS) 3133 success = loader_icd_init_entrys( 3134 icd, 3135 icd->instance, 3136 ptr_instance->icd_libs.list[i].GetInstanceProcAddr); 3137 3138 if (res != VK_SUCCESS || !success) 3139 { 3140 ptr_instance->icds = ptr_instance->icds->next; 3141 loader_icd_destroy(ptr_instance, icd); 3142 icd->instance = VK_NULL_HANDLE; 3143 loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, 3144 "ICD ignored: failed to CreateInstance and find entrypoints with ICD"); 3145 } 3146 } 3147 } 3148 3149 /* 3150 * If no ICDs were added to instance list and res is unchanged 3151 * from it's initial value, the loader was unable to find 3152 * a suitable ICD. 
3153 */ 3154 if (ptr_instance->icds == NULL) { 3155 if (res == VK_SUCCESS) { 3156 return VK_ERROR_INCOMPATIBLE_DRIVER; 3157 } else { 3158 return res; 3159 } 3160 } 3161 3162 return VK_SUCCESS; 3163} 3164 3165VKAPI_ATTR void VKAPI_CALL loader_DestroyInstance( 3166 VkInstance instance, 3167 const VkAllocationCallbacks* pAllocator) 3168{ 3169 struct loader_instance *ptr_instance = loader_instance(instance); 3170 struct loader_icd *icds = ptr_instance->icds; 3171 struct loader_icd *next_icd; 3172 3173 // Remove this instance from the list of instances: 3174 struct loader_instance *prev = NULL; 3175 struct loader_instance *next = loader.instances; 3176 while (next != NULL) { 3177 if (next == ptr_instance) { 3178 // Remove this instance from the list: 3179 if (prev) 3180 prev->next = next->next; 3181 else 3182 loader.instances = next->next; 3183 break; 3184 } 3185 prev = next; 3186 next = next->next; 3187 } 3188 3189 while (icds) { 3190 if (icds->instance) { 3191 icds->DestroyInstance(icds->instance, pAllocator); 3192 } 3193 next_icd = icds->next; 3194 icds->instance = VK_NULL_HANDLE; 3195 loader_icd_destroy(ptr_instance, icds); 3196 3197 icds = next_icd; 3198 } 3199 loader_delete_layer_properties(ptr_instance, &ptr_instance->device_layer_list); 3200 loader_delete_layer_properties(ptr_instance, &ptr_instance->instance_layer_list); 3201 loader_scanned_icd_clear(ptr_instance, &ptr_instance->icd_libs); 3202 loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *) 3203 &ptr_instance->ext_list); 3204 for (uint32_t i = 0; i < ptr_instance->total_gpu_count; i++) 3205 loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *) 3206 &ptr_instance->phys_devs[i].device_extension_cache); 3207 loader_heap_free(ptr_instance, ptr_instance->phys_devs); 3208 loader_free_dev_ext_table(ptr_instance); 3209} 3210 3211VkResult loader_init_physical_device_info(struct loader_instance *ptr_instance) 3212{ 3213 struct loader_icd *icd; 3214 uint32_t i, j, idx, count = 0; 3215 VkResult res; 3216 struct loader_phys_dev_per_icd *phys_devs; 3217 3218 ptr_instance->total_gpu_count = 0; 3219 phys_devs = (struct loader_phys_dev_per_icd *) loader_stack_alloc( 3220 sizeof(struct loader_phys_dev_per_icd) * 3221 ptr_instance->total_icd_count); 3222 if (!phys_devs) 3223 return VK_ERROR_OUT_OF_HOST_MEMORY; 3224 3225 icd = ptr_instance->icds; 3226 for (i = 0; i < ptr_instance->total_icd_count; i++) { 3227 assert(icd); 3228 res = icd->EnumeratePhysicalDevices(icd->instance, &phys_devs[i].count, NULL); 3229 if (res != VK_SUCCESS) 3230 return res; 3231 count += phys_devs[i].count; 3232 icd = icd->next; 3233 } 3234 3235 ptr_instance->phys_devs = (struct loader_physical_device *) loader_heap_alloc( 3236 ptr_instance, 3237 count * sizeof(struct loader_physical_device), 3238 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); 3239 if (!ptr_instance->phys_devs) 3240 return VK_ERROR_OUT_OF_HOST_MEMORY; 3241 3242 icd = ptr_instance->icds; 3243 3244 struct loader_physical_device *inst_phys_devs = ptr_instance->phys_devs; 3245 idx = 0; 3246 for (i = 0; i < ptr_instance->total_icd_count; i++) { 3247 assert(icd); 3248 3249 phys_devs[i].phys_devs = (VkPhysicalDevice *) loader_stack_alloc( 3250 phys_devs[i].count * sizeof(VkPhysicalDevice)); 3251 if (!phys_devs[i].phys_devs) { 3252 loader_heap_free(ptr_instance, ptr_instance->phys_devs); 3253 ptr_instance->phys_devs = NULL; 3254 return VK_ERROR_OUT_OF_HOST_MEMORY; 3255 } 3256 res = icd->EnumeratePhysicalDevices( 3257 icd->instance, 3258 &(phys_devs[i].count), 3259 
phys_devs[i].phys_devs); 3260 if ((res == VK_SUCCESS)) { 3261 ptr_instance->total_gpu_count += phys_devs[i].count; 3262 for (j = 0; j < phys_devs[i].count; j++) { 3263 3264 // initialize the loader's physicalDevice object 3265 loader_set_dispatch((void *) &inst_phys_devs[idx], ptr_instance->disp); 3266 inst_phys_devs[idx].this_instance = ptr_instance; 3267 inst_phys_devs[idx].this_icd = icd; 3268 inst_phys_devs[idx].phys_dev = phys_devs[i].phys_devs[j]; 3269 memset(&inst_phys_devs[idx].device_extension_cache, 0, sizeof(struct loader_extension_list)); 3270 3271 idx++; 3272 } 3273 } else { 3274 loader_heap_free(ptr_instance, ptr_instance->phys_devs); 3275 ptr_instance->phys_devs = NULL; 3276 return res; 3277 } 3278 3279 icd = icd->next; 3280 } 3281 3282 return VK_SUCCESS; 3283} 3284 3285VKAPI_ATTR VkResult VKAPI_CALL loader_EnumeratePhysicalDevices( 3286 VkInstance instance, 3287 uint32_t* pPhysicalDeviceCount, 3288 VkPhysicalDevice* pPhysicalDevices) 3289{ 3290 uint32_t i; 3291 struct loader_instance *ptr_instance = (struct loader_instance *) instance; 3292 VkResult res = VK_SUCCESS; 3293 3294 if (ptr_instance->total_gpu_count == 0) { 3295 res = loader_init_physical_device_info(ptr_instance); 3296 } 3297 3298 *pPhysicalDeviceCount = ptr_instance->total_gpu_count; 3299 if (!pPhysicalDevices) { 3300 return res; 3301 } 3302 3303 for (i = 0; i < ptr_instance->total_gpu_count; i++) { 3304 pPhysicalDevices[i] = (VkPhysicalDevice) &ptr_instance->phys_devs[i]; 3305 } 3306 3307 return res; 3308} 3309 3310VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceProperties( 3311 VkPhysicalDevice physicalDevice, 3312 VkPhysicalDeviceProperties* pProperties) 3313{ 3314 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3315 struct loader_icd *icd = phys_dev->this_icd; 3316 3317 if (icd->GetPhysicalDeviceProperties) 3318 icd->GetPhysicalDeviceProperties(phys_dev->phys_dev, pProperties); 3319} 3320 3321VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceQueueFamilyProperties ( 3322 VkPhysicalDevice physicalDevice, 3323 uint32_t* pQueueFamilyPropertyCount, 3324 VkQueueFamilyProperties* pProperties) 3325{ 3326 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3327 struct loader_icd *icd = phys_dev->this_icd; 3328 3329 if (icd->GetPhysicalDeviceQueueFamilyProperties) 3330 icd->GetPhysicalDeviceQueueFamilyProperties(phys_dev->phys_dev, pQueueFamilyPropertyCount, pProperties); 3331} 3332 3333VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceMemoryProperties ( 3334 VkPhysicalDevice physicalDevice, 3335 VkPhysicalDeviceMemoryProperties* pProperties) 3336{ 3337 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3338 struct loader_icd *icd = phys_dev->this_icd; 3339 3340 if (icd->GetPhysicalDeviceMemoryProperties) 3341 icd->GetPhysicalDeviceMemoryProperties(phys_dev->phys_dev, pProperties); 3342} 3343 3344VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceFeatures( 3345 VkPhysicalDevice physicalDevice, 3346 VkPhysicalDeviceFeatures* pFeatures) 3347{ 3348 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3349 struct loader_icd *icd = phys_dev->this_icd; 3350 3351 if (icd->GetPhysicalDeviceFeatures) 3352 icd->GetPhysicalDeviceFeatures(phys_dev->phys_dev, pFeatures); 3353} 3354 3355VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceFormatProperties( 3356 VkPhysicalDevice physicalDevice, 3357 VkFormat format, 3358 VkFormatProperties* pFormatInfo) 3359{ 3360 
struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3361 struct loader_icd *icd = phys_dev->this_icd; 3362 3363 if (icd->GetPhysicalDeviceFormatProperties) 3364 icd->GetPhysicalDeviceFormatProperties(phys_dev->phys_dev, format, pFormatInfo); 3365} 3366 3367VKAPI_ATTR VkResult VKAPI_CALL loader_GetPhysicalDeviceImageFormatProperties( 3368 VkPhysicalDevice physicalDevice, 3369 VkFormat format, 3370 VkImageType type, 3371 VkImageTiling tiling, 3372 VkImageUsageFlags usage, 3373 VkImageCreateFlags flags, 3374 VkImageFormatProperties* pImageFormatProperties) 3375{ 3376 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3377 struct loader_icd *icd = phys_dev->this_icd; 3378 3379 if (!icd->GetPhysicalDeviceImageFormatProperties) 3380 return VK_ERROR_INITIALIZATION_FAILED; 3381 3382 return icd->GetPhysicalDeviceImageFormatProperties(phys_dev->phys_dev, format, 3383 type, tiling, usage, flags, pImageFormatProperties); 3384} 3385 3386VKAPI_ATTR void VKAPI_CALL loader_GetPhysicalDeviceSparseImageFormatProperties( 3387 VkPhysicalDevice physicalDevice, 3388 VkFormat format, 3389 VkImageType type, 3390 VkSampleCountFlagBits samples, 3391 VkImageUsageFlags usage, 3392 VkImageTiling tiling, 3393 uint32_t* pNumProperties, 3394 VkSparseImageFormatProperties* pProperties) 3395{ 3396 struct loader_physical_device *phys_dev = (struct loader_physical_device *) physicalDevice; 3397 struct loader_icd *icd = phys_dev->this_icd; 3398 3399 if (icd->GetPhysicalDeviceSparseImageFormatProperties) 3400 icd->GetPhysicalDeviceSparseImageFormatProperties(phys_dev->phys_dev, format, type, samples, usage, tiling, pNumProperties, pProperties); 3401} 3402 3403VKAPI_ATTR VkResult VKAPI_CALL loader_CreateDevice( 3404 VkPhysicalDevice physicalDevice, 3405 const VkDeviceCreateInfo* pCreateInfo, 3406 const VkAllocationCallbacks* pAllocator, 3407 VkDevice* pDevice) 3408{ 3409 struct loader_physical_device *phys_dev; 3410 struct loader_icd *icd; 3411 struct loader_device *dev; 3412 struct loader_instance *inst; 3413 VkDeviceCreateInfo device_create_info; 3414 char **filtered_extension_names = NULL; 3415 VkResult res; 3416 3417 assert(pCreateInfo->queueCreateInfoCount >= 1); 3418 3419 //TODO this only works for one physical device per instance 3420 // once CreateDevice layer bootstrapping is done via DeviceCreateInfo 3421 // hopefully don't need this anymore in trampoline code 3422 phys_dev = loader_get_physical_device(physicalDevice); 3423 icd = phys_dev->this_icd; 3424 if (!icd) 3425 return VK_ERROR_INITIALIZATION_FAILED; 3426 3427 inst = phys_dev->this_instance; 3428 3429 if (!icd->CreateDevice) { 3430 return VK_ERROR_INITIALIZATION_FAILED; 3431 } 3432 3433 /* validate any app enabled layers are available */ 3434 if (pCreateInfo->enabledLayerNameCount > 0) { 3435 res = loader_validate_layers(pCreateInfo->enabledLayerNameCount, 3436 pCreateInfo->ppEnabledLayerNames, 3437 &inst->device_layer_list); 3438 if (res != VK_SUCCESS) { 3439 return res; 3440 } 3441 } 3442 3443 /* Get the physical device extensions if they haven't been retrieved yet */ 3444 if (phys_dev->device_extension_cache.capacity == 0) { 3445 if (!loader_init_generic_list(inst, (struct loader_generic_list *) 3446 &phys_dev->device_extension_cache, 3447 sizeof(VkExtensionProperties))) { 3448 return VK_ERROR_OUT_OF_HOST_MEMORY; 3449 } 3450 res = loader_add_device_extensions( 3451 inst, physicalDevice, 3452 phys_dev->this_icd->this_icd_lib->lib_name, 3453 &phys_dev->device_extension_cache); 
        if (res != VK_SUCCESS) {
            return res;
        }
    }

    /* make sure the requested extensions to be enabled are supported */
    res = loader_validate_device_extensions(phys_dev, &inst->device_layer_list, pCreateInfo);
    if (res != VK_SUCCESS) {
        return res;
    }

    /*
     * NOTE: Need to filter the extensions to only those
     * supported by the ICD.
     * No ICD will advertise support for layers. An ICD
     * library could support a layer, but it would be
     * independent of the actual ICD, just in the same library.
     */
    filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionNameCount * sizeof(char *));
    if (!filtered_extension_names) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    /* Copy the user's data */
    memcpy(&device_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo));

    /* ICDs do not use layers */
    device_create_info.enabledLayerNameCount = 0;
    device_create_info.ppEnabledLayerNames = NULL;

    device_create_info.enabledExtensionNameCount = 0;
    device_create_info.ppEnabledExtensionNames = (const char * const *) filtered_extension_names;

    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) {
        const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
        VkExtensionProperties *prop = get_extension_property(extension_name,
                                                             &phys_dev->device_extension_cache);
        if (prop) {
            filtered_extension_names[device_create_info.enabledExtensionNameCount] = (char *) extension_name;
            device_create_info.enabledExtensionNameCount++;
        }
    }

    // Since the physicalDevice object may be wrapped by a layer, we need the
    // unwrapped version; we haven't yet called down the chain for the layer to
    // unwrap the object.
    res = icd->CreateDevice(phys_dev->phys_dev, pCreateInfo, pAllocator, pDevice);
    if (res != VK_SUCCESS) {
        return res;
    }

    dev = loader_add_logical_device(inst, *pDevice, &icd->logical_device_list);
    if (dev == NULL) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    loader_init_dispatch(*pDevice, &dev->loader_dispatch);

    /* activate any layers on the device chain, which terminates with the device */
    res = loader_enable_device_layers(inst, icd, dev, pCreateInfo, &inst->device_layer_list);
    if (res != VK_SUCCESS) {
        loader_destroy_logical_device(inst, dev);
        return res;
    }
    loader_activate_device_layers(inst, dev, *pDevice);

    /* finally we can call down the chain */
    res = dev->loader_dispatch.core_dispatch.CreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);

    /* initialize any device extension dispatch entries from the instance list */
    loader_init_dispatch_dev_ext(inst, dev);

    /* initialize WSI device extensions as part of core dispatch since the loader
     * has dedicated trampoline code for these */
    loader_init_device_extension_dispatch_table(&dev->loader_dispatch,
                                                dev->loader_dispatch.core_dispatch.GetDeviceProcAddr,
                                                *pDevice);
    dev->loader_dispatch.core_dispatch.CreateDevice = icd->CreateDevice;

    return res;
}
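/*
 * Condensed sketch (not loader code) of the extension-filtering idea used in
 * loader_CreateDevice above: only names present in a cached list of
 * VkExtensionProperties are forwarded to the ICD. The helper name and
 * signature are hypothetical; the real code walks the physical device's
 * device_extension_cache via get_extension_property(). Kept under #if 0.
 */
#if 0
static uint32_t example_filter_extensions(
        const char * const *requested, uint32_t requested_count,
        const VkExtensionProperties *supported, uint32_t supported_count,
        const char **filtered /* out, at least requested_count entries */)
{
    uint32_t kept = 0;
    for (uint32_t i = 0; i < requested_count; i++) {
        for (uint32_t j = 0; j < supported_count; j++) {
            if (strcmp(requested[i], supported[j].extensionName) == 0) {
                /* supported: keep the name for the downstream call */
                filtered[kept++] = requested[i];
                break;
            }
        }
    }
    return kept;
}
#endif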
3542 */ 3543LOADER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char * pName) 3544{ 3545 3546 void *addr; 3547 3548 addr = globalGetProcAddr(pName); 3549 if (instance == VK_NULL_HANDLE) { 3550 // get entrypoint addresses that are global (no dispatchable object) 3551 3552 return addr; 3553 } else { 3554 // if a global entrypoint return NULL 3555 if (addr) 3556 return NULL; 3557 } 3558 3559 struct loader_instance *ptr_instance = loader_get_instance(instance); 3560 if (ptr_instance == NULL) 3561 return NULL; 3562 // Return trampoline code for non-global entrypoints including any extensions. 3563 // Device extensions are returned if a layer or ICD supports the extension. 3564 // Instance extensions are returned if the extension is enabled and the loader 3565 // or someone else supports the extension 3566 return trampolineGetProcAddr(ptr_instance, pName); 3567 3568} 3569 3570/** 3571 * Get a device level or global level entry point address. 3572 * @param device 3573 * @param pName 3574 * @return 3575 * If device is valid, returns a device relative entry point for device level 3576 * entry points both core and extensions. 3577 * Device relative means call down the device chain. 3578 */ 3579LOADER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char * pName) 3580{ 3581 void *addr; 3582 3583 /* for entrypoints that loader must handle (ie non-dispatchable or create object) 3584 make sure the loader entrypoint is returned */ 3585 addr = loader_non_passthrough_gdpa(pName); 3586 if (addr) { 3587 return addr; 3588 } 3589 3590 /* Although CreateDevice is on device chain it's dispatchable object isn't 3591 * a VkDevice or child of VkDevice so return NULL. 3592 */ 3593 if (!strcmp(pName, "CreateDevice")) 3594 return NULL; 3595 3596 /* return the dispatch table entrypoint for the fastest case */ 3597 const VkLayerDispatchTable *disp_table = * (VkLayerDispatchTable **) device; 3598 if (disp_table == NULL) 3599 return NULL; 3600 3601 addr = loader_lookup_device_dispatch_table(disp_table, pName); 3602 if (addr) 3603 return addr; 3604 3605 if (disp_table->GetDeviceProcAddr == NULL) 3606 return NULL; 3607 return disp_table->GetDeviceProcAddr(device, pName); 3608} 3609 3610LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties( 3611 const char* pLayerName, 3612 uint32_t* pPropertyCount, 3613 VkExtensionProperties* pProperties) 3614{ 3615 struct loader_extension_list *global_ext_list=NULL; 3616 struct loader_layer_list instance_layers; 3617 struct loader_extension_list icd_extensions; 3618 struct loader_icd_libs icd_libs; 3619 uint32_t copy_size; 3620 3621 tls_instance = NULL; 3622 memset(&icd_extensions, 0, sizeof(icd_extensions)); 3623 memset(&instance_layers, 0, sizeof(instance_layers)); 3624 loader_platform_thread_once(&once_init, loader_initialize); 3625 3626 /* get layer libraries if needed */ 3627 if (pLayerName && strlen(pLayerName) != 0) { 3628 loader_layer_scan(NULL, &instance_layers, NULL); 3629 for (uint32_t i = 0; i < instance_layers.count; i++) { 3630 struct loader_layer_properties *props = &instance_layers.list[i]; 3631 if (strcmp(props->info.layerName, pLayerName) == 0) { 3632 global_ext_list = &props->instance_extension_list; 3633 } 3634 } 3635 } 3636 else { 3637 /* Scan/discover all ICD libraries */ 3638 memset(&icd_libs, 0 , sizeof(struct loader_icd_libs)); 3639 loader_icd_scan(NULL, &icd_libs); 3640 /* get extensions from all ICD's, merge so no duplicates */ 3641 
LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(
    const char* pLayerName,
    uint32_t* pPropertyCount,
    VkExtensionProperties* pProperties)
{
    struct loader_extension_list *global_ext_list = NULL;
    struct loader_layer_list instance_layers;
    struct loader_extension_list icd_extensions;
    struct loader_icd_libs icd_libs;
    uint32_t copy_size;

    tls_instance = NULL;
    memset(&icd_extensions, 0, sizeof(icd_extensions));
    memset(&instance_layers, 0, sizeof(instance_layers));
    loader_platform_thread_once(&once_init, loader_initialize);

    /* get layer libraries if needed */
    if (pLayerName && strlen(pLayerName) != 0) {
        loader_layer_scan(NULL, &instance_layers, NULL);
        for (uint32_t i = 0; i < instance_layers.count; i++) {
            struct loader_layer_properties *props = &instance_layers.list[i];
            if (strcmp(props->info.layerName, pLayerName) == 0) {
                global_ext_list = &props->instance_extension_list;
            }
        }
    }
    else {
        /* Scan/discover all ICD libraries */
        memset(&icd_libs, 0, sizeof(struct loader_icd_libs));
        loader_icd_scan(NULL, &icd_libs);
        /* get extensions from all ICDs, merged so there are no duplicates */
        loader_get_icd_loader_instance_extensions(NULL, &icd_libs, &icd_extensions);
        loader_scanned_icd_clear(NULL, &icd_libs);
        global_ext_list = &icd_extensions;
    }

    if (global_ext_list == NULL) {
        loader_destroy_layer_list(NULL, &instance_layers);
        return VK_ERROR_LAYER_NOT_PRESENT;
    }

    if (pProperties == NULL) {
        *pPropertyCount = global_ext_list->count;
        loader_destroy_layer_list(NULL, &instance_layers);
        loader_destroy_generic_list(NULL, (struct loader_generic_list *)
                                    &icd_extensions);
        return VK_SUCCESS;
    }

    copy_size = *pPropertyCount < global_ext_list->count ? *pPropertyCount : global_ext_list->count;
    for (uint32_t i = 0; i < copy_size; i++) {
        memcpy(&pProperties[i],
               &global_ext_list->list[i],
               sizeof(VkExtensionProperties));
    }
    *pPropertyCount = copy_size;

    /* Check for VK_INCOMPLETE before tearing down icd_extensions: global_ext_list
     * may point at it, and destroying the list resets its count to zero. */
    if (copy_size < global_ext_list->count) {
        loader_destroy_generic_list(NULL, (struct loader_generic_list *)
                                    &icd_extensions);
        loader_destroy_layer_list(NULL, &instance_layers);
        return VK_INCOMPLETE;
    }

    loader_destroy_generic_list(NULL, (struct loader_generic_list *)
                                &icd_extensions);
    loader_destroy_layer_list(NULL, &instance_layers);
    return VK_SUCCESS;
}
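/*
 * Illustrative sketch (not part of the loader): the two-call idiom for
 * vkEnumerateInstanceExtensionProperties defined above, including the
 * VK_INCOMPLETE case when the caller's buffer is smaller than the number of
 * available extensions. The function name is hypothetical. Kept under #if 0.
 */
#if 0
static VkResult example_list_instance_extensions(void)
{
    uint32_t count = 0;
    /* First call (pProperties == NULL): just retrieve the count. */
    VkResult res = vkEnumerateInstanceExtensionProperties(NULL, &count, NULL);
    if (res != VK_SUCCESS || count == 0)
        return res;

    VkExtensionProperties *props = malloc(count * sizeof(*props));
    if (!props)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    /* Second call: fill the array; VK_INCOMPLETE means count was too small. */
    res = vkEnumerateInstanceExtensionProperties(NULL, &count, props);
    free(props);
    return res;
}
#endif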
LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(
    uint32_t* pPropertyCount,
    VkLayerProperties* pProperties)
{
    struct loader_layer_list instance_layer_list;
    uint32_t copy_size;

    tls_instance = NULL;
    loader_platform_thread_once(&once_init, loader_initialize);

    /* get layer libraries */
    memset(&instance_layer_list, 0, sizeof(instance_layer_list));
    loader_layer_scan(NULL, &instance_layer_list, NULL);

    if (pProperties == NULL) {
        *pPropertyCount = instance_layer_list.count;
        loader_destroy_layer_list(NULL, &instance_layer_list);
        return VK_SUCCESS;
    }

    copy_size = (*pPropertyCount < instance_layer_list.count) ?
                    *pPropertyCount : instance_layer_list.count;
    for (uint32_t i = 0; i < copy_size; i++) {
        memcpy(&pProperties[i], &instance_layer_list.list[i].info, sizeof(VkLayerProperties));
    }
    *pPropertyCount = copy_size;

    /* Check for VK_INCOMPLETE before destroying the layer list, since the
     * destroy resets instance_layer_list.count to zero. */
    if (copy_size < instance_layer_list.count) {
        loader_destroy_layer_list(NULL, &instance_layer_list);
        return VK_INCOMPLETE;
    }

    loader_destroy_layer_list(NULL, &instance_layer_list);
    return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL loader_EnumerateDeviceExtensionProperties(
    VkPhysicalDevice physicalDevice,
    const char* pLayerName,
    uint32_t* pPropertyCount,
    VkExtensionProperties* pProperties)
{
    struct loader_physical_device *phys_dev;
    uint32_t copy_size;
    uint32_t count;
    struct loader_device_extension_list *dev_ext_list = NULL;

    // TODO: fix this; it aliases physical devices
    phys_dev = loader_get_physical_device(physicalDevice);

    /* get layer libraries if needed */
    if (pLayerName && strlen(pLayerName) != 0) {
        for (uint32_t i = 0; i < phys_dev->this_instance->device_layer_list.count; i++) {
            struct loader_layer_properties *props = &phys_dev->this_instance->device_layer_list.list[i];
            if (strcmp(props->info.layerName, pLayerName) == 0) {
                dev_ext_list = &props->device_extension_list;
            }
        }
    }
    else {
        /* this case occurs during the call down the instance chain */
        struct loader_icd *icd = phys_dev->this_icd;
        VkResult res;
        res = icd->EnumerateDeviceExtensionProperties(phys_dev->phys_dev, NULL, pPropertyCount, pProperties);
        if (pProperties != NULL && res == VK_SUCCESS) {
            /* initialize the dev_extension list within the physicalDevice object */
            res = loader_init_device_extensions(phys_dev->this_instance,
                                                phys_dev, *pPropertyCount, pProperties,
                                                &phys_dev->device_extension_cache);
        }
        return res;
    }

    count = (dev_ext_list == NULL) ? 0 : dev_ext_list->count;
    if (pProperties == NULL) {
        *pPropertyCount = count;
        return VK_SUCCESS;
    }

    copy_size = *pPropertyCount < count ? *pPropertyCount : count;
    for (uint32_t i = 0; i < copy_size; i++) {
        memcpy(&pProperties[i],
               &dev_ext_list->list[i].props,
               sizeof(VkExtensionProperties));
    }
    *pPropertyCount = copy_size;

    if (copy_size < count) {
        return VK_INCOMPLETE;
    }

    return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL loader_EnumerateDeviceLayerProperties(
    VkPhysicalDevice physicalDevice,
    uint32_t* pPropertyCount,
    VkLayerProperties* pProperties)
{
    uint32_t copy_size;
    struct loader_physical_device *phys_dev;

    // TODO: fix this; it aliases physical devices
    phys_dev = loader_get_physical_device(physicalDevice);
    uint32_t count = phys_dev->this_instance->device_layer_list.count;

    if (pProperties == NULL) {
        *pPropertyCount = count;
        return VK_SUCCESS;
    }

    copy_size = (*pPropertyCount < count) ? *pPropertyCount : count;
    for (uint32_t i = 0; i < copy_size; i++) {
        memcpy(&pProperties[i], &(phys_dev->this_instance->device_layer_list.list[i].info), sizeof(VkLayerProperties));
    }
    *pPropertyCount = copy_size;

    if (copy_size < count) {
        return VK_INCOMPLETE;
    }

    return VK_SUCCESS;
}
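/*
 * Illustrative sketch (not part of the loader): how the pLayerName parameter
 * of vkEnumerateDeviceExtensionProperties is used from the application side.
 * Passing NULL reports ICD-provided extensions (the terminator above calls
 * into the ICD and caches the result); passing a layer name reports that
 * layer's device extensions. The function name is hypothetical. Kept under
 * #if 0.
 */
#if 0
static VkResult example_list_device_extensions(VkPhysicalDevice gpu,
                                               const char *layer_name /* may be NULL */)
{
    uint32_t count = 0;
    VkResult res = vkEnumerateDeviceExtensionProperties(gpu, layer_name, &count, NULL);
    if (res != VK_SUCCESS || count == 0)
        return res;

    VkExtensionProperties *props = malloc(count * sizeof(*props));
    if (!props)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    res = vkEnumerateDeviceExtensionProperties(gpu, layer_name, &count, props);
    free(props);
    return res;
}
#endif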