loader.c revision 5ea338cc5ae904ca1b8d8c4b911a2f501f66ab25
/*
 * Vulkan
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *   Chia-I Wu <olv@lunarg.com>
 *   Jon Ashburn <jon@lunarg.com>
 *   Courtney Goeltzenleuchter <courtney@lunarg.com>
 *   Ian Elliott <ian@lunarg.com>
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <string.h>

#include <sys/types.h>
#if defined(WIN32)
#include "dirent_on_windows.h"
#else // WIN32
#include <dirent.h>
#endif // WIN32
#include "vk_loader_platform.h"
#include "loader.h"
#include "gpa_helper.h"
#include "table_ops.h"
#include "debug_report.h"
#include "vk_icd.h"
#include "cJSON.h"

static loader_platform_dl_handle loader_add_layer_lib(
        const struct loader_instance *inst,
        const char *chain_type,
        struct loader_layer_properties *layer_prop);

static void loader_remove_layer_lib(
        struct loader_instance *inst,
        struct loader_layer_properties *layer_prop);

struct loader_struct loader = {0};
// TLS for instance for alloc/free callbacks
THREAD_LOCAL_DECL struct loader_instance *tls_instance;

static PFN_vkVoidFunction VKAPI loader_GetInstanceProcAddr(
        VkInstance instance,
        const char * pName);
static bool loader_init_ext_list(
        const struct loader_instance *inst,
        struct loader_extension_list *ext_info);

enum loader_debug {
    LOADER_INFO_BIT  = 0x01,
    LOADER_WARN_BIT  = 0x02,
    LOADER_PERF_BIT  = 0x04,
    LOADER_ERROR_BIT = 0x08,
    LOADER_DEBUG_BIT = 0x10,
};

uint32_t g_loader_debug = 0;
uint32_t g_loader_log_msgs = 0;

// Thread safety lock for accessing global data structures such as "loader".
// All entrypoints on the instance chain need to be locked except GPA;
// additionally, CreateDevice and DestroyDevice need to be locked.
loader_platform_thread_mutex loader_lock;
loader_platform_thread_mutex loader_json_lock;

// This table contains the loader's instance dispatch table, which contains
// default functions if no instance layers are activated. This contains
// pointers to "terminator functions".
const VkLayerInstanceDispatchTable instance_disp = {
    .GetInstanceProcAddr = loader_GetInstanceProcAddr,
    .CreateInstance = loader_CreateInstance,
    .DestroyInstance = loader_DestroyInstance,
    .EnumeratePhysicalDevices = loader_EnumeratePhysicalDevices,
    .GetPhysicalDeviceFeatures = loader_GetPhysicalDeviceFeatures,
    .GetPhysicalDeviceFormatProperties = loader_GetPhysicalDeviceFormatProperties,
    .GetPhysicalDeviceImageFormatProperties = loader_GetPhysicalDeviceImageFormatProperties,
    .GetPhysicalDeviceProperties = loader_GetPhysicalDeviceProperties,
    .GetPhysicalDeviceQueueFamilyProperties = loader_GetPhysicalDeviceQueueFamilyProperties,
    .GetPhysicalDeviceMemoryProperties = loader_GetPhysicalDeviceMemoryProperties,
    .EnumerateDeviceExtensionProperties = loader_EnumerateDeviceExtensionProperties,
    .EnumerateDeviceLayerProperties = loader_EnumerateDeviceLayerProperties,
    .GetPhysicalDeviceSparseImageFormatProperties = loader_GetPhysicalDeviceSparseImageFormatProperties,
    .GetPhysicalDeviceSurfaceSupportKHR = loader_GetPhysicalDeviceSurfaceSupportKHR,
    .DbgCreateMsgCallback = loader_DbgCreateMsgCallback,
    .DbgDestroyMsgCallback = loader_DbgDestroyMsgCallback,
};

LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init);

void* loader_heap_alloc(
    const struct loader_instance *instance,
    size_t size,
    VkSystemAllocType alloc_type)
{
    if (instance && instance->alloc_callbacks.pfnAlloc) {
        /* TODO: What should default alignment be? 1, 4, 8, other? */
        return instance->alloc_callbacks.pfnAlloc(instance->alloc_callbacks.pUserData, size, 4, alloc_type);
    }
    return malloc(size);
}

void* loader_aligned_heap_alloc(
    const struct loader_instance *instance,
    size_t size,
    size_t alignment,
    VkSystemAllocType alloc_type)
{
    if (instance && instance->alloc_callbacks.pfnAlloc) {
        return instance->alloc_callbacks.pfnAlloc(instance->alloc_callbacks.pUserData, size, alignment, alloc_type);
    }
#if defined(_WIN32)
    // _aligned_malloc takes (size, alignment)
    return _aligned_malloc(size, alignment);
#else
    return aligned_alloc(alignment, size);
#endif
}

void loader_heap_free(
    const struct loader_instance *instance,
    void *pMem)
{
    if (instance && instance->alloc_callbacks.pfnFree) {
        instance->alloc_callbacks.pfnFree(instance->alloc_callbacks.pUserData, pMem);
        return;
    }
    free(pMem);
}

void* loader_heap_realloc(
    const struct loader_instance *instance,
    void *pMem,
    size_t orig_size,
    size_t size,
    VkSystemAllocType alloc_type)
{
    if (pMem == NULL || orig_size == 0)
        return loader_heap_alloc(instance, size, alloc_type);
    if (size == 0) {
        loader_heap_free(instance, pMem);
        return NULL;
    }
    if (instance && instance->alloc_callbacks.pfnAlloc) {
        if (size <= orig_size) {
            memset(((uint8_t *)pMem) + size, 0, orig_size - size);
            return pMem;
        }
        void *new_ptr = instance->alloc_callbacks.pfnAlloc(instance->alloc_callbacks.pUserData, size, 4, alloc_type);
        if (!new_ptr)
            return NULL;
        memcpy(new_ptr, pMem, orig_size);
        instance->alloc_callbacks.pfnFree(instance->alloc_callbacks.pUserData, pMem);
        // return the new block; do not fall through to realloc() on freed memory
        return new_ptr;
    }
    return realloc(pMem, size);
}

void *loader_tls_heap_alloc(size_t size)
{
    return loader_heap_alloc(tls_instance, size, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
}

void loader_tls_heap_free(void *pMem)
{
    loader_heap_free(tls_instance, pMem);
}
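
/**
 * Format a log message and emit it to stderr (and to OutputDebugString on
 * Windows). Messages are dropped unless msg_type is enabled in
 * g_loader_log_msgs (see loader_debug_init).
 */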
static void loader_log(
    VkFlags msg_type,
    int32_t msg_code,
    const char *format, ...)
{
    char msg[512];
    va_list ap;
    int ret;

    if (!(msg_type & g_loader_log_msgs)) {
        return;
    }

    va_start(ap, format);
    ret = vsnprintf(msg, sizeof(msg), format, ap);
    if ((ret >= (int) sizeof(msg)) || ret < 0) {
        msg[sizeof(msg)-1] = '\0';
    }
    va_end(ap);

#if defined(WIN32)
    OutputDebugString(msg);
    OutputDebugString("\n");
#endif
    fputs(msg, stderr);
    fputc('\n', stderr);
}

#if defined(WIN32)
static char *loader_get_next_path(char *path);
/**
 * Find the list of registry files (names within a key) in key "location".
 *
 * This function looks in the registry (hive = DEFAULT_VK_REGISTRY_HIVE) key as given in "location"
 * for a list of name/value pairs which are added to a returned list (function return value).
 * The DWORD values within the key must be 0 or they are skipped.
 * The function returns a string with a ';'-separated list of filenames.
 * The function returns NULL if no valid name/value pairs are found in the key,
 * or the key is not found.
 *
 * \returns
 * A pointer to a string list of filenames.
 * When done using the returned string list, the pointer should be freed.
 */
static char *loader_get_registry_files(const struct loader_instance *inst, char *location)
{
    LONG rtn_value;
    HKEY hive, key;
    DWORD access_flags = KEY_QUERY_VALUE;
    char name[2048];
    char *out = NULL;
    char *loc = location;
    char *next;
    DWORD idx = 0;
    DWORD name_size = sizeof(name);
    DWORD value;
    DWORD total_size = 4096;
    DWORD value_size = sizeof(value);

    while (*loc) {
        next = loader_get_next_path(loc);
        hive = DEFAULT_VK_REGISTRY_HIVE;
        rtn_value = RegOpenKeyEx(hive, loc, 0, access_flags, &key);
        if (rtn_value != ERROR_SUCCESS) {
            // We didn't find the key. Try the 32-bit hive (where we've seen the
            // key end up on some people's systems):
            access_flags |= KEY_WOW64_32KEY;
            rtn_value = RegOpenKeyEx(hive, loc, 0, access_flags, &key);
            if (rtn_value != ERROR_SUCCESS) {
                // We still couldn't find the key, so give up:
                loc = next;
                continue;
            }
        }

        while ((rtn_value = RegEnumValue(key, idx++, name, &name_size, NULL, NULL, (LPBYTE) &value, &value_size)) == ERROR_SUCCESS) {
            if (value_size == sizeof(value) && value == 0) {
                if (out == NULL) {
                    out = loader_heap_alloc(inst, total_size, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
                    out[0] = '\0';
                }
                else if (strlen(out) + name_size + 1 > total_size) {
                    out = loader_heap_realloc(inst, out, total_size, total_size * 2, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
                    total_size *= 2;
                }
                if (out == NULL) {
                    loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory, failed loader_get_registry_files");
                    return NULL;
                }
                if (strlen(out) == 0)
                    snprintf(out, name_size + 1, "%s", name);
                else
                    snprintf(out + strlen(out), name_size + 2, "%c%s", PATH_SEPERATOR, name);
            }
            name_size = 2048;
        }
        loc = next;
    }

    return out;
}

#endif // WIN32

/**
 * Given a string of the three-part form "major.minor.patch", convert it to a
 * Vulkan version number.
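 * For example, "0.9.0" becomes VK_MAKE_VERSION(0, 9, 0).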
 */
static uint32_t loader_make_version(const char *vers_str)
{
    uint32_t vers = 0, major = 0, minor = 0, patch = 0;
    char *minor_str = NULL;
    char *patch_str = NULL;
    char *cstr;
    char *str;

    if (!vers_str)
        return vers;
    cstr = loader_stack_alloc(strlen(vers_str) + 1);
    strcpy(cstr, vers_str);
    while ((str = strchr(cstr, '.')) != NULL) {
        if (minor_str == NULL) {
            minor_str = str + 1;
            *str = '\0';
            major = atoi(cstr);
        }
        else if (patch_str == NULL) {
            patch_str = str + 1;
            *str = '\0';
            minor = atoi(minor_str);
        }
        else {
            return vers;
        }
        cstr = str + 1;
    }
    // Guard against strings with fewer than two '.' separators, which would
    // otherwise leave patch_str (and possibly minor) unparsed.
    if (patch_str != NULL)
        patch = atoi(patch_str);
    else if (minor_str != NULL)
        minor = atoi(minor_str);

    return VK_MAKE_VERSION(major, minor, patch);
}

bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2)
{
    return strcmp(op1->extName, op2->extName) == 0 ? true : false;
}

/**
 * Search the given ext_array for an extension
 * matching the given vk_ext_prop
 */
bool has_vk_extension_property_array(
    const VkExtensionProperties *vk_ext_prop,
    const uint32_t count,
    const VkExtensionProperties *ext_array)
{
    for (uint32_t i = 0; i < count; i++) {
        if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i]))
            return true;
    }
    return false;
}

/**
 * Search the given ext_list for an extension
 * matching the given vk_ext_prop
 */
bool has_vk_extension_property(
    const VkExtensionProperties *vk_ext_prop,
    const struct loader_extension_list *ext_list)
{
    for (uint32_t i = 0; i < ext_list->count; i++) {
        if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop))
            return true;
    }
    return false;
}

static inline bool loader_is_layer_type_device(const enum layer_type type) {
    if ((type & VK_LAYER_TYPE_DEVICE_EXPLICIT) ||
        (type & VK_LAYER_TYPE_DEVICE_IMPLICIT))
        return true;
    return false;
}

/*
 * Search the given layer list for a layer matching the given layer name
 */
static struct loader_layer_properties *loader_get_layer_property(
    const char *name,
    const struct loader_layer_list *layer_list)
{
    for (uint32_t i = 0; i < layer_list->count; i++) {
        const VkLayerProperties *item = &layer_list->list[i].info;
        if (strcmp(name, item->layerName) == 0)
            return &layer_list->list[i];
    }
    return NULL;
}

/**
 * Get the next unused layer property in the list. Init the property to zero.
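 * The list is allocated on first use and doubles in capacity when full.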
 */
static struct loader_layer_properties *loader_get_next_layer_property(
    const struct loader_instance *inst,
    struct loader_layer_list *layer_list)
{
    if (layer_list->capacity == 0) {
        layer_list->list = loader_heap_alloc(inst,
                                             sizeof(struct loader_layer_properties) * 64,
                                             VK_SYSTEM_ALLOC_TYPE_INTERNAL);
        if (layer_list->list == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't add any layer properties to list");
            return NULL;
        }
        memset(layer_list->list, 0, sizeof(struct loader_layer_properties) * 64);
        layer_list->capacity = sizeof(struct loader_layer_properties) * 64;
    }

    // ensure enough room to add an entry
    if ((layer_list->count + 1) * sizeof(struct loader_layer_properties)
            > layer_list->capacity) {
        layer_list->list = loader_heap_realloc(inst, layer_list->list,
                                               layer_list->capacity,
                                               layer_list->capacity * 2,
                                               VK_SYSTEM_ALLOC_TYPE_INTERNAL);
        if (layer_list->list == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0,
                       "realloc failed for layer list");
            return NULL;
        }
        layer_list->capacity *= 2;
    }

    layer_list->count++;
    return &(layer_list->list[layer_list->count - 1]);
}

/**
 * Remove all layer property entries from the list
 */
void loader_delete_layer_properties(
    const struct loader_instance *inst,
    struct loader_layer_list *layer_list)
{
    uint32_t i;

    if (!layer_list)
        return;

    for (i = 0; i < layer_list->count; i++) {
        loader_destroy_ext_list(inst, &layer_list->list[i].instance_extension_list);
        loader_destroy_ext_list(inst, &layer_list->list[i].device_extension_list);
    }
    layer_list->count = 0;

    if (layer_list->capacity > 0) {
        layer_list->capacity = 0;
        loader_heap_free(inst, layer_list->list);
    }
}

static void loader_add_global_extensions(
    const struct loader_instance *inst,
    const PFN_vkEnumerateInstanceExtensionProperties fp_get_props,
    const char *lib_name,
    struct loader_extension_list *ext_list)
{
    uint32_t i, count;
    VkExtensionProperties *ext_props;
    VkResult res;

    if (!fp_get_props) {
        /* No EnumerateInstanceExtensionProperties defined */
        return;
    }

    res = fp_get_props(NULL, &count, NULL);
    if (res != VK_SUCCESS) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Error getting global extension count from %s", lib_name);
        return;
    }

    if (count == 0) {
        /* No ExtensionProperties to report */
        return;
    }

    ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));

    res = fp_get_props(NULL, &count, ext_props);
    if (res != VK_SUCCESS) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Error getting global extensions from %s", lib_name);
        return;
    }

    for (i = 0; i < count; i++) {
        char spec_version[64];

        snprintf(spec_version, sizeof(spec_version), "%d.%d.%d",
                 VK_MAJOR(ext_props[i].specVersion),
                 VK_MINOR(ext_props[i].specVersion),
                 VK_PATCH(ext_props[i].specVersion));
        loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
                   "Global Extension: %s (%s) version %s",
                   ext_props[i].extName, lib_name, spec_version);
        loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
    }

    return;
}
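
/**
 * Query a physical device through the ICD's
 * vkEnumerateDeviceExtensionProperties entrypoint and append any extensions
 * it reports to ext_list (duplicates are skipped by loader_add_to_ext_list).
 */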
static void loader_add_physical_device_extensions(
    const struct loader_instance *inst,
    PFN_vkEnumerateDeviceExtensionProperties get_phys_dev_ext_props,
    VkPhysicalDevice physical_device,
    const char *lib_name,
    struct loader_extension_list *ext_list)
{
    uint32_t i, count;
    VkResult res;
    VkExtensionProperties *ext_props;

    if (!get_phys_dev_ext_props) {
        /* No EnumerateDeviceExtensionProperties defined */
        return;
    }

    res = get_phys_dev_ext_props(physical_device, NULL, &count, NULL);
    if (res == VK_SUCCESS && count > 0) {
        ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));

        res = get_phys_dev_ext_props(physical_device, NULL, &count, ext_props);
        for (i = 0; i < count; i++) {
            char spec_version[64];

            snprintf(spec_version, sizeof(spec_version), "%d.%d.%d",
                     VK_MAJOR(ext_props[i].specVersion),
                     VK_MINOR(ext_props[i].specVersion),
                     VK_PATCH(ext_props[i].specVersion));
            loader_log(VK_DBG_REPORT_DEBUG_BIT, 0,
                       "PhysicalDevice Extension: %s (%s) version %s",
                       ext_props[i].extName, lib_name, spec_version);
            loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
        }
    } else {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Error getting physical device extension info count from library %s", lib_name);
    }

    return;
}

static bool loader_init_ext_list(const struct loader_instance *inst,
                                 struct loader_extension_list *ext_info)
{
    ext_info->capacity = 32 * sizeof(VkExtensionProperties);
    ext_info->list = loader_heap_alloc(inst, ext_info->capacity, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    if (ext_info->list == NULL) {
        return false;
    }
    memset(ext_info->list, 0, ext_info->capacity);
    ext_info->count = 0;
    return true;
}

void loader_destroy_ext_list(const struct loader_instance *inst,
                             struct loader_extension_list *ext_info)
{
    loader_heap_free(inst, ext_info->list);
    ext_info->count = 0;
    ext_info->capacity = 0;
}

/*
 * Append non-duplicate extension properties defined in props
 * to the given ext_list.
 */
void loader_add_to_ext_list(
    const struct loader_instance *inst,
    struct loader_extension_list *ext_list,
    uint32_t prop_list_count,
    const VkExtensionProperties *props)
{
    uint32_t i;
    const VkExtensionProperties *cur_ext;

    if (ext_list->list == NULL || ext_list->capacity == 0) {
        loader_init_ext_list(inst, ext_list);
    }

    if (ext_list->list == NULL)
        return;

    for (i = 0; i < prop_list_count; i++) {
        cur_ext = &props[i];

        // look for duplicates
        if (has_vk_extension_property(cur_ext, ext_list)) {
            continue;
        }

        // add to list at end
        // check for enough capacity
        if (ext_list->count * sizeof(VkExtensionProperties)
                >= ext_list->capacity) {
            ext_list->list = loader_heap_realloc(inst,
                                                 ext_list->list,
                                                 ext_list->capacity,
                                                 ext_list->capacity * 2,
                                                 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
            // double capacity
            ext_list->capacity *= 2;
        }

        memcpy(&ext_list->list[ext_list->count], cur_ext, sizeof(VkExtensionProperties));
        ext_list->count++;
    }
}

/**
 * Search the given search_list for any layers in the props list.
 * Add these to the output layer_list. Don't add duplicates to the output layer_list.
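 * Returns VK_ERROR_LAYER_NOT_PRESENT if any requested layer name cannot be found.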
 */
static VkResult loader_add_layer_names_to_list(
    const struct loader_instance *inst,
    struct loader_layer_list *output_list,
    uint32_t name_count,
    const char * const *names,
    const struct loader_layer_list *search_list)
{
    struct loader_layer_properties *layer_prop;
    VkResult err = VK_SUCCESS;

    for (uint32_t i = 0; i < name_count; i++) {
        const char *search_target = names[i];
        layer_prop = loader_get_layer_property(search_target, search_list);
        if (!layer_prop) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Unable to find layer %s", search_target);
            err = VK_ERROR_LAYER_NOT_PRESENT;
            continue;
        }

        loader_add_to_layer_list(inst, output_list, 1, layer_prop);
    }

    return err;
}

/*
 * Manage lists of VkLayerProperties
 */
static bool loader_init_layer_list(const struct loader_instance *inst,
                                   struct loader_layer_list *list)
{
    list->capacity = 32 * sizeof(struct loader_layer_properties);
    list->list = loader_heap_alloc(inst, list->capacity, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    if (list->list == NULL) {
        return false;
    }
    memset(list->list, 0, list->capacity);
    list->count = 0;
    return true;
}

void loader_destroy_layer_list(const struct loader_instance *inst,
                               struct loader_layer_list *layer_list)
{
    loader_heap_free(inst, layer_list->list);
    layer_list->count = 0;
    layer_list->capacity = 0;
}

/*
 * Manage list of layer libraries (loader_lib_info)
 */
static bool loader_init_layer_library_list(const struct loader_instance *inst,
                                           struct loader_layer_library_list *list)
{
    list->capacity = 32 * sizeof(struct loader_lib_info);
    list->list = loader_heap_alloc(inst, list->capacity, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    if (list->list == NULL) {
        return false;
    }
    memset(list->list, 0, list->capacity);
    list->count = 0;
    return true;
}

void loader_destroy_layer_library_list(const struct loader_instance *inst,
                                       struct loader_layer_library_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        loader_heap_free(inst, list->list[i].lib_name);
    }
    loader_heap_free(inst, list->list);
    list->count = 0;
    list->capacity = 0;
}

void loader_add_to_layer_library_list(
    const struct loader_instance *inst,
    struct loader_layer_library_list *list,
    uint32_t item_count,
    const struct loader_lib_info *new_items)
{
    uint32_t i;
    struct loader_lib_info *item;

    if (list->list == NULL || list->capacity == 0) {
        loader_init_layer_library_list(inst, list);
    }

    if (list->list == NULL)
        return;

    for (i = 0; i < item_count; i++) {
        item = (struct loader_lib_info *) &new_items[i];

        // look for duplicates; skip items whose lib_name is already in the list
        bool is_duplicate = false;
        for (uint32_t j = 0; j < list->count; j++) {
            if (strcmp(list->list[j].lib_name, item->lib_name) == 0) {
                is_duplicate = true;
                break;
            }
        }
        if (is_duplicate)
            continue;

        // add to list at end
        // check for enough capacity
        if (list->count * sizeof(struct loader_lib_info)
                >= list->capacity) {
            list->list = loader_heap_realloc(inst,
                                             list->list,
                                             list->capacity,
                                             list->capacity * 2,
                                             VK_SYSTEM_ALLOC_TYPE_INTERNAL);
            // double capacity
            list->capacity *= 2;
        }

        memcpy(&list->list[list->count], item, sizeof(struct loader_lib_info));
        list->count++;
    }
}

/*
 * Search the given layer list for a layer
 * matching the given VkLayerProperties
 */
bool has_vk_layer_property(
    const VkLayerProperties *vk_layer_prop,
    const struct loader_layer_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        if (strcmp(vk_layer_prop->layerName, list->list[i].info.layerName) == 0)
            return true;
    }
    return false;
}

/*
 * Search the given layer list for a layer
 * matching the given name
 */
bool has_layer_name(
    const char *name,
    const struct loader_layer_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        if (strcmp(name, list->list[i].info.layerName) == 0)
            return true;
    }
    return false;
}

/*
 * Append non-duplicate layer properties defined in prop_list
 * to the given layer_info list
 */
void loader_add_to_layer_list(
    const struct loader_instance *inst,
    struct loader_layer_list *list,
    uint32_t prop_list_count,
    const struct loader_layer_properties *props)
{
    uint32_t i;
    struct loader_layer_properties *layer;

    if (list->list == NULL || list->capacity == 0) {
        loader_init_layer_list(inst, list);
    }

    if (list->list == NULL)
        return;

    for (i = 0; i < prop_list_count; i++) {
        layer = (struct loader_layer_properties *) &props[i];

        // look for duplicates
        if (has_vk_layer_property(&layer->info, list)) {
            continue;
        }

        // add to list at end
        // check for enough capacity
        if (list->count * sizeof(struct loader_layer_properties)
                >= list->capacity) {
            list->list = loader_heap_realloc(inst,
                                             list->list,
                                             list->capacity,
                                             list->capacity * 2,
                                             VK_SYSTEM_ALLOC_TYPE_INTERNAL);
            // double capacity
            list->capacity *= 2;
        }

        memcpy(&list->list[list->count], layer, sizeof(struct loader_layer_properties));
        list->count++;
    }
}

/**
 * Search the search_list for any layer with a name
 * that matches the given name and a type that matches the given type.
 * Add all matching layers to the found_list.
 * Do not add if found loader_layer_properties is already
 * on the found_list.
 */
static void loader_find_layer_name_add_list(
    const struct loader_instance *inst,
    const char *name,
    const enum layer_type type,
    const struct loader_layer_list *search_list,
    struct loader_layer_list *found_list)
{
    for (uint32_t i = 0; i < search_list->count; i++) {
        struct loader_layer_properties *layer_prop = &search_list->list[i];
        if (0 == strcmp(layer_prop->info.layerName, name) &&
            (layer_prop->type & type)) {
            /* Found a layer with the same name, add to found_list */
            loader_add_to_layer_list(inst, found_list, 1, layer_prop);
        }
    }
}

static VkExtensionProperties *get_extension_property(
    const char *name,
    const struct loader_extension_list *list)
{
    for (uint32_t i = 0; i < list->count; i++) {
        if (strcmp(name, list->list[i].extName) == 0)
            return &list->list[i];
    }
    return NULL;
}

/*
 * For global extensions implemented within the loader (i.e. DEBUG_REPORT)
 * the extension must provide two entry points for the loader to use:
 * - "trampoline" entry point - this is the address returned by GetProcAddr
 *   and will always do what's necessary to support a global call.
 * - "terminator" function - this function will be put at the end of the
 *   instance chain and will contain the necessary logic to call / process
 *   the extension for the appropriate ICDs that are available.
 * There is no generic mechanism for including these functions; the references
 * must be placed into the appropriate loader entry points.
 * GetInstanceProcAddr: call extension GetInstanceProcAddr to check for GetProcAddr requests
 * loader_coalesce_extensions(void) - add extension records to the list of global
 *   extensions available to the app.
 * instance_disp - add function pointer for terminator function to this array.
 * The extension itself should be in a separate file that will be
 * linked directly with the loader.
 */

void loader_get_icd_loader_instance_extensions(
    const struct loader_instance *inst,
    struct loader_icd_libs *icd_libs,
    struct loader_extension_list *inst_exts)
{
    struct loader_extension_list icd_exts;
    loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, "Build ICD instance extension list");
    // traverse scanned icd list adding non-duplicate extensions to the list
    for (uint32_t i = 0; i < icd_libs->count; i++) {
        loader_init_ext_list(inst, &icd_exts);
        loader_add_global_extensions(inst, icd_libs->list[i].EnumerateInstanceExtensionProperties,
                                     icd_libs->list[i].lib_name,
                                     &icd_exts);
        loader_add_to_ext_list(inst, inst_exts,
                               icd_exts.count,
                               icd_exts.list);
        loader_destroy_ext_list(inst, &icd_exts);
    }

    // Traverse loader's extensions, adding non-duplicate extensions to the list
    wsi_swapchain_add_instance_extensions(inst, inst_exts);
    debug_report_add_instance_extensions(inst, inst_exts);
}

struct loader_icd *loader_get_icd_and_device(const VkDevice device,
                                             struct loader_device **found_dev)
{
    *found_dev = NULL;
    for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
        for (struct loader_icd *icd = inst->icds; icd; icd = icd->next) {
            for (struct loader_device *dev = icd->logical_device_list; dev; dev = dev->next)
                if (dev->device == device) {
                    *found_dev = dev;
                    return icd;
                }
        }
    }
    return NULL;
}

static void loader_destroy_logical_device(const struct loader_instance *inst,
                                          struct loader_device *dev)
{
    loader_heap_free(inst, dev->app_extension_props);
    if (dev->activated_layer_list.count)
        loader_destroy_layer_list(inst, &dev->activated_layer_list);
    loader_heap_free(inst, dev);
}

static struct loader_device *loader_add_logical_device(
    const struct loader_instance *inst,
    const VkDevice dev,
    struct loader_device **device_list)
{
    struct loader_device *new_dev;

    new_dev = loader_heap_alloc(inst, sizeof(struct loader_device), VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    if (!new_dev) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc struct loader_device");
        return NULL;
    }

    memset(new_dev, 0, sizeof(struct loader_device));

    new_dev->next = *device_list;
    new_dev->device = dev;
    *device_list = new_dev;
    return new_dev;
}

void loader_remove_logical_device(
    const struct loader_instance *inst,
    VkDevice device)
{
    struct loader_device *found_dev, *dev, *prev_dev;
    struct loader_icd *icd;
    icd = loader_get_icd_and_device(device, &found_dev);

    if (!icd || !found_dev)
        return;

    prev_dev = NULL;
    dev = icd->logical_device_list;
    while (dev && dev != found_dev) {
        prev_dev = dev;
        dev = dev->next;
    }

    if (prev_dev)
        prev_dev->next = found_dev->next;
    else
        icd->logical_device_list = found_dev->next;
    loader_destroy_logical_device(inst, found_dev);
}
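
/*
 * Tear down an ICD entry: destroy any logical devices still attached to it,
 * free its GPU array, and release the loader_icd struct itself.
 */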
static void loader_icd_destroy(
    struct loader_instance *ptr_inst,
    struct loader_icd *icd)
{
    ptr_inst->total_icd_count--;
    loader_heap_free(ptr_inst, icd->gpus);
    for (struct loader_device *dev = icd->logical_device_list; dev; ) {
        struct loader_device *next_dev = dev->next;
        loader_destroy_logical_device(ptr_inst, dev);
        dev = next_dev;
    }

    loader_heap_free(ptr_inst, icd);
}

static struct loader_icd *loader_icd_create(const struct loader_instance *inst)
{
    struct loader_icd *icd;

    icd = loader_heap_alloc(inst, sizeof(*icd), VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    if (!icd)
        return NULL;

    memset(icd, 0, sizeof(*icd));

    return icd;
}

static struct loader_icd *loader_icd_add(
    struct loader_instance *ptr_inst,
    const struct loader_scanned_icds *icd_lib)
{
    struct loader_icd *icd;

    icd = loader_icd_create(ptr_inst);
    if (!icd)
        return NULL;

    icd->this_icd_lib = icd_lib;
    icd->this_instance = ptr_inst;

    /* prepend to the list */
    icd->next = ptr_inst->icds;
    ptr_inst->icds = icd;
    ptr_inst->total_icd_count++;

    return icd;
}

void loader_scanned_icd_clear(
    const struct loader_instance *inst,
    struct loader_icd_libs *icd_libs)
{
    if (icd_libs->capacity == 0)
        return;
    for (uint32_t i = 0; i < icd_libs->count; i++) {
        loader_platform_close_library(icd_libs->list[i].handle);
        loader_heap_free(inst, icd_libs->list[i].lib_name);
    }
    loader_heap_free(inst, icd_libs->list);
    icd_libs->capacity = 0;
    icd_libs->count = 0;
    icd_libs->list = NULL;
}

static void loader_scanned_icd_init(const struct loader_instance *inst,
                                    struct loader_icd_libs *icd_libs)
{
    loader_scanned_icd_clear(inst, icd_libs);
    icd_libs->capacity = 8 * sizeof(struct loader_scanned_icds);
    icd_libs->list = loader_heap_alloc(inst, icd_libs->capacity, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
}

static void loader_scanned_icd_add(
    const struct loader_instance *inst,
    struct loader_icd_libs *icd_libs,
    const char *filename)
{
    loader_platform_dl_handle handle;
    PFN_vkCreateInstance fp_create_inst;
    PFN_vkEnumerateInstanceExtensionProperties fp_get_global_ext_props;
    PFN_vkGetInstanceProcAddr fp_get_proc_addr;
    struct loader_scanned_icds *new_node;

    /* TODO implement ref counting of libraries, for now this function leaves
       libraries open and the scanned_icd_clear closes them */
    // Used to call: dlopen(filename, RTLD_LAZY);
    handle = loader_platform_open_library(filename);
    if (!handle) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, loader_platform_open_library_error(filename));
        return;
    }

#define LOOKUP_LD(func_ptr, func) do {                                                             \
    func_ptr = (PFN_vk ##func) loader_platform_get_proc_address(handle, "vk" #func);               \
    if (!func_ptr) {                                                                               \
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, loader_platform_get_proc_address_error("vk" #func)); \
        return;                                                                                    \
    }                                                                                              \
} while (0)

    LOOKUP_LD(fp_get_proc_addr, GetInstanceProcAddr);
    LOOKUP_LD(fp_create_inst, CreateInstance);
    LOOKUP_LD(fp_get_global_ext_props, EnumerateInstanceExtensionProperties);

#undef LOOKUP_LD

    // check for enough capacity
    if ((icd_libs->count * sizeof(struct loader_scanned_icds)) >= icd_libs->capacity) {
        icd_libs->list = loader_heap_realloc(inst,
                                             icd_libs->list,
                                             icd_libs->capacity,
                                             icd_libs->capacity * 2,
                                             VK_SYSTEM_ALLOC_TYPE_INTERNAL);
        // double capacity
        icd_libs->capacity *= 2;
    }
    new_node = &(icd_libs->list[icd_libs->count]);

    new_node->handle = handle;
    new_node->GetInstanceProcAddr = fp_get_proc_addr;
    new_node->CreateInstance = fp_create_inst;
    new_node->EnumerateInstanceExtensionProperties = fp_get_global_ext_props;

    new_node->lib_name = (char *) loader_heap_alloc(inst,
                                                    strlen(filename) + 1,
                                                    VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    if (!new_node->lib_name) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Out of memory can't add icd");
        return;
    }
    strcpy(new_node->lib_name, filename);
    icd_libs->count++;
}

static bool loader_icd_init_entrys(struct loader_icd *icd,
                                   VkInstance inst,
                                   const PFN_vkGetInstanceProcAddr fp_gipa)
{
    /* initialize entrypoint function pointers */

#define LOOKUP_GIPA(func, required) do {                                  \
    icd->func = (PFN_vk ##func) fp_gipa(inst, "vk" #func);                \
    if (!icd->func && required) {                                         \
        loader_log(VK_DBG_REPORT_WARN_BIT, 0,                             \
                   loader_platform_get_proc_address_error("vk" #func));   \
        return false;                                                     \
    }                                                                     \
} while (0)

    LOOKUP_GIPA(GetDeviceProcAddr, true);
    LOOKUP_GIPA(DestroyInstance, true);
    LOOKUP_GIPA(EnumeratePhysicalDevices, true);
    LOOKUP_GIPA(GetPhysicalDeviceFeatures, true);
    LOOKUP_GIPA(GetPhysicalDeviceFormatProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceImageFormatProperties, true);
    LOOKUP_GIPA(CreateDevice, true);
    LOOKUP_GIPA(GetPhysicalDeviceProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceMemoryProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceQueueFamilyProperties, true);
    LOOKUP_GIPA(EnumerateDeviceExtensionProperties, true);
    LOOKUP_GIPA(GetPhysicalDeviceSparseImageFormatProperties, true);
    LOOKUP_GIPA(DbgCreateMsgCallback, false);
    LOOKUP_GIPA(DbgDestroyMsgCallback, false);
    LOOKUP_GIPA(GetPhysicalDeviceSurfaceSupportKHR, false);

#undef LOOKUP_GIPA

    return true;
}

static void loader_debug_init(void)
{
    const char *env;

    if (g_loader_debug > 0)
        return;

    g_loader_debug = 0;

    /* parse comma-separated debug options */
    env = getenv("VK_LOADER_DEBUG");
    while (env) {
        const char *p = strchr(env, ',');
        size_t len;

        if (p)
            len = p - env;
        else
            len = strlen(env);

        if (len > 0) {
            if (strncmp(env, "warn", len) == 0) {
                g_loader_debug |= LOADER_WARN_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_WARN_BIT;
            } else if (strncmp(env, "info", len) == 0) {
                g_loader_debug |= LOADER_INFO_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_INFO_BIT;
            } else if (strncmp(env, "perf", len) == 0) {
                g_loader_debug |= LOADER_PERF_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_PERF_WARN_BIT;
            } else if (strncmp(env, "error", len) == 0) {
                g_loader_debug |= LOADER_ERROR_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_ERROR_BIT;
            } else if (strncmp(env, "debug", len) == 0) {
                g_loader_debug |= LOADER_DEBUG_BIT;
                g_loader_log_msgs |= VK_DBG_REPORT_DEBUG_BIT;
            }
        }

        if (!p)
            break;

        env = p + 1;
    }
}
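
/*
 * One-time loader initialization: create the global loader locks, configure
 * logging from the VK_LOADER_DEBUG environment variable, and route cJSON's
 * allocations through the loader heap (via the TLS instance).
 */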
void loader_initialize(void)
{
    // initialize mutexes
    loader_platform_thread_create_mutex(&loader_lock);
    loader_platform_thread_create_mutex(&loader_json_lock);

    // initialize logging
    loader_debug_init();

    // initialize cJSON to use the loader's alloc callbacks
    cJSON_Hooks alloc_fns = {
        .malloc_fn = loader_tls_heap_alloc,
        .free_fn = loader_tls_heap_free,
    };
    cJSON_InitHooks(&alloc_fns);
}

struct loader_manifest_files {
    uint32_t count;
    char **filename_list;
};

/**
 * Get next file or dirname given a string list or registry key path
 *
 * \returns
 * A pointer to the first char of the next path in the list (an empty string
 * if there are no more paths).
 * Note: the input string is modified in some cases. PASS IN A COPY!
 */
static char *loader_get_next_path(char *path)
{
    uint32_t len;
    char *next;

    if (path == NULL)
        return NULL;
    next = strchr(path, PATH_SEPERATOR);
    if (next == NULL) {
        len = (uint32_t) strlen(path);
        next = path + len;
    }
    else {
        *next = '\0';
        next++;
    }

    return next;
}

/**
 * Given a path which is absolute or relative, expand the path if relative;
 * otherwise leave the path unmodified. The path which is relative from is
 * given in rel_base and should include a trailing directory separator '/'.
 *
 * \returns
 * A string in out_fullpath of the full absolute path.
 * Side effect is that the dir string may be modified.
 */
static void loader_expand_path(const char *path,
                               const char *rel_base,
                               size_t out_size,
                               char *out_fullpath)
{
    if (loader_platform_is_path_absolute(path)) {
        strncpy(out_fullpath, path, out_size);
        out_fullpath[out_size - 1] = '\0';
    }
    else {
        // convert relative to absolute path based on rel_base
        size_t len = strlen(path);
        strncpy(out_fullpath, rel_base, out_size);
        out_fullpath[out_size - 1] = '\0';
        assert(out_size >= strlen(out_fullpath) + len + 1);
        strncat(out_fullpath, path, len);
    }
}

/**
 * Given a filename (file) and a list of paths (dir), try to find an existing
 * file in the paths. If the filename is already a path, the given paths are
 * not searched.
 *
 * \returns
 * A string in out_fullpath of either the full path or file.
 * Side effect is that the dir string may be modified.
 */
static void loader_get_fullpath(const char *file,
                                char *dir,
                                size_t out_size,
                                char *out_fullpath)
{
    char *next_dir;
    if (strchr(file, DIRECTORY_SYMBOL) == NULL) {
        // check whether the file exists after prepending each given path
        while (*dir) {
            next_dir = loader_get_next_path(dir);
            snprintf(out_fullpath, out_size, "%s%c%s",
                     dir, DIRECTORY_SYMBOL, file);
            if (loader_platform_file_exists(out_fullpath)) {
                return;
            }
            dir = next_dir;
        }
    }
    snprintf(out_fullpath, out_size, "%s", file);
}

/**
 * Read a JSON file into a buffer.
 *
 * \returns
 * A pointer to a cJSON object representing the JSON parse tree.
 * This returned buffer should be freed by the caller.
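 * NULL is returned if the file cannot be opened, read, or parsed.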
 */
static cJSON *loader_get_json(const char *filename)
{
    FILE *file;
    char *json_buf;
    cJSON *json;
    uint64_t len;
    file = fopen(filename, "rb");
    if (!file) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Couldn't open JSON file %s", filename);
        return NULL;
    }
    fseek(file, 0, SEEK_END);
    len = ftell(file);
    fseek(file, 0, SEEK_SET);
    json_buf = (char*) loader_stack_alloc(len + 1);
    if (json_buf == NULL) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get JSON file");
        fclose(file);
        return NULL;
    }
    if (fread(json_buf, sizeof(char), len, file) != len) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "fread failed can't get JSON file");
        fclose(file);
        return NULL;
    }
    fclose(file);
    json_buf[len] = '\0';

    // parse text from file
    json = cJSON_Parse(json_buf);
    if (json == NULL)
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Can't parse JSON file %s", filename);
    return json;
}

/**
 * Do a deep copy of the loader_layer_properties structure.
 */
static void loader_copy_layer_properties(
    const struct loader_instance *inst,
    struct loader_layer_properties *dst,
    struct loader_layer_properties *src)
{
    memcpy(dst, src, sizeof(*src));
    dst->instance_extension_list.list = loader_heap_alloc(
        inst,
        sizeof(VkExtensionProperties) * src->instance_extension_list.count,
        VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    dst->instance_extension_list.capacity = sizeof(VkExtensionProperties) *
        src->instance_extension_list.count;
    memcpy(dst->instance_extension_list.list, src->instance_extension_list.list,
           dst->instance_extension_list.capacity);
    dst->device_extension_list.list = loader_heap_alloc(
        inst,
        sizeof(VkExtensionProperties) * src->device_extension_list.count,
        VK_SYSTEM_ALLOC_TYPE_INTERNAL);
    dst->device_extension_list.capacity = sizeof(VkExtensionProperties) *
        src->device_extension_list.count;
    memcpy(dst->device_extension_list.list, src->device_extension_list.list,
           dst->device_extension_list.capacity);
}

/**
 * Given a cJSON struct (json) of the top level JSON object from a layer manifest
 * file, add an entry to the layer_list.
 * Fill out the layer_properties in this list entry from the input cJSON object.
 *
 * \returns
 * void
 * layer_list has a new entry and is initialized accordingly.
 * If the json input object does not have all the required fields no entry
 * is added to the list.
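 * Implicit layers must additionally provide the "disable_environment" object.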
 */
static void loader_add_layer_properties(const struct loader_instance *inst,
                                        struct loader_layer_list *layer_instance_list,
                                        struct loader_layer_list *layer_device_list,
                                        cJSON *json,
                                        bool is_implicit,
                                        char *filename)
{
    /* Fields in layer manifest file that are required:
     * (required) “file_format_version”
     * following are required in the "layer" object:
     * (required) "name"
     * (required) "type"
     * (required) “library_path”
     * (required) “abi_versions”
     * (required) “implementation_version”
     * (required) “description”
     * (required for implicit layers) “disable_environment”
     *
     * First get all required items and if any missing abort
     */

    cJSON *item, *layer_node, *ext_item;
    char *temp;
    char *name, *type, *library_path, *abi_versions;
    char *implementation_version, *description;
    cJSON *disable_environment;
    int i;
    VkExtensionProperties ext_prop;
    item = cJSON_GetObjectItem(json, "file_format_version");
    if (item == NULL) {
        return;
    }
    char *file_vers = cJSON_PrintUnformatted(item);
    loader_log(VK_DBG_REPORT_INFO_BIT, 0, "Found manifest file %s, version %s",
               filename, file_vers);
    if (strcmp(file_vers, "\"0.9.0\"") != 0)
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Unexpected manifest file version (expected 0.9.0), may cause errors");
    loader_tls_heap_free(file_vers);

    layer_node = cJSON_GetObjectItem(json, "layer");
    if (layer_node == NULL) {
        loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"layer\" object in manifest JSON file, skipping");
        return;
    }

    // loop through all "layer" objects in the file
    do {
#define GET_JSON_OBJECT(node, var) {                    \
        var = cJSON_GetObjectItem(node, #var);          \
        if (var == NULL) {                              \
            layer_node = layer_node->next;              \
            continue;                                   \
        }                                               \
        }
#define GET_JSON_ITEM(node, var) {                      \
        item = cJSON_GetObjectItem(node, #var);         \
        if (item == NULL) {                             \
            layer_node = layer_node->next;              \
            continue;                                   \
        }                                               \
        temp = cJSON_Print(item);                       \
        temp[strlen(temp) - 1] = '\0';                  \
        var = loader_stack_alloc(strlen(temp) + 1);     \
        strcpy(var, &temp[1]);                          \
        loader_tls_heap_free(temp);                     \
        }
        GET_JSON_ITEM(layer_node, name)
        GET_JSON_ITEM(layer_node, type)
        GET_JSON_ITEM(layer_node, library_path)
        GET_JSON_ITEM(layer_node, abi_versions)
        GET_JSON_ITEM(layer_node, implementation_version)
        GET_JSON_ITEM(layer_node, description)
        if (is_implicit) {
            GET_JSON_OBJECT(layer_node, disable_environment)
        }
#undef GET_JSON_ITEM
#undef GET_JSON_OBJECT

        // add list entry
        struct loader_layer_properties *props = NULL;
        if (!strcmp(type, "DEVICE")) {
            if (layer_device_list == NULL) {
                layer_node = layer_node->next;
                continue;
            }
            props = loader_get_next_layer_property(inst, layer_device_list);
            props->type = (is_implicit) ? VK_LAYER_TYPE_DEVICE_IMPLICIT : VK_LAYER_TYPE_DEVICE_EXPLICIT;
        }
        if (!strcmp(type, "INSTANCE")) {
            if (layer_instance_list == NULL) {
                layer_node = layer_node->next;
                continue;
            }
            props = loader_get_next_layer_property(inst, layer_instance_list);
            props->type = (is_implicit) ? VK_LAYER_TYPE_INSTANCE_IMPLICIT : VK_LAYER_TYPE_INSTANCE_EXPLICIT;
        }
        if (!strcmp(type, "GLOBAL")) {
            if (layer_instance_list != NULL)
                props = loader_get_next_layer_property(inst, layer_instance_list);
            else if (layer_device_list != NULL)
                props = loader_get_next_layer_property(inst, layer_device_list);
            else {
                layer_node = layer_node->next;
                continue;
            }
            props->type = (is_implicit) ? VK_LAYER_TYPE_GLOBAL_IMPLICIT : VK_LAYER_TYPE_GLOBAL_EXPLICIT;
        }

        if (props == NULL) {
            layer_node = layer_node->next;
            continue;
        }

        strncpy(props->info.layerName, name, sizeof(props->info.layerName));
        props->info.layerName[sizeof(props->info.layerName) - 1] = '\0';

        char *fullpath = props->lib_name;
        char *rel_base;
        if (strchr(library_path, DIRECTORY_SYMBOL) == NULL) {
            // a filename which is assumed to be in the system directory
            char *def_path = loader_stack_alloc(strlen(DEFAULT_VK_LAYERS_PATH) + 1);
            strcpy(def_path, DEFAULT_VK_LAYERS_PATH);
            loader_get_fullpath(library_path, def_path, MAX_STRING_SIZE, fullpath);
        } else {
            // a relative or absolute path
            char *name_copy = loader_stack_alloc(strlen(filename) + 2);
            size_t len;
            strcpy(name_copy, filename);
            rel_base = loader_platform_dirname(name_copy);
            len = strlen(rel_base);
            rel_base[len] = DIRECTORY_SYMBOL;
            rel_base[len + 1] = '\0';
            loader_expand_path(library_path, rel_base, MAX_STRING_SIZE, fullpath);
        }
        props->info.specVersion = loader_make_version(abi_versions);
        props->info.implVersion = loader_make_version(implementation_version);
        strncpy((char *) props->info.description, description, sizeof(props->info.description));
        props->info.description[sizeof(props->info.description) - 1] = '\0';
        if (is_implicit) {
            strncpy(props->disable_env_var.name, disable_environment->child->string, sizeof(props->disable_env_var.name));
            props->disable_env_var.name[sizeof(props->disable_env_var.name) - 1] = '\0';
            strncpy(props->disable_env_var.value, disable_environment->child->valuestring, sizeof(props->disable_env_var.value));
            props->disable_env_var.value[sizeof(props->disable_env_var.value) - 1] = '\0';
        }

        /**
         * Now get all optional items and objects and put in list:
         * functions
         * instance_extensions
         * device_extensions
         * enable_environment (implicit layers only)
         */
#define GET_JSON_OBJECT(node, var) {                    \
        var = cJSON_GetObjectItem(node, #var);          \
        }
#define GET_JSON_ITEM(node, var) {                      \
        item = cJSON_GetObjectItem(node, #var);         \
        if (item != NULL) {                             \
            temp = cJSON_Print(item);                   \
            temp[strlen(temp) - 1] = '\0';              \
            var = loader_stack_alloc(strlen(temp) + 1); \
            strcpy(var, &temp[1]);                      \
            loader_tls_heap_free(temp);                 \
        }                                               \
        }

        cJSON *instance_extensions, *device_extensions, *functions, *enable_environment;
        char *vkGetInstanceProcAddr = NULL, *vkGetDeviceProcAddr = NULL, *version = NULL;
        GET_JSON_OBJECT(layer_node, functions)
        if (functions != NULL) {
            GET_JSON_ITEM(functions, vkGetInstanceProcAddr)
            GET_JSON_ITEM(functions, vkGetDeviceProcAddr)
            if (vkGetInstanceProcAddr != NULL)
                strncpy(props->functions.str_gipa, vkGetInstanceProcAddr, sizeof(props->functions.str_gipa));
            props->functions.str_gipa[sizeof(props->functions.str_gipa) - 1] = '\0';
            if (vkGetDeviceProcAddr != NULL)
                strncpy(props->functions.str_gdpa, vkGetDeviceProcAddr, sizeof(props->functions.str_gdpa));
            props->functions.str_gdpa[sizeof(props->functions.str_gdpa) - 1] = '\0';
        }
        GET_JSON_OBJECT(layer_node, instance_extensions)
        if (instance_extensions != NULL) {
            int count = cJSON_GetArraySize(instance_extensions);
            for (i = 0; i < count; i++) {
                ext_item = cJSON_GetArrayItem(instance_extensions, i);
                GET_JSON_ITEM(ext_item, name)
                GET_JSON_ITEM(ext_item, version)
                strncpy(ext_prop.extName, name, sizeof(ext_prop.extName));
                ext_prop.extName[sizeof(ext_prop.extName) - 1] = '\0';
                ext_prop.specVersion = loader_make_version(version);
                loader_add_to_ext_list(inst, &props->instance_extension_list, 1, &ext_prop);
            }
        }
        GET_JSON_OBJECT(layer_node, device_extensions)
        if (device_extensions != NULL) {
            int count = cJSON_GetArraySize(device_extensions);
            for (i = 0; i < count; i++) {
                ext_item = cJSON_GetArrayItem(device_extensions, i);
                GET_JSON_ITEM(ext_item, name);
                GET_JSON_ITEM(ext_item, version);
                strncpy(ext_prop.extName, name, sizeof(ext_prop.extName));
                ext_prop.extName[sizeof(ext_prop.extName) - 1] = '\0';
                ext_prop.specVersion = loader_make_version(version);
                loader_add_to_ext_list(inst, &props->device_extension_list, 1, &ext_prop);
            }
        }
        if (is_implicit) {
            GET_JSON_OBJECT(layer_node, enable_environment)
            strncpy(props->enable_env_var.name, enable_environment->child->string, sizeof(props->enable_env_var.name));
            props->enable_env_var.name[sizeof(props->enable_env_var.name) - 1] = '\0';
            strncpy(props->enable_env_var.value, enable_environment->child->valuestring, sizeof(props->enable_env_var.value));
            props->enable_env_var.value[sizeof(props->enable_env_var.value) - 1] = '\0';
        }
#undef GET_JSON_ITEM
#undef GET_JSON_OBJECT
        // global layers need to be added to both the device and instance lists
        if (!strcmp(type, "GLOBAL")) {
            struct loader_layer_properties *dev_props;
            if (layer_instance_list == NULL || layer_device_list == NULL) {
                layer_node = layer_node->next;
                continue;
            }
            dev_props = loader_get_next_layer_property(inst, layer_device_list);
            // copy into device layer list
            loader_copy_layer_properties(inst, dev_props, props);
        }
        layer_node = layer_node->next;
    } while (layer_node != NULL);
    return;
}

/**
 * Find the Vulkan library manifest files.
 *
 * This function scans the location or env_override directories/files
 * for a list of JSON manifest files. If env_override is non-NULL
 * and has a valid value, then the location is ignored. Otherwise
 * location is used to look for manifest files. The location
 * is interpreted as a Registry path on Windows and directory path(s)
 * on Linux.
 *
 * \returns
 * A string list of manifest files to be opened in the out_files param.
 * The list has a pointer to a string for each manifest filename.
 * When done using the list in out_files, pointers should be freed.
 * Location or override string lists can be either files or directories as follows:
 *             | location | override
 * --------------------------------------
 * Win ICD     | files    | files
 * Win Layer   | files    | dirs
 * Linux ICD   | dirs     | files
 * Linux Layer | dirs     | dirs
 */
static void loader_get_manifest_files(const struct loader_instance *inst,
                                      const char *env_override,
                                      bool is_layer,
                                      const char *location,
                                      struct loader_manifest_files *out_files)
{
    char *override = NULL;
    char *loc;
    char *file, *next_file, *name;
    size_t alloced_count = 64;
    char full_path[2048];
    DIR *sysdir = NULL;
    bool list_is_dirs = false;
    struct dirent *dent;

    out_files->count = 0;
    out_files->filename_list = NULL;

    if (env_override != NULL && (override = getenv(env_override))) {
#if defined(__linux__)
        if (geteuid() != getuid()) {
            /* Don't allow setuid apps to use the env var: */
            override = NULL;
        }
#endif
    }

    if (location == NULL) {
        loader_log(VK_DBG_REPORT_ERROR_BIT, 0,
                   "Can't get manifest files with NULL location, env_override=%s",
                   env_override);
        return;
    }

#if defined(__linux__)
    list_is_dirs = (override == NULL || is_layer) ? true : false;
#else //WIN32
    list_is_dirs = (is_layer && override != NULL) ? true : false;
#endif
    // Make a copy of the input we are using so it is not modified
    // Also handle getting the location(s) from registry on Windows
    if (override == NULL) {
        loc = loader_stack_alloc(strlen(location) + 1);
        if (loc == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files");
            return;
        }
        strcpy(loc, location);
#if defined(_WIN32)
        loc = loader_get_registry_files(inst, loc);
        if (loc == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Registry lookup failed can't get manifest files");
            return;
        }
#endif
    }
    else {
        loc = loader_stack_alloc(strlen(override) + 1);
        if (loc == NULL) {
            loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files");
            return;
        }
        strcpy(loc, override);
    }

    // Print out the paths being searched if debugging is enabled
    loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, "Searching the following paths for manifest files: %s\n", loc);

    file = loc;
    while (*file) {
        next_file = loader_get_next_path(file);
        if (list_is_dirs) {
            sysdir = opendir(file);
            name = NULL;
            if (sysdir) {
                dent = readdir(sysdir);
                if (dent == NULL)
                    break;
                name = &(dent->d_name[0]);
                loader_get_fullpath(name, file, sizeof(full_path), full_path);
                name = full_path;
            }
        }
        else {
#if defined(__linux__)
            // only Linux has relative paths
            char *dir;
            // make a copy of location so it isn't modified
            dir = loader_stack_alloc(strlen(location) + 1);
            if (dir == NULL) {
                loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files");
                return;
            }
            strcpy(dir, location);

            loader_get_fullpath(file, dir, sizeof(full_path), full_path);

            name = full_path;
#else // WIN32
            name = file;
#endif
        }
        while (name) {
            /* Look for files ending with ".json" suffix */
            uint32_t nlen = (uint32_t) strlen(name);
            const char *suf = name + nlen - 5;
            if ((nlen > 5) && !strncmp(suf, ".json", 5)) {
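                // Grow the output list as needed: allocate space for
                // alloced_count entries on the first match, then double the
                // capacity each time it fills up.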
                if (out_files->count == 0) {
                    out_files->filename_list = loader_heap_alloc(inst,
                                                                 alloced_count * sizeof(char *),
                                                                 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
                }
                else if (out_files->count == alloced_count) {
                    out_files->filename_list = loader_heap_realloc(inst,
                                                                   out_files->filename_list,
                                                                   alloced_count * sizeof(char *),
                                                                   alloced_count * sizeof(char *) * 2,
                                                                   VK_SYSTEM_ALLOC_TYPE_INTERNAL);
                    alloced_count *= 2;
                }
                if (out_files->filename_list == NULL) {
                    loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't alloc manifest file list");
                    return;
                }
                out_files->filename_list[out_files->count] = loader_heap_alloc(
                    inst,
                    strlen(name) + 1,
                    VK_SYSTEM_ALLOC_TYPE_INTERNAL);
                if (out_files->filename_list[out_files->count] == NULL) {
                    loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Out of memory can't get manifest files");
                    return;
                }
                strcpy(out_files->filename_list[out_files->count], name);
                out_files->count++;
            } else if (!list_is_dirs) {
                loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Skipping manifest file %s, file name must end in .json", name);
            }
            if (list_is_dirs) {
                dent = readdir(sysdir);
                if (dent == NULL)
                    break;
                name = &(dent->d_name[0]);
                loader_get_fullpath(name, file, sizeof(full_path), full_path);
                name = full_path;
            }
            else {
                break;
            }
        }
        if (sysdir)
            closedir(sysdir);
        file = next_file;
    }
    return;
}

void loader_init_icd_lib_list()
{

}

void loader_destroy_icd_lib_list()
{

}
/**
 * Try to find the Vulkan ICD driver(s).
 *
 * This function scans the default system loader path(s) or path
 * specified by the \c VK_ICD_FILENAMES environment variable in
 * order to find loadable VK ICDs manifest files. From these
 * manifest files it finds the ICD libraries.
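 * Each manifest is a JSON file whose "ICD" object names the driver library
 * via its "library_path" entry.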
1793 *
1794 * \returns
1795 * A list of ICDs that were discovered.
1796 */
1797void loader_icd_scan(
1798 const struct loader_instance *inst,
1799 struct loader_icd_libs *icds)
1800{
1801 char *file_str;
1802 struct loader_manifest_files manifest_files;
1803
1804 loader_scanned_icd_init(inst, icds);
1805 // Get a list of manifest files for ICDs
1806 loader_get_manifest_files(inst, "VK_ICD_FILENAMES", false,
1807 DEFAULT_VK_DRIVERS_INFO, &manifest_files);
1808 if (manifest_files.count == 0)
1809 return;
1810 loader_platform_thread_lock_mutex(&loader_json_lock);
1811 for (uint32_t i = 0; i < manifest_files.count; i++) {
1812 file_str = manifest_files.filename_list[i];
1813 if (file_str == NULL)
1814 continue;
1815
1816 cJSON *json;
1817 json = loader_get_json(file_str);
1818 if (!json)
1819 continue;
1820 cJSON *item;
1821 item = cJSON_GetObjectItem(json, "file_format_version");
1822 if (item == NULL) {
1823 loader_platform_thread_unlock_mutex(&loader_json_lock);
1824 return;
1825 }
1826 char *file_vers = cJSON_Print(item);
1827 loader_log(VK_DBG_REPORT_INFO_BIT, 0, "Found manifest file %s, version %s",
1828 file_str, file_vers);
1829 if (strcmp(file_vers, "\"1.0.0\"") != 0)
1830 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Unexpected manifest file version (expected 1.0.0), may cause errors");
1831 loader_tls_heap_free(file_vers);
1832 item = cJSON_GetObjectItem(json, "ICD");
1833 if (item != NULL) {
1834 item = cJSON_GetObjectItem(item, "library_path");
1835 if (item != NULL) {
1836 char *temp = cJSON_Print(item);
1837 if (!temp || strlen(temp) == 0) {
1838 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"library_path\" in ICD JSON file %s, skipping", file_str);
1839 loader_tls_heap_free(temp);
1840 loader_heap_free(inst, file_str);
1841 cJSON_Delete(json);
1842 continue;
1843 }
1844 //strip out extra quotes
1845 temp[strlen(temp) - 1] = '\0';
1846 char *library_path = loader_stack_alloc(strlen(temp) + 1);
1847 strcpy(library_path, &temp[1]);
1848 loader_tls_heap_free(temp);
1849 if (!library_path || strlen(library_path) == 0) {
1850 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"library_path\" in ICD JSON file %s, skipping", file_str);
1851 loader_heap_free(inst, file_str);
1852 cJSON_Delete(json);
1853 continue;
1854 }
1855 char *fullpath;
1856 uint32_t path_len;
1857 char *rel_base;
1858 // Print out the paths being searched if debugging is enabled
1859 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, "Searching for ICD drivers named %s default dir %s\n", library_path, DEFAULT_VK_DRIVERS_PATH);
1860 if (strchr(library_path, DIRECTORY_SYMBOL) == NULL) {
1861 // a filename which is assumed to be in the system directory
1862 char *def_path = loader_stack_alloc(strlen(DEFAULT_VK_DRIVERS_PATH) + 1);
1863 strcpy(def_path, DEFAULT_VK_DRIVERS_PATH);
1864 path_len = strlen(DEFAULT_VK_DRIVERS_PATH) + strlen(library_path) + 2;
1865 fullpath = loader_stack_alloc(path_len);
1866#if defined(__linux__)
1867 loader_get_fullpath(library_path, def_path, path_len, fullpath);
1868#else // WIN32
1869 strncpy(fullpath, library_path, path_len);
1870 fullpath[path_len - 1] = '\0';
1871#endif
1872 } else {
1873 // a relative or absolute path
1874 char *name_copy = loader_stack_alloc(strlen(file_str) + 2);
1875 size_t len;
1876 strcpy(name_copy, file_str);
1877 rel_base = loader_platform_dirname(name_copy);
1878 len = strlen(rel_base);
1879 rel_base[len] = DIRECTORY_SYMBOL;
1880 rel_base[len + 1] = '\0';
1881 path_len = strlen(rel_base) + strlen(library_path) + 2;
1882 fullpath = loader_stack_alloc(path_len);
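 // library_path was given as a relative or absolute path;
 // loader_expand_path() resolves it against rel_base (the directory
 // containing the manifest file) to produce the full library path.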
1883 loader_expand_path(library_path, rel_base, path_len, fullpath); 1884 } 1885 loader_scanned_icd_add(inst, icds, fullpath); 1886 } 1887 1888 } 1889 else 1890 loader_log(VK_DBG_REPORT_WARN_BIT, 0, "Can't find \"ICD\" object in ICD JSON file %s, skipping", file_str); 1891 1892 loader_heap_free(inst, file_str); 1893 cJSON_Delete(json); 1894 } 1895 loader_heap_free(inst, manifest_files.filename_list); 1896 loader_platform_thread_unlock_mutex(&loader_json_lock); 1897} 1898 1899 1900void loader_layer_scan( 1901 const struct loader_instance *inst, 1902 struct loader_layer_list *instance_layers, 1903 struct loader_layer_list *device_layers) 1904{ 1905 char *file_str; 1906 struct loader_manifest_files manifest_files; 1907 cJSON *json; 1908 uint32_t i; 1909 1910 // Get a list of manifest files for layers 1911 loader_get_manifest_files(inst, LAYERS_PATH_ENV, true, DEFAULT_VK_LAYERS_INFO, 1912 &manifest_files); 1913 if (manifest_files.count == 0) 1914 return; 1915 1916#if 0 //TODO 1917 /** 1918 * We need a list of the layer libraries, not just a list of 1919 * the layer properties (a layer library could expose more than 1920 * one layer property). This list of scanned layers would be 1921 * used to check for global and physicaldevice layer properties. 1922 */ 1923 if (!loader_init_layer_library_list(&loader.scanned_layer_libraries)) { 1924 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 1925 "Alloc for layer list failed: %s line: %d", __FILE__, __LINE__); 1926 return; 1927 } 1928#endif 1929 1930 /* cleanup any previously scanned libraries */ 1931 loader_delete_layer_properties(inst, instance_layers); 1932 loader_delete_layer_properties(inst, device_layers); 1933 1934 loader_platform_thread_lock_mutex(&loader_json_lock); 1935 for (i = 0; i < manifest_files.count; i++) { 1936 file_str = manifest_files.filename_list[i]; 1937 if (file_str == NULL) 1938 continue; 1939 1940 // parse file into JSON struct 1941 json = loader_get_json(file_str); 1942 if (!json) { 1943 continue; 1944 } 1945 1946 //TODO pass in implicit versus explicit bool 1947 //TODO error if device layers expose instance_extensions 1948 //TODO error if instance layers expose device extensions 1949 loader_add_layer_properties(inst, 1950 instance_layers, 1951 device_layers, 1952 json, 1953 false, 1954 file_str); 1955 1956 loader_heap_free(inst, file_str); 1957 cJSON_Delete(json); 1958 } 1959 loader_heap_free(inst, manifest_files.filename_list); 1960 loader_platform_thread_unlock_mutex(&loader_json_lock); 1961} 1962 1963static PFN_vkVoidFunction VKAPI loader_gpa_instance_internal(VkInstance inst, const char * pName) 1964{ 1965 // inst is not wrapped 1966 if (inst == VK_NULL_HANDLE) { 1967 return NULL; 1968 } 1969 VkLayerInstanceDispatchTable* disp_table = * (VkLayerInstanceDispatchTable **) inst; 1970 void *addr; 1971 1972 if (!strcmp(pName, "vkGetInstanceProcAddr")) 1973 return (void *) loader_gpa_instance_internal; 1974 1975 if (disp_table == NULL) 1976 return NULL; 1977 1978 addr = loader_lookup_instance_dispatch_table(disp_table, pName); 1979 if (addr) { 1980 return addr; 1981 } 1982 1983 if (disp_table->GetInstanceProcAddr == NULL) { 1984 return NULL; 1985 } 1986 return disp_table->GetInstanceProcAddr(inst, pName); 1987} 1988 1989struct loader_icd * loader_get_icd(const VkPhysicalDevice gpu, uint32_t *gpu_index) 1990{ 1991 1992 *gpu_index = 0; 1993 for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) { 1994 for (struct loader_icd *icd = inst->icds; icd; icd = icd->next) { 1995 for (uint32_t i = 0; i < icd->gpu_count; 
i++) 1996 if (icd->gpus[i] == gpu) { 1997 *gpu_index = i; 1998 return icd; 1999 } 2000 } 2001 } 2002 return NULL; 2003} 2004 2005static loader_platform_dl_handle loader_add_layer_lib( 2006 const struct loader_instance *inst, 2007 const char *chain_type, 2008 struct loader_layer_properties *layer_prop) 2009{ 2010 struct loader_lib_info *new_layer_lib_list, *my_lib; 2011 size_t new_alloc_size; 2012 /* 2013 * TODO: We can now track this information in the 2014 * scanned_layer_libraries list. 2015 */ 2016 for (uint32_t i = 0; i < loader.loaded_layer_lib_count; i++) { 2017 if (strcmp(loader.loaded_layer_lib_list[i].lib_name, layer_prop->lib_name) == 0) { 2018 /* Have already loaded this library, just increment ref count */ 2019 loader.loaded_layer_lib_list[i].ref_count++; 2020 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, 2021 "%s Chain: Increment layer reference count for layer library %s", 2022 chain_type, layer_prop->lib_name); 2023 return loader.loaded_layer_lib_list[i].lib_handle; 2024 } 2025 } 2026 2027 /* Haven't seen this library so load it */ 2028 new_alloc_size = 0; 2029 if (loader.loaded_layer_lib_capacity == 0) 2030 new_alloc_size = 8 * sizeof(struct loader_lib_info); 2031 else if (loader.loaded_layer_lib_capacity <= loader.loaded_layer_lib_count * 2032 sizeof(struct loader_lib_info)) 2033 new_alloc_size = loader.loaded_layer_lib_capacity * 2; 2034 2035 if (new_alloc_size) { 2036 new_layer_lib_list = loader_heap_realloc( 2037 inst, loader.loaded_layer_lib_list, 2038 loader.loaded_layer_lib_capacity, 2039 new_alloc_size, 2040 VK_SYSTEM_ALLOC_TYPE_INTERNAL); 2041 if (!new_layer_lib_list) { 2042 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "loader: realloc failed in loader_add_layer_lib"); 2043 return NULL; 2044 } 2045 loader.loaded_layer_lib_capacity = new_alloc_size; 2046 } else 2047 new_layer_lib_list = loader.loaded_layer_lib_list; 2048 my_lib = &new_layer_lib_list[loader.loaded_layer_lib_count]; 2049 2050 strncpy(my_lib->lib_name, layer_prop->lib_name, sizeof(my_lib->lib_name)); 2051 my_lib->lib_name[sizeof(my_lib->lib_name) - 1] = '\0'; 2052 my_lib->ref_count = 0; 2053 my_lib->lib_handle = NULL; 2054 2055 if ((my_lib->lib_handle = loader_platform_open_library(my_lib->lib_name)) == NULL) { 2056 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2057 loader_platform_open_library_error(my_lib->lib_name)); 2058 return NULL; 2059 } else { 2060 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, 2061 "Chain: %s: Loading layer library %s", 2062 chain_type, layer_prop->lib_name); 2063 } 2064 loader.loaded_layer_lib_count++; 2065 loader.loaded_layer_lib_list = new_layer_lib_list; 2066 my_lib->ref_count++; 2067 2068 return my_lib->lib_handle; 2069} 2070 2071static void loader_remove_layer_lib( 2072 struct loader_instance *inst, 2073 struct loader_layer_properties *layer_prop) 2074{ 2075 uint32_t idx; 2076 struct loader_lib_info *new_layer_lib_list, *my_lib = NULL; 2077 2078 for (uint32_t i = 0; i < loader.loaded_layer_lib_count; i++) { 2079 if (strcmp(loader.loaded_layer_lib_list[i].lib_name, layer_prop->lib_name) == 0) { 2080 /* found matching library */ 2081 idx = i; 2082 my_lib = &loader.loaded_layer_lib_list[i]; 2083 break; 2084 } 2085 } 2086 2087 if (my_lib) { 2088 my_lib->ref_count--; 2089 if (my_lib->ref_count > 0) { 2090 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, 2091 "Decrement reference count for layer library %s", layer_prop->lib_name); 2092 return; 2093 } 2094 } 2095 loader_platform_close_library(my_lib->lib_handle); 2096 loader_log(VK_DBG_REPORT_DEBUG_BIT, 0, 2097 "Unloading layer library %s", layer_prop->lib_name); 
2098
2099 /* Need to remove unused library from list */
2100 new_layer_lib_list = loader_heap_alloc(inst,
2101 loader.loaded_layer_lib_capacity,
2102 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
2103 if (!new_layer_lib_list) {
2104 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "loader: heap alloc failed loader_remove_layer_library");
2105 return;
2106 }
2107
2108 if (idx > 0) {
2109 /* Copy records before idx */
2110 memcpy(new_layer_lib_list, &loader.loaded_layer_lib_list[0],
2111 sizeof(struct loader_lib_info) * idx);
2112 }
2113 if (idx < (loader.loaded_layer_lib_count - 1)) {
2114 /* Copy records after idx */
2115 memcpy(&new_layer_lib_list[idx], &loader.loaded_layer_lib_list[idx+1],
2116 sizeof(struct loader_lib_info) * (loader.loaded_layer_lib_count - idx - 1));
2117 }
2118
2119 loader_heap_free(inst, loader.loaded_layer_lib_list);
2120 loader.loaded_layer_lib_count--;
2121 loader.loaded_layer_lib_list = new_layer_lib_list;
2122}
2123
2124
2125/**
2126 * Go through search_list and find any layers which match type. If a layer
2127 * with a matching type is found, add it to the output list.
2128 */
2129//TODO need to handle implicit layer enable env var and disable env var
2130static void loader_add_layer_implicit(
2131 const struct loader_instance *inst,
2132 const enum layer_type type,
2133 struct loader_layer_list *list,
2134 const struct loader_layer_list *search_list)
2135{
2136 uint32_t i;
2137 for (i = 0; i < search_list->count; i++) {
2138 const struct loader_layer_properties *prop = &search_list->list[i];
2139 if (prop->type & type) {
2140 /* Found a layer with a matching type, add it to the output list */
2141 loader_add_to_layer_list(inst, list, 1, prop);
2142 }
2143 }
2144
2145}
2146
2147/**
2148 * Get the layer name(s) from the env_name environment variable. If a layer
2149 * is found in search_list, add it to layer_list, but only if its type
2150 * matches.
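 *
 * The variable's value is split with loader_get_next_path(), so multiple
 * layer names can be listed using the platform's path-list separator, e.g.
 * (hypothetical layer names, Linux-style separator):
 *
 *   export VK_INSTANCE_LAYERS=VK_LAYER_first:VK_LAYER_second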
2151 */ 2152static void loader_add_layer_env( 2153 const struct loader_instance *inst, 2154 const enum layer_type type, 2155 const char *env_name, 2156 struct loader_layer_list *layer_list, 2157 const struct loader_layer_list *search_list) 2158{ 2159 char *layerEnv; 2160 char *next, *name; 2161 2162 layerEnv = getenv(env_name); 2163 if (layerEnv == NULL) { 2164 return; 2165 } 2166 name = loader_stack_alloc(strlen(layerEnv) + 1); 2167 if (name == NULL) { 2168 return; 2169 } 2170 strcpy(name, layerEnv); 2171 2172 while (name && *name ) { 2173 next = loader_get_next_path(name); 2174 loader_find_layer_name_add_list(inst, name, type, search_list, layer_list); 2175 name = next; 2176 } 2177 2178 return; 2179} 2180 2181void loader_deactivate_instance_layers(struct loader_instance *instance) 2182{ 2183 if (!instance->activated_layer_list.count) { 2184 return; 2185 } 2186 2187 /* Create instance chain of enabled layers */ 2188 for (uint32_t i = 0; i < instance->activated_layer_list.count; i++) { 2189 struct loader_layer_properties *layer_prop = &instance->activated_layer_list.list[i]; 2190 2191 loader_remove_layer_lib(instance, layer_prop); 2192 } 2193 loader_destroy_layer_list(instance, &instance->activated_layer_list); 2194} 2195 2196VkResult loader_enable_instance_layers( 2197 struct loader_instance *inst, 2198 const VkInstanceCreateInfo *pCreateInfo, 2199 const struct loader_layer_list *instance_layers) 2200{ 2201 VkResult err; 2202 2203 assert(inst && "Cannot have null instance"); 2204 2205 if (!loader_init_layer_list(inst, &inst->activated_layer_list)) { 2206 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc Instance activated layer list"); 2207 return VK_ERROR_OUT_OF_HOST_MEMORY; 2208 } 2209 2210 /* Add any implicit layers first */ 2211 loader_add_layer_implicit( 2212 inst, 2213 VK_LAYER_TYPE_INSTANCE_IMPLICIT, 2214 &inst->activated_layer_list, 2215 instance_layers); 2216 2217 /* Add any layers specified via environment variable next */ 2218 loader_add_layer_env( 2219 inst, 2220 VK_LAYER_TYPE_INSTANCE_EXPLICIT, 2221 "VK_INSTANCE_LAYERS", 2222 &inst->activated_layer_list, 2223 instance_layers); 2224 2225 /* Add layers specified by the application */ 2226 err = loader_add_layer_names_to_list( 2227 inst, 2228 &inst->activated_layer_list, 2229 pCreateInfo->layerCount, 2230 pCreateInfo->ppEnabledLayerNames, 2231 instance_layers); 2232 2233 return err; 2234} 2235 2236uint32_t loader_activate_instance_layers(struct loader_instance *inst) 2237{ 2238 uint32_t layer_idx; 2239 VkBaseLayerObject *wrappedInstance; 2240 2241 if (inst == NULL) { 2242 return 0; 2243 } 2244 2245 // NOTE inst is unwrapped at this point in time 2246 void* baseObj = (void*) inst; 2247 void* nextObj = (void*) inst; 2248 VkBaseLayerObject *nextInstObj; 2249 PFN_vkGetInstanceProcAddr nextGPA = loader_gpa_instance_internal; 2250 2251 if (!inst->activated_layer_list.count) { 2252 loader_init_instance_core_dispatch_table(inst->disp, nextGPA, (VkInstance) nextObj, (VkInstance) baseObj); 2253 return 0; 2254 } 2255 2256 wrappedInstance = loader_stack_alloc(sizeof(VkBaseLayerObject) 2257 * inst->activated_layer_list.count); 2258 if (!wrappedInstance) { 2259 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc Instance objects for layer"); 2260 return 0; 2261 } 2262 2263 /* Create instance chain of enabled layers */ 2264 layer_idx = inst->activated_layer_list.count - 1; 2265 for (int32_t i = inst->activated_layer_list.count - 1; i >= 0; i--) { 2266 struct loader_layer_properties *layer_prop = &inst->activated_layer_list.list[i]; 
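 /* The activated list is walked from the last element down to index 0,
 * so each wrapped object links to the chain built so far and the layer
 * at index 0 ends up outermost, i.e. the first one entered through the
 * loader's instance dispatch table. */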
2267 loader_platform_dl_handle lib_handle;
2268
2269 /*
2270 * Note: An extension's Get*ProcAddr should not return a function pointer for
2271 * any extension entry points until the extension has been enabled.
2272 * Doing this requires different behavior from the Get*ProcAddr functions
2273 * implemented in layers.
2274 * The very first call to a layer will be its Get*ProcAddr function requesting
2275 * the layer's vkGet*ProcAddr. The layer should initialize its internal dispatch table
2276 * with the wrapped object given (either Instance or Device) and return the layer's
2277 * Get*ProcAddr function. The layer should also use this opportunity to record the
2278 * baseObject so that it can find the correct local dispatch table on future calls.
2279 * Subsequent calls to Get*ProcAddr, CreateInstance, CreateDevice
2280 * will not use a wrapped object and must look up their local dispatch table from
2281 * the given baseObject.
2282 */
2283 nextInstObj = (wrappedInstance + layer_idx);
2284 nextInstObj->pGPA = (PFN_vkGPA) nextGPA;
2285 nextInstObj->baseObject = baseObj;
2286 nextInstObj->nextObject = nextObj;
2287 nextObj = (void*) nextInstObj;
2288
2289 lib_handle = loader_add_layer_lib(inst, "instance", layer_prop);
2290 if ((nextGPA = layer_prop->functions.get_instance_proc_addr) == NULL) {
2291 if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) {
2292 nextGPA = (PFN_vkGetInstanceProcAddr) loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
2293 layer_prop->functions.get_instance_proc_addr = nextGPA;
2294 } else
2295 nextGPA = (PFN_vkGetInstanceProcAddr) loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gipa);
2296 if (!nextGPA) {
2297 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to find vkGetInstanceProcAddr in layer %s", layer_prop->lib_name);
2298
2299 /* TODO: Should we return nextObj, nextGPA to previous?
or decrement layer_list count*/ 2300 continue; 2301 } 2302 } 2303 2304 loader_log(VK_DBG_REPORT_INFO_BIT, 0, 2305 "Insert instance layer %s (%s)", 2306 layer_prop->info.layerName, 2307 layer_prop->lib_name); 2308 2309 layer_idx--; 2310 } 2311 2312 loader_init_instance_core_dispatch_table(inst->disp, nextGPA, (VkInstance) nextObj, (VkInstance) baseObj); 2313 2314 return inst->activated_layer_list.count; 2315} 2316 2317void loader_activate_instance_layer_extensions(struct loader_instance *inst) 2318{ 2319 2320 loader_init_instance_extension_dispatch_table(inst->disp, 2321 inst->disp->GetInstanceProcAddr, 2322 (VkInstance) inst); 2323} 2324 2325static VkResult loader_enable_device_layers( 2326 const struct loader_instance *inst, 2327 struct loader_icd *icd, 2328 struct loader_device *dev, 2329 const VkDeviceCreateInfo *pCreateInfo, 2330 const struct loader_layer_list *device_layers) 2331 2332{ 2333 VkResult err; 2334 2335 assert(dev && "Cannot have null device"); 2336 2337 if (dev->activated_layer_list.list == NULL || dev->activated_layer_list.capacity == 0) { 2338 loader_init_layer_list(inst, &dev->activated_layer_list); 2339 } 2340 2341 if (dev->activated_layer_list.list == NULL) { 2342 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc device activated layer list"); 2343 return VK_ERROR_OUT_OF_HOST_MEMORY; 2344 } 2345 2346 /* Add any implicit layers first */ 2347 loader_add_layer_implicit( 2348 inst, 2349 VK_LAYER_TYPE_DEVICE_IMPLICIT, 2350 &dev->activated_layer_list, 2351 device_layers); 2352 2353 /* Add any layers specified via environment variable next */ 2354 loader_add_layer_env( 2355 inst, 2356 VK_LAYER_TYPE_DEVICE_EXPLICIT, 2357 "VK_DEVICE_LAYERS", 2358 &dev->activated_layer_list, 2359 device_layers); 2360 2361 /* Add layers specified by the application */ 2362 err = loader_add_layer_names_to_list( 2363 inst, 2364 &dev->activated_layer_list, 2365 pCreateInfo->layerCount, 2366 pCreateInfo->ppEnabledLayerNames, 2367 device_layers); 2368 2369 return err; 2370} 2371 2372/* 2373 * This function terminates the device chain for CreateDevice. 2374 * CreateDevice is a special case and so the loader call's 2375 * the ICD's CreateDevice before creating the chain. Since 2376 * we can't call CreateDevice twice we must terminate the 2377 * device chain with something else. 
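 * The stand-in below therefore just returns VK_SUCCESS: loader_CreateDevice()
 * has already called the real icd->CreateDevice(), temporarily installs
 * scratch_vkCreateDevice as the chain terminator while the layer chain runs,
 * and restores the ICD entry point afterwards.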
2378 */ 2379static VkResult VKAPI scratch_vkCreateDevice( 2380 VkPhysicalDevice gpu, 2381 const VkDeviceCreateInfo *pCreateInfo, 2382 VkDevice *pDevice) 2383{ 2384 return VK_SUCCESS; 2385} 2386 2387static PFN_vkVoidFunction VKAPI loader_GetDeviceChainProcAddr(VkDevice device, const char * name) 2388{ 2389 if (!strcmp(name, "vkGetDeviceProcAddr")) 2390 return (PFN_vkVoidFunction) loader_GetDeviceChainProcAddr; 2391 if (!strcmp(name, "vkCreateDevice")) 2392 return (PFN_vkVoidFunction) scratch_vkCreateDevice; 2393 2394 struct loader_device *found_dev; 2395 struct loader_icd *icd = loader_get_icd_and_device(device, &found_dev); 2396 return icd->GetDeviceProcAddr(device, name); 2397} 2398 2399static uint32_t loader_activate_device_layers( 2400 const struct loader_instance *inst, 2401 struct loader_device *dev, 2402 VkDevice device) 2403{ 2404 if (!dev) { 2405 return 0; 2406 } 2407 2408 /* activate any layer libraries */ 2409 void* nextObj = (void*) device; 2410 void* baseObj = nextObj; 2411 VkBaseLayerObject *nextGpuObj; 2412 PFN_vkGetDeviceProcAddr nextGPA = loader_GetDeviceChainProcAddr; 2413 VkBaseLayerObject *wrappedGpus; 2414 2415 if (!dev->activated_layer_list.count) { 2416 loader_init_device_dispatch_table(&dev->loader_dispatch, nextGPA, 2417 (VkDevice) nextObj, (VkDevice) baseObj); 2418 return 0; 2419 } 2420 2421 wrappedGpus = loader_heap_alloc(inst, 2422 sizeof (VkBaseLayerObject) * dev->activated_layer_list.count, 2423 VK_SYSTEM_ALLOC_TYPE_INTERNAL); 2424 if (!wrappedGpus) { 2425 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to alloc Gpu objects for layer"); 2426 return 0; 2427 } 2428 2429 for (int32_t i = dev->activated_layer_list.count - 1; i >= 0; i--) { 2430 2431 struct loader_layer_properties *layer_prop = &dev->activated_layer_list.list[i]; 2432 loader_platform_dl_handle lib_handle; 2433 2434 nextGpuObj = (wrappedGpus + i); 2435 nextGpuObj->pGPA = (PFN_vkGPA)nextGPA; 2436 nextGpuObj->baseObject = baseObj; 2437 nextGpuObj->nextObject = nextObj; 2438 nextObj = (void*) nextGpuObj; 2439 2440 lib_handle = loader_add_layer_lib(inst, "device", layer_prop); 2441 if ((nextGPA = layer_prop->functions.get_device_proc_addr) == NULL) { 2442 if (layer_prop->functions.str_gdpa == NULL || strlen(layer_prop->functions.str_gdpa) == 0) { 2443 nextGPA = (PFN_vkGetDeviceProcAddr) loader_platform_get_proc_address(lib_handle, "vkGetDeviceProcAddr"); 2444 layer_prop->functions.get_device_proc_addr = nextGPA; 2445 } else 2446 nextGPA = (PFN_vkGetDeviceProcAddr) loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gdpa); 2447 if (!nextGPA) { 2448 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to find vkGetDeviceProcAddr in layer %s", layer_prop->lib_name); 2449 continue; 2450 } 2451 } 2452 2453 loader_log(VK_DBG_REPORT_INFO_BIT, 0, 2454 "Insert device layer library %s (%s)", 2455 layer_prop->info.layerName, 2456 layer_prop->lib_name); 2457 2458 } 2459 2460 loader_init_device_dispatch_table(&dev->loader_dispatch, nextGPA, 2461 (VkDevice) nextObj, (VkDevice) baseObj); 2462 loader_heap_free(inst, wrappedGpus); 2463 2464 return dev->activated_layer_list.count; 2465} 2466 2467VkResult loader_validate_layers( 2468 const uint32_t layer_count, 2469 const char * const *ppEnabledLayerNames, 2470 const struct loader_layer_list *list) 2471{ 2472 struct loader_layer_properties *prop; 2473 2474 for (uint32_t i = 0; i < layer_count; i++) { 2475 prop = loader_get_layer_property(ppEnabledLayerNames[i], 2476 list); 2477 if (!prop) { 2478 return VK_ERROR_LAYER_NOT_PRESENT; 2479 } 2480 } 2481 2482 return 
VK_SUCCESS;
2483}
2484
2485VkResult loader_validate_instance_extensions(
2486 const struct loader_extension_list *icd_exts,
2487 const struct loader_layer_list *instance_layer,
2488 const VkInstanceCreateInfo *pCreateInfo)
2489{
2490 VkExtensionProperties *extension_prop;
2491 struct loader_layer_properties *layer_prop;
2492
2493 for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
2494 extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i],
2495 icd_exts);
2496
2497 if (extension_prop) {
2498 continue;
2499 }
2500
2501 extension_prop = NULL;
2502
2503 /* Not in global list, search layer extension lists */
2504 for (uint32_t j = 0; j < pCreateInfo->layerCount; j++) {
2505 layer_prop = loader_get_layer_property(pCreateInfo->ppEnabledLayerNames[j],
2506 instance_layer);
2507 if (!layer_prop) {
2508 /* Should NOT get here, loader_validate_layers
2509 * should have already filtered this case out.
2510 */
2511 continue;
2512 }
2513
2514 extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i],
2515 &layer_prop->instance_extension_list);
2516 if (extension_prop) {
2517 /* Found the extension in one of the layers enabled by the app. */
2518 break;
2519 }
2520 }
2521
2522 if (!extension_prop) {
2523 /* Didn't find extension name in any of the enabled layers, error out */
2524 return VK_ERROR_EXTENSION_NOT_PRESENT;
2525 }
2526 }
2527 return VK_SUCCESS;
2528}
2529
2530VkResult loader_validate_device_extensions(
2531 struct loader_icd *icd,
2532 uint32_t gpu_index,
2533 const struct loader_layer_list *device_layer,
2534 const VkDeviceCreateInfo *pCreateInfo)
2535{
2536 VkExtensionProperties *extension_prop;
2537 struct loader_layer_properties *layer_prop;
2538
2539 for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
2540 const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
2541 extension_prop = get_extension_property(extension_name,
2542 &icd->device_extension_cache[gpu_index]);
2543
2544 if (extension_prop) {
2545 continue;
2546 }
2547
2548 /* Not in global list, search layer extension lists */
2549 for (uint32_t j = 0; j < pCreateInfo->layerCount; j++) {
2550 const char *layer_name = pCreateInfo->ppEnabledLayerNames[j];
2551 layer_prop = loader_get_layer_property(layer_name,
2552 device_layer);
2553
2554 if (!layer_prop) {
2555 /* Should NOT get here, loader_validate_layers
2556 * should have already filtered this case out.
2557 */
2558 continue;
2559 }
2560
2561 extension_prop = get_extension_property(extension_name,
2562 &layer_prop->device_extension_list);
2563 if (extension_prop) {
2564 /* Found the extension in one of the layers enabled by the app.
*/ 2565 break; 2566 } 2567 } 2568 2569 if (!extension_prop) { 2570 /* Didn't find extension name in any of the device layers, error out */ 2571 return VK_ERROR_EXTENSION_NOT_PRESENT; 2572 } 2573 } 2574 return VK_SUCCESS; 2575} 2576 2577VkResult VKAPI loader_CreateInstance( 2578 const VkInstanceCreateInfo* pCreateInfo, 2579 VkInstance* pInstance) 2580{ 2581 struct loader_instance *ptr_instance = *(struct loader_instance **) pInstance; 2582 struct loader_icd *icd; 2583 VkExtensionProperties *prop; 2584 char **filtered_extension_names = NULL; 2585 VkInstanceCreateInfo icd_create_info; 2586 VkResult res = VK_SUCCESS; 2587 bool success; 2588 2589 icd_create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; 2590 icd_create_info.layerCount = 0; 2591 icd_create_info.ppEnabledLayerNames = NULL; 2592 icd_create_info.pAllocCb = pCreateInfo->pAllocCb; 2593 icd_create_info.pAppInfo = pCreateInfo->pAppInfo; 2594 icd_create_info.pNext = pCreateInfo->pNext; 2595 2596 /* 2597 * NOTE: Need to filter the extensions to only those 2598 * supported by the ICD. 2599 * No ICD will advertise support for layers. An ICD 2600 * library could support a layer, but it would be 2601 * independent of the actual ICD, just in the same library. 2602 */ 2603 filtered_extension_names = loader_stack_alloc(pCreateInfo->extensionCount * sizeof(char *)); 2604 if (!filtered_extension_names) { 2605 return VK_ERROR_OUT_OF_HOST_MEMORY; 2606 } 2607 icd_create_info.ppEnabledExtensionNames = (const char * const *) filtered_extension_names; 2608 2609 for (uint32_t i = 0; i < ptr_instance->icd_libs.count; i++) { 2610 icd = loader_icd_add(ptr_instance, &ptr_instance->icd_libs.list[i]); 2611 if (icd) { 2612 icd_create_info.extensionCount = 0; 2613 for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) { 2614 prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], 2615 &ptr_instance->ext_list); 2616 if (prop) { 2617 filtered_extension_names[icd_create_info.extensionCount] = (char *) pCreateInfo->ppEnabledExtensionNames[i]; 2618 icd_create_info.extensionCount++; 2619 } 2620 } 2621 2622 res = ptr_instance->icd_libs.list[i].CreateInstance(&icd_create_info, 2623 &(icd->instance)); 2624 success = loader_icd_init_entrys( 2625 icd, 2626 icd->instance, 2627 ptr_instance->icd_libs.list[i].GetInstanceProcAddr); 2628 2629 if (res != VK_SUCCESS || !success) 2630 { 2631 ptr_instance->icds = ptr_instance->icds->next; 2632 loader_icd_destroy(ptr_instance, icd); 2633 icd->instance = VK_NULL_HANDLE; 2634 loader_log(VK_DBG_REPORT_ERROR_BIT, 0, 2635 "ICD ignored: failed to CreateInstance and find entrypoints with ICD"); 2636 } 2637 } 2638 } 2639 2640 /* 2641 * If no ICDs were added to instance list and res is unchanged 2642 * from it's initial value, the loader was unable to find 2643 * a suitable ICD. 
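 * In that case the check below maps an unchanged (VK_SUCCESS) res to
 * VK_ERROR_INCOMPATIBLE_DRIVER; otherwise the last ICD failure code is
 * returned to the application.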
2644 */
2645 if (ptr_instance->icds == NULL) {
2646 if (res == VK_SUCCESS) {
2647 return VK_ERROR_INCOMPATIBLE_DRIVER;
2648 } else {
2649 return res;
2650 }
2651 }
2652
2653 return VK_SUCCESS;
2654}
2655
2656void VKAPI loader_DestroyInstance(
2657 VkInstance instance)
2658{
2659 struct loader_instance *ptr_instance = loader_instance(instance);
2660 struct loader_icd *icds = ptr_instance->icds;
2661 struct loader_icd *next_icd;
2662
2663 // Remove this instance from the list of instances:
2664 struct loader_instance *prev = NULL;
2665 struct loader_instance *next = loader.instances;
2666 while (next != NULL) {
2667 if (next == ptr_instance) {
2668 // Remove this instance from the list:
2669 if (prev)
2670 prev->next = next->next;
2671 else
2672 loader.instances = next->next;
2673 break;
2674 }
2675 prev = next;
2676 next = next->next;
2677 }
2678 /* TODOVV: Move this test to validation layer */
2679// if (next == NULL) {
2680// // This must be an invalid instance handle or empty list
2681// return VK_ERROR_INVALID_HANDLE;
2682// }
2683
2684 while (icds) {
2685 if (icds->instance) {
2686 icds->DestroyInstance(icds->instance);
2687 /* TODOVV: Move this test to validation layer */
2688 //if (res != VK_SUCCESS)
2689 // loader_log(VK_DBG_REPORT_WARN_BIT, 0,
2690 // "ICD ignored: failed to DestroyInstance on device");
2691 }
2692 next_icd = icds->next;
2693 icds->instance = VK_NULL_HANDLE;
2694 loader_icd_destroy(ptr_instance, icds);
2695
2696 icds = next_icd;
2697 }
2698 loader_delete_layer_properties(ptr_instance, &ptr_instance->device_layer_list);
2699 loader_delete_layer_properties(ptr_instance, &ptr_instance->instance_layer_list);
2700 loader_scanned_icd_clear(ptr_instance, &ptr_instance->icd_libs);
2701 loader_destroy_ext_list(ptr_instance, &ptr_instance->ext_list);
2702}
2703
2704VkResult loader_init_physical_device_info(
2705 struct loader_instance *ptr_instance)
2706{
2707 struct loader_icd *icd;
2708 uint32_t n, count = 0;
2709 VkResult res;
2710
2711 icd = ptr_instance->icds;
2712 while (icd) {
2713 res = icd->EnumeratePhysicalDevices(icd->instance, &n, NULL);
2714 if (res != VK_SUCCESS)
2715 return res;
2716 icd->gpu_count = n;
2717 count += n;
2718 icd = icd->next;
2719 }
2720
2721 ptr_instance->total_gpu_count = count;
2722
2723 icd = ptr_instance->icds;
2724 while (icd) {
2725
2726 n = icd->gpu_count;
2727 icd->gpus = (VkPhysicalDevice *) loader_heap_alloc(
2728 ptr_instance,
2729 n * sizeof(VkPhysicalDevice),
2730 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
2731 if (!icd->gpus) {
2732 /* TODO: Add cleanup code here */
2733 return VK_ERROR_OUT_OF_HOST_MEMORY;
2734 }
2735 res = icd->EnumeratePhysicalDevices(
2736 icd->instance,
2737 &n,
2738 icd->gpus);
2739 if ((res == VK_SUCCESS) && (n == icd->gpu_count)) {
2740
2741 for (unsigned int i = 0; i < n; i++) {
2742
2743 loader_init_dispatch(icd->gpus[i], ptr_instance->disp);
2744
2745 if (!loader_init_ext_list(ptr_instance, &icd->device_extension_cache[i])) {
2746 /* TODO: Add cleanup code here */
2747 res = VK_ERROR_OUT_OF_HOST_MEMORY;
2748 }
2749 if (res == VK_SUCCESS) {
2750
2751 loader_add_physical_device_extensions(
2752 ptr_instance,
2753 icd->EnumerateDeviceExtensionProperties,
2754 icd->gpus[i],
2755 icd->this_icd_lib->lib_name,
2756 &icd->device_extension_cache[i]);
2757
2758 }
2759
2760 if (res != VK_SUCCESS) {
2761 /* clean up any extension lists created before this request failed */
2762 for (uint32_t j = 0; j < i; j++) {
2763 loader_destroy_ext_list(
2764 ptr_instance,
2765 &icd->device_extension_cache[j]);
2766 }
2767
2768
return res; 2769 } 2770 } 2771 2772 count += n; 2773 } 2774 2775 icd = icd->next; 2776 } 2777 2778 return VK_SUCCESS; 2779} 2780 2781VkResult VKAPI loader_EnumeratePhysicalDevices( 2782 VkInstance instance, 2783 uint32_t* pPhysicalDeviceCount, 2784 VkPhysicalDevice* pPhysicalDevices) 2785{ 2786 uint32_t index = 0; 2787 struct loader_instance *ptr_instance = (struct loader_instance *) instance; 2788 struct loader_icd *icd = ptr_instance->icds; 2789 2790 if (ptr_instance->total_gpu_count == 0) { 2791 loader_init_physical_device_info(ptr_instance); 2792 } 2793 2794 *pPhysicalDeviceCount = ptr_instance->total_gpu_count; 2795 if (!pPhysicalDevices) { 2796 return VK_SUCCESS; 2797 } 2798 2799 while (icd) { 2800 assert((index + icd->gpu_count) <= *pPhysicalDeviceCount); 2801 memcpy(&pPhysicalDevices[index], icd->gpus, icd->gpu_count * sizeof(VkPhysicalDevice)); 2802 index += icd->gpu_count; 2803 icd = icd->next; 2804 } 2805 2806 return VK_SUCCESS; 2807} 2808 2809VkResult VKAPI loader_GetPhysicalDeviceProperties( 2810 VkPhysicalDevice gpu, 2811 VkPhysicalDeviceProperties* pProperties) 2812{ 2813 uint32_t gpu_index; 2814 struct loader_icd *icd = loader_get_icd(gpu, &gpu_index); 2815 VkResult res = VK_ERROR_INITIALIZATION_FAILED; 2816 2817 if (icd->GetPhysicalDeviceProperties) 2818 res = icd->GetPhysicalDeviceProperties(gpu, pProperties); 2819 2820 return res; 2821} 2822 2823VkResult VKAPI loader_GetPhysicalDeviceQueueFamilyProperties ( 2824 VkPhysicalDevice gpu, 2825 uint32_t* pCount, 2826 VkQueueFamilyProperties* pProperties) 2827{ 2828 uint32_t gpu_index; 2829 struct loader_icd *icd = loader_get_icd(gpu, &gpu_index); 2830 VkResult res = VK_ERROR_INITIALIZATION_FAILED; 2831 2832 if (icd->GetPhysicalDeviceQueueFamilyProperties) 2833 res = icd->GetPhysicalDeviceQueueFamilyProperties(gpu, pCount, pProperties); 2834 2835 return res; 2836} 2837 2838VkResult VKAPI loader_GetPhysicalDeviceMemoryProperties ( 2839 VkPhysicalDevice gpu, 2840 VkPhysicalDeviceMemoryProperties* pProperties) 2841{ 2842 uint32_t gpu_index; 2843 struct loader_icd *icd = loader_get_icd(gpu, &gpu_index); 2844 VkResult res = VK_ERROR_INITIALIZATION_FAILED; 2845 2846 if (icd->GetPhysicalDeviceMemoryProperties) 2847 res = icd->GetPhysicalDeviceMemoryProperties(gpu, pProperties); 2848 2849 return res; 2850} 2851 2852VkResult VKAPI loader_GetPhysicalDeviceFeatures( 2853 VkPhysicalDevice physicalDevice, 2854 VkPhysicalDeviceFeatures* pFeatures) 2855{ 2856 uint32_t gpu_index; 2857 struct loader_icd *icd = loader_get_icd(physicalDevice, &gpu_index); 2858 VkResult res = VK_ERROR_INITIALIZATION_FAILED; 2859 2860 if (icd->GetPhysicalDeviceFeatures) 2861 res = icd->GetPhysicalDeviceFeatures(physicalDevice, pFeatures); 2862 2863 return res; 2864} 2865 2866VkResult VKAPI loader_GetPhysicalDeviceFormatProperties( 2867 VkPhysicalDevice physicalDevice, 2868 VkFormat format, 2869 VkFormatProperties* pFormatInfo) 2870{ 2871 uint32_t gpu_index; 2872 struct loader_icd *icd = loader_get_icd(physicalDevice, &gpu_index); 2873 VkResult res = VK_ERROR_INITIALIZATION_FAILED; 2874 2875 if (icd->GetPhysicalDeviceFormatProperties) 2876 res = icd->GetPhysicalDeviceFormatProperties(physicalDevice, format, pFormatInfo); 2877 2878 return res; 2879} 2880 2881VkResult VKAPI loader_GetPhysicalDeviceImageFormatProperties( 2882 VkPhysicalDevice physicalDevice, 2883 VkFormat format, 2884 VkImageType type, 2885 VkImageTiling tiling, 2886 VkImageUsageFlags usage, 2887 VkImageCreateFlags flags, 2888 VkImageFormatProperties* pImageFormatProperties) 2889{ 2890 uint32_t 
gpu_index; 2891 struct loader_icd *icd = loader_get_icd(physicalDevice, &gpu_index); 2892 VkResult res = VK_ERROR_INITIALIZATION_FAILED; 2893 2894 if (icd->GetPhysicalDeviceImageFormatProperties) 2895 res = icd->GetPhysicalDeviceImageFormatProperties(physicalDevice, format, 2896 type, tiling, usage, flags, pImageFormatProperties); 2897 2898 return res; 2899} 2900 2901VkResult VKAPI loader_GetPhysicalDeviceSparseImageFormatProperties( 2902 VkPhysicalDevice physicalDevice, 2903 VkFormat format, 2904 VkImageType type, 2905 uint32_t samples, 2906 VkImageUsageFlags usage, 2907 VkImageTiling tiling, 2908 uint32_t* pNumProperties, 2909 VkSparseImageFormatProperties* pProperties) 2910{ 2911 uint32_t gpu_index; 2912 struct loader_icd *icd = loader_get_icd(physicalDevice, &gpu_index); 2913 VkResult res = VK_ERROR_INITIALIZATION_FAILED; 2914 2915 if (icd->GetPhysicalDeviceSparseImageFormatProperties) 2916 res = icd->GetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling, pNumProperties, pProperties); 2917 2918 return res; 2919} 2920 2921VkResult VKAPI loader_CreateDevice( 2922 VkPhysicalDevice gpu, 2923 const VkDeviceCreateInfo* pCreateInfo, 2924 VkDevice* pDevice) 2925{ 2926 uint32_t gpu_index; 2927 struct loader_icd *icd = loader_get_icd(gpu, &gpu_index); 2928 struct loader_device *dev; 2929 const struct loader_instance *inst = icd->this_instance; 2930 VkDeviceCreateInfo device_create_info; 2931 char **filtered_extension_names = NULL; 2932 VkResult res; 2933 2934 assert(pCreateInfo->queueRecordCount >= 1); 2935 2936 if (!icd->CreateDevice) { 2937 return VK_ERROR_INITIALIZATION_FAILED; 2938 } 2939 2940 /* validate any app enabled layers are available */ 2941 if (pCreateInfo->layerCount > 0) { 2942 res = loader_validate_layers(pCreateInfo->layerCount, 2943 pCreateInfo->ppEnabledLayerNames, 2944 &inst->device_layer_list); 2945 if (res != VK_SUCCESS) { 2946 return res; 2947 } 2948 } 2949 2950 res = loader_validate_device_extensions(icd, gpu_index, &inst->device_layer_list, pCreateInfo); 2951 if (res != VK_SUCCESS) { 2952 return res; 2953 } 2954 2955 /* 2956 * NOTE: Need to filter the extensions to only those 2957 * supported by the ICD. 2958 * No ICD will advertise support for layers. An ICD 2959 * library could support a layer, but it would be 2960 * independent of the actual ICD, just in the same library. 
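 * Any requested extension that is not present in the ICD's
 * device_extension_cache is therefore dropped from the list handed to the
 * ICD; loader_validate_device_extensions() above has already rejected names
 * that neither the ICD nor an enabled layer provides.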
2961 */ 2962 filtered_extension_names = loader_stack_alloc(pCreateInfo->extensionCount * sizeof(char *)); 2963 if (!filtered_extension_names) { 2964 return VK_ERROR_OUT_OF_HOST_MEMORY; 2965 } 2966 2967 /* Copy user's data */ 2968 memcpy(&device_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo)); 2969 2970 /* ICD's do not use layers */ 2971 device_create_info.layerCount = 0; 2972 device_create_info.ppEnabledLayerNames = NULL; 2973 2974 device_create_info.extensionCount = 0; 2975 device_create_info.ppEnabledExtensionNames = (const char * const *) filtered_extension_names; 2976 2977 for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) { 2978 const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i]; 2979 VkExtensionProperties *prop = get_extension_property(extension_name, 2980 &icd->device_extension_cache[gpu_index]); 2981 if (prop) { 2982 filtered_extension_names[device_create_info.extensionCount] = (char *) extension_name; 2983 device_create_info.extensionCount++; 2984 } 2985 } 2986 2987 res = icd->CreateDevice(gpu, pCreateInfo, pDevice); 2988 if (res != VK_SUCCESS) { 2989 return res; 2990 } 2991 2992 dev = loader_add_logical_device(inst, *pDevice, &icd->logical_device_list); 2993 if (dev == NULL) { 2994 return VK_ERROR_OUT_OF_HOST_MEMORY; 2995 } 2996 PFN_vkGetDeviceProcAddr get_proc_addr = icd->GetDeviceProcAddr; 2997 loader_init_device_dispatch_table(&dev->loader_dispatch, get_proc_addr, 2998 *pDevice, *pDevice); 2999 3000 dev->loader_dispatch.CreateDevice = scratch_vkCreateDevice; 3001 loader_init_dispatch(*pDevice, &dev->loader_dispatch); 3002 3003 /* activate any layers on device chain which terminates with device*/ 3004 res = loader_enable_device_layers(inst, icd, dev, pCreateInfo, &inst->device_layer_list); 3005 if (res != VK_SUCCESS) { 3006 loader_destroy_logical_device(inst, dev); 3007 return res; 3008 } 3009 loader_activate_device_layers(inst, dev, *pDevice); 3010 3011 res = dev->loader_dispatch.CreateDevice(gpu, pCreateInfo, pDevice); 3012 3013 dev->loader_dispatch.CreateDevice = icd->CreateDevice; 3014 3015 return res; 3016} 3017 3018static PFN_vkVoidFunction VKAPI loader_GetInstanceProcAddr(VkInstance instance, const char * pName) 3019{ 3020 if (instance == VK_NULL_HANDLE) 3021 return NULL; 3022 3023 void *addr; 3024 /* get entrypoint addresses that are global (in the loader)*/ 3025 addr = globalGetProcAddr(pName); 3026 if (addr) 3027 return addr; 3028 3029 struct loader_instance *ptr_instance = (struct loader_instance *) instance; 3030 3031 /* return any extension global entrypoints */ 3032 addr = debug_report_instance_gpa(ptr_instance, pName); 3033 if (addr) { 3034 return addr; 3035 } 3036 3037 addr = wsi_swapchain_GetInstanceProcAddr(ptr_instance, pName); 3038 if (addr) { 3039 return addr; 3040 } 3041 3042 /* return the instance dispatch table entrypoint for extensions */ 3043 const VkLayerInstanceDispatchTable *disp_table = * (VkLayerInstanceDispatchTable **) instance; 3044 if (disp_table == NULL) 3045 return NULL; 3046 3047 addr = loader_lookup_instance_dispatch_table(disp_table, pName); 3048 if (addr) 3049 return addr; 3050 3051 return NULL; 3052} 3053 3054LOADER_EXPORT PFN_vkVoidFunction VKAPI vkGetInstanceProcAddr(VkInstance instance, const char * pName) 3055{ 3056 return loader_GetInstanceProcAddr(instance, pName); 3057} 3058 3059static PFN_vkVoidFunction VKAPI loader_GetDeviceProcAddr(VkDevice device, const char * pName) 3060{ 3061 if (device == VK_NULL_HANDLE) { 3062 return NULL; 3063 } 3064 3065 void *addr; 3066 3067 /* for entrypoints that 
loader must handle (ie non-dispatchable or create object) 3068 make sure the loader entrypoint is returned */ 3069 addr = loader_non_passthrough_gpa(pName); 3070 if (addr) { 3071 return addr; 3072 } 3073 3074 /* return the dispatch table entrypoint for the fastest case */ 3075 const VkLayerDispatchTable *disp_table = * (VkLayerDispatchTable **) device; 3076 if (disp_table == NULL) 3077 return NULL; 3078 3079 addr = loader_lookup_device_dispatch_table(disp_table, pName); 3080 if (addr) 3081 return addr; 3082 else { 3083 if (disp_table->GetDeviceProcAddr == NULL) 3084 return NULL; 3085 return disp_table->GetDeviceProcAddr(device, pName); 3086 } 3087} 3088 3089LOADER_EXPORT PFN_vkVoidFunction VKAPI vkGetDeviceProcAddr(VkDevice device, const char * pName) 3090{ 3091 return loader_GetDeviceProcAddr(device, pName); 3092} 3093 3094LOADER_EXPORT VkResult VKAPI vkEnumerateInstanceExtensionProperties( 3095 const char* pLayerName, 3096 uint32_t* pCount, 3097 VkExtensionProperties* pProperties) 3098{ 3099 struct loader_extension_list *global_ext_list=NULL; 3100 struct loader_layer_list instance_layers; 3101 struct loader_extension_list icd_extensions; 3102 struct loader_icd_libs icd_libs; 3103 uint32_t copy_size; 3104 3105 tls_instance = NULL; 3106 memset(&icd_extensions, 0, sizeof(icd_extensions)); 3107 loader_platform_thread_once(&once_init, loader_initialize); 3108 3109 /* get layer libraries if needed */ 3110 if (pLayerName && strlen(pLayerName) != 0) { 3111 memset(&instance_layers, 0, sizeof(instance_layers)); 3112 loader_layer_scan(NULL, &instance_layers, NULL); 3113 for (uint32_t i = 0; i < instance_layers.count; i++) { 3114 struct loader_layer_properties *props = &instance_layers.list[i]; 3115 if (strcmp(props->info.layerName, pLayerName) == 0) { 3116 global_ext_list = &props->instance_extension_list; 3117 } 3118 } 3119 loader_destroy_layer_list(NULL, &instance_layers); 3120 } 3121 else { 3122 /* Scan/discover all ICD libraries */ 3123 memset(&icd_libs, 0 , sizeof(struct loader_icd_libs)); 3124 loader_icd_scan(NULL, &icd_libs); 3125 /* get extensions from all ICD's, merge so no duplicates */ 3126 loader_get_icd_loader_instance_extensions(NULL, &icd_libs, &icd_extensions); 3127 loader_scanned_icd_clear(NULL, &icd_libs); 3128 global_ext_list = &icd_extensions; 3129 } 3130 3131 if (global_ext_list == NULL) { 3132 return VK_ERROR_LAYER_NOT_PRESENT; 3133 } 3134 3135 if (pProperties == NULL) { 3136 *pCount = global_ext_list->count; 3137 loader_destroy_ext_list(NULL, &icd_extensions); 3138 return VK_SUCCESS; 3139 } 3140 3141 copy_size = *pCount < global_ext_list->count ? 
*pCount : global_ext_list->count; 3142 for (uint32_t i = 0; i < copy_size; i++) { 3143 memcpy(&pProperties[i], 3144 &global_ext_list->list[i], 3145 sizeof(VkExtensionProperties)); 3146 } 3147 *pCount = copy_size; 3148 loader_destroy_ext_list(NULL, &icd_extensions); 3149 3150 if (copy_size < global_ext_list->count) { 3151 return VK_INCOMPLETE; 3152 } 3153 3154 return VK_SUCCESS; 3155} 3156 3157LOADER_EXPORT VkResult VKAPI vkEnumerateInstanceLayerProperties( 3158 uint32_t* pCount, 3159 VkLayerProperties* pProperties) 3160{ 3161 3162 struct loader_layer_list instance_layer_list; 3163 tls_instance = NULL; 3164 3165 loader_platform_thread_once(&once_init, loader_initialize); 3166 3167 uint32_t copy_size; 3168 3169 /* get layer libraries */ 3170 memset(&instance_layer_list, 0, sizeof(instance_layer_list)); 3171 loader_layer_scan(NULL, &instance_layer_list, NULL); 3172 3173 if (pProperties == NULL) { 3174 *pCount = instance_layer_list.count; 3175 loader_destroy_layer_list(NULL, &instance_layer_list); 3176 return VK_SUCCESS; 3177 } 3178 3179 copy_size = (*pCount < instance_layer_list.count) ? *pCount : instance_layer_list.count; 3180 for (uint32_t i = 0; i < copy_size; i++) { 3181 memcpy(&pProperties[i], &instance_layer_list.list[i].info, sizeof(VkLayerProperties)); 3182 } 3183 *pCount = copy_size; 3184 loader_destroy_layer_list(NULL, &instance_layer_list); 3185 3186 if (copy_size < instance_layer_list.count) { 3187 return VK_INCOMPLETE; 3188 } 3189 3190 return VK_SUCCESS; 3191} 3192 3193VkResult VKAPI loader_EnumerateDeviceExtensionProperties( 3194 VkPhysicalDevice gpu, 3195 const char* pLayerName, 3196 uint32_t* pCount, 3197 VkExtensionProperties* pProperties) 3198{ 3199 uint32_t gpu_index; 3200 struct loader_icd *icd = loader_get_icd(gpu, &gpu_index); 3201 uint32_t copy_size; 3202 3203 uint32_t count; 3204 struct loader_extension_list *dev_ext_list=NULL; 3205 3206 /* get layer libraries if needed */ 3207 if (pLayerName && strlen(pLayerName) != 0) { 3208 for (uint32_t i = 0; i < icd->this_instance->device_layer_list.count; i++) { 3209 struct loader_layer_properties *props = &icd->this_instance->device_layer_list.list[i]; 3210 if (strcmp(props->info.layerName, pLayerName) == 0) { 3211 dev_ext_list = &props->device_extension_list; 3212 } 3213 } 3214 } 3215 else { 3216 dev_ext_list = &icd->device_extension_cache[gpu_index]; 3217 } 3218 3219 count = (dev_ext_list == NULL) ? 0: dev_ext_list->count; 3220 if (pProperties == NULL) { 3221 *pCount = count; 3222 return VK_SUCCESS; 3223 } 3224 3225 copy_size = *pCount < count ? *pCount : count; 3226 for (uint32_t i = 0; i < copy_size; i++) { 3227 memcpy(&pProperties[i], 3228 &dev_ext_list->list[i], 3229 sizeof(VkExtensionProperties)); 3230 } 3231 *pCount = copy_size; 3232 3233 if (copy_size < count) { 3234 return VK_INCOMPLETE; 3235 } 3236 3237 return VK_SUCCESS; 3238} 3239 3240VkResult VKAPI loader_EnumerateDeviceLayerProperties( 3241 VkPhysicalDevice gpu, 3242 uint32_t* pCount, 3243 VkLayerProperties* pProperties) 3244{ 3245 uint32_t copy_size; 3246 uint32_t gpu_index; 3247 struct loader_icd *icd = loader_get_icd(gpu, &gpu_index); 3248 3249 uint32_t count = icd->this_instance->device_layer_list.count; 3250 3251 if (pProperties == NULL) { 3252 *pCount = count; 3253 return VK_SUCCESS; 3254 } 3255 3256 copy_size = (*pCount < count) ? 
*pCount : count; 3257 for (uint32_t i = 0; i < copy_size; i++) { 3258 memcpy(&pProperties[i], &(icd->this_instance->device_layer_list.list[i].info), sizeof(VkLayerProperties)); 3259 } 3260 *pCount = copy_size; 3261 3262 if (copy_size < count) { 3263 return VK_INCOMPLETE; 3264 } 3265 3266 return VK_SUCCESS; 3267} 3268