loader.c revision 230e62505478ab0d9b05003e0e48baa5f090c6a0
/*
 * Vulkan
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *   Chia-I Wu <olv@lunarg.com>
 *   Jon Ashburn <jon@lunarg.com>
 *   Courtney Goeltzenleuchter <courtney@lunarg.com>
 *   Ian Elliott <ian@lunarg.com>
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <string.h>

#include <sys/types.h>
#if defined(WIN32)
#include "dirent_on_windows.h"
#else // WIN32
#include <dirent.h>
#endif // WIN32
#include "loader_platform.h"
#include "table_ops.h"
#include "gpa_helper.h"
#include "loader.h"
#include "vkIcd.h"
// The following is #included again to catch certain OS-specific functions
// being used:
#include "loader_platform.h"

struct loader_layers {
    loader_platform_dl_handle lib_handle;
    char name[256];
};

struct layer_name_pair {
    char *layer_name;
    const char *lib_name;
};

struct extension_property {
    char extName[VK_MAX_EXTENSION_NAME];
    uint32_t version;
    bool hosted;            // does the extension reside in one driver/layer
};

struct loader_icd {
    const struct loader_scanned_icds *scanned_icds;

    VkLayerDispatchTable *loader_dispatch;
    uint32_t layer_count[MAX_GPUS_FOR_LAYER];
    struct loader_layers layer_libs[MAX_GPUS_FOR_LAYER][MAX_LAYER_LIBRARIES];
    VkBaseLayerObject *wrappedGpus[MAX_GPUS_FOR_LAYER];
    uint32_t gpu_count;
    VkBaseLayerObject *gpus;

    struct loader_icd *next;
};


struct loader_scanned_icds {
    loader_platform_dl_handle handle;

    PFN_vkGetProcAddr GetProcAddr;
    PFN_vkCreateInstance CreateInstance;
    PFN_vkDestroyInstance DestroyInstance;
    PFN_vkEnumeratePhysicalDevices EnumeratePhysicalDevices;
    PFN_vkGetGlobalExtensionInfo GetGlobalExtensionInfo;
    VkInstance instance;
    struct loader_scanned_icds *next;
    uint32_t extension_count;
    struct extension_property *extensions;
};

struct loader_scanned_layers {
    char *name;
    uint32_t extension_count;
    struct extension_property *extensions;
};

// Note: Since the following is a static structure, all members are initialized
// to zero.
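// It tracks the created instances, the scanned ICD and layer libraries, and
// the coalesced global extension list built from all of them.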
static struct {
    struct loader_instance *instances;
    bool icds_scanned;
    struct loader_scanned_icds *scanned_icd_list;
    bool layer_scanned;
    char *layer_dirs;
    unsigned int scanned_layer_count;
    struct loader_scanned_layers scanned_layers[MAX_LAYER_LIBRARIES];
    size_t scanned_ext_list_capacity;
    uint32_t scanned_ext_list_count;      // coalesced from all layers/drivers
    struct extension_property **scanned_ext_list;
} loader;

static LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_icd);
static LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_layer);
static LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_exts);

#if defined(WIN32)
char *loader_get_registry_string(const HKEY hive,
                                 const LPCTSTR sub_key,
                                 const char *value)
{
    DWORD access_flags = KEY_QUERY_VALUE;
    DWORD value_type;
    HKEY key;
    LONG rtn_value;
    char *rtn_str = NULL;
    size_t rtn_len = 0;
    size_t allocated_len = 0;

    rtn_value = RegOpenKeyEx(hive, sub_key, 0, access_flags, &key);
    if (rtn_value != ERROR_SUCCESS) {
        // We didn't find the key.  Try the 32-bit hive (where we've seen the
        // key end up on some people's systems):
        access_flags |= KEY_WOW64_32KEY;
        rtn_value = RegOpenKeyEx(hive, sub_key, 0, access_flags, &key);
        if (rtn_value != ERROR_SUCCESS) {
            // We still couldn't find the key, so give up:
            return NULL;
        }
    }

    rtn_value = RegQueryValueEx(key, value, NULL, &value_type,
                                (PVOID) rtn_str, &rtn_len);
    if (rtn_value == ERROR_SUCCESS) {
        // If we get to here, we found the key, and need to allocate memory
        // large enough for rtn_str, and query again:
        allocated_len = rtn_len + 4;
        rtn_str = malloc(allocated_len);
        rtn_value = RegQueryValueEx(key, value, NULL, &value_type,
                                    (PVOID) rtn_str, &rtn_len);
        if (rtn_value == ERROR_SUCCESS) {
            // We added 4 extra bytes to rtn_str, so that we can ensure that
            // the string is NULL-terminated (albeit, in a brute-force manner):
            rtn_str[allocated_len - 1] = '\0';
        } else {
            // This should never occur, but in case it does, clean up:
            free(rtn_str);
            rtn_str = NULL;
        }
    } // else - shouldn't happen, but if it does, return rtn_str, which is NULL

    // Close the registry key that was opened:
    RegCloseKey(key);

    return rtn_str;
}


// For ICD developers, look in the registry, and look for an environment
// variable for a path(s) where to find the ICD(s):
static char *loader_get_registry_and_env(const char *env_var,
                                         const char *registry_value)
{
    char *env_str = getenv(env_var);
    size_t env_len = (env_str == NULL) ? 0 : strlen(env_str);
    char *registry_str = NULL;
    DWORD registry_len = 0;
    char *rtn_str = NULL;
    size_t rtn_len;

    registry_str = loader_get_registry_string(HKEY_LOCAL_MACHINE,
                                              "Software\\Vulkan",
                                              registry_value);
    registry_len = (registry_str) ? strlen(registry_str) : 0;

    rtn_len = env_len + registry_len + 1;
    if (rtn_len <= 2) {
        // We found neither the desired registry value, nor the environment
        // variable; return NULL:
        return NULL;
    } else {
        // We found something, and so we need to allocate memory for the string
        // to return:
        rtn_str = malloc(rtn_len);
    }

    if (registry_len == 0) {
        // We didn't find the desired registry value, and so we must have found
        // only the environment variable:
        _snprintf(rtn_str, rtn_len, "%s", env_str);
    } else if (env_str != NULL) {
        // We found both the desired registry value and the environment
        // variable, so concatenate them both:
        _snprintf(rtn_str, rtn_len, "%s;%s", registry_str, env_str);
    } else {
        // We must have only found the desired registry value:
        _snprintf(rtn_str, rtn_len, "%s", registry_str);
    }

    if (registry_str) {
        free(registry_str);
    }

    return(rtn_str);
}
#endif // WIN32


static void loader_log(VK_DBG_MSG_TYPE msg_type, int32_t msg_code,
                       const char *format, ...)
{
    char msg[256];
    va_list ap;
    int ret;

    va_start(ap, format);
    ret = vsnprintf(msg, sizeof(msg), format, ap);
    if ((ret >= (int) sizeof(msg)) || ret < 0) {
        msg[sizeof(msg) - 1] = '\0';
    }
    va_end(ap);

    fputs(msg, stderr);
    fputc('\n', stderr);
}

static bool has_extension(struct extension_property *exts, uint32_t count,
                          const char *name, bool must_be_hosted)
{
    uint32_t i;
    for (i = 0; i < count; i++) {
        if (!strcmp(name, exts[i].extName) && (!must_be_hosted || exts[i].hosted))
            return true;
    }
    return false;
}

static void get_global_extensions(PFN_vkGetGlobalExtensionInfo fp_get,
                                  uint32_t *count_out,
                                  struct extension_property **props_out)
{
    uint32_t i, count, cur;
    size_t siz = sizeof(count);
    struct extension_property *ext_props;
    VkExtensionProperties vk_prop;
    VkResult res;

    *count_out = 0;
    *props_out = NULL;
    res = fp_get(VK_EXTENSION_INFO_TYPE_COUNT, 0, &siz, &count);
    if (res != VK_SUCCESS) {
        loader_log(VK_DBG_MSG_WARNING, 0, "Error getting global extension count from ICD");
        return;
    }
    ext_props = (struct extension_property *) malloc(sizeof(struct extension_property) * count);
    if (ext_props == NULL) {
        loader_log(VK_DBG_MSG_WARNING, 0, "Out of memory didn't get global extensions from ICD");
        return;
    }
    siz = sizeof(VkExtensionProperties);
    cur = 0;
    for (i = 0; i < count; i++) {
        res = fp_get(VK_EXTENSION_INFO_TYPE_PROPERTIES, i, &siz, &vk_prop);
        if (res == VK_SUCCESS) {
            (ext_props + cur)->hosted = false;
            (ext_props + cur)->version = vk_prop.version;
            strncpy((ext_props + cur)->extName, vk_prop.extName, VK_MAX_EXTENSION_NAME);
            (ext_props + cur)->extName[VK_MAX_EXTENSION_NAME - 1] = '\0';
            cur++;
        }
        *count_out = cur;
        *props_out = ext_props;
    }
    return;
}

static void loader_init_ext_list()
{
    loader.scanned_ext_list_capacity = 256 * sizeof(struct extension_property *);
    loader.scanned_ext_list = malloc(loader.scanned_ext_list_capacity);
    memset(loader.scanned_ext_list, 0, loader.scanned_ext_list_capacity);
    loader.scanned_ext_list_count = 0;
}

#if 0 // currently no place to call this
static void loader_destroy_ext_list()
{
    free(loader.scanned_ext_list);
    loader.scanned_ext_list_capacity = 0;
    loader.scanned_ext_list_count = 0;
}
#endif

static void loader_add_to_ext_list(uint32_t count,
                                   struct extension_property *prop_list,
                                   bool is_layer_ext)
{
    uint32_t i, j;
    bool duplicate;
    struct extension_property *cur_ext;

    if (loader.scanned_ext_list == NULL || loader.scanned_ext_list_capacity == 0)
        loader_init_ext_list();

    if (loader.scanned_ext_list == NULL)
        return;

    struct extension_property *ext_list, **ext_list_addr;

    for (i = 0; i < count; i++) {
        cur_ext = prop_list + i;

        // look for duplicates or not
        duplicate = false;
        for (j = 0; j < loader.scanned_ext_list_count; j++) {
            ext_list = loader.scanned_ext_list[j];
            if (!strcmp(cur_ext->extName, ext_list->extName)) {
                duplicate = true;
                ext_list->hosted = false;
                break;
            }
        }

        // add to list at end
        if (!duplicate) {
            // check for enough capacity
            if (loader.scanned_ext_list_count * sizeof(struct extension_property *)
                            >= loader.scanned_ext_list_capacity) {
                // double capacity
                loader.scanned_ext_list_capacity *= 2;
                loader.scanned_ext_list = realloc(loader.scanned_ext_list,
                                                  loader.scanned_ext_list_capacity);
            }
            ext_list_addr = &(loader.scanned_ext_list[loader.scanned_ext_list_count++]);
            *ext_list_addr = cur_ext;
            cur_ext->hosted = true;
        }

    }
}

static bool loader_is_extension_scanned(const char *name)
{
    uint32_t i;

    for (i = 0; i < loader.scanned_ext_list_count; i++) {
        if (!strcmp(name, loader.scanned_ext_list[i]->extName))
            return true;
    }
    return false;
}

static void loader_coalesce_extensions()
{
    uint32_t i;
    struct loader_scanned_icds *icd_list = loader.scanned_icd_list;

    // traverse scanned icd list adding non-duplicate extensions to the list
    while (icd_list != NULL) {
        loader_add_to_ext_list(icd_list->extension_count, icd_list->extensions, false);
        icd_list = icd_list->next;
    };

    // Traverse layers list adding non-duplicate extensions to the list
    for (i = 0; i < loader.scanned_layer_count; i++) {
        loader_add_to_ext_list(loader.scanned_layers[i].extension_count,
                               loader.scanned_layers[i].extensions, true);
    }
}

static void loader_icd_destroy(struct loader_icd *icd)
{
    loader_platform_close_library(icd->scanned_icds->handle);
    free(icd);
}

static struct loader_icd * loader_icd_create(const struct loader_scanned_icds *scanned)
{
    struct loader_icd *icd;

    icd = malloc(sizeof(*icd));
    if (!icd)
        return NULL;

    memset(icd, 0, sizeof(*icd));

    icd->scanned_icds = scanned;

    return icd;
}

static struct loader_icd *loader_icd_add(struct loader_instance *ptr_inst,
                                         const struct loader_scanned_icds *scanned)
{
    struct loader_icd *icd;

    icd = loader_icd_create(scanned);
    if (!icd)
        return NULL;

    /* prepend to the list */
    icd->next = ptr_inst->icds;
    ptr_inst->icds = icd;

    return icd;
}

static void loader_scanned_icd_add(const char *filename)
{
    loader_platform_dl_handle handle;
    void *fp_gpa, *fp_enumerate, *fp_create_inst, *fp_destroy_inst;
    void *fp_get_global_ext_info;
    struct loader_scanned_icds *new_node;

    // Used to call: dlopen(filename, RTLD_LAZY);
    handle = loader_platform_open_library(filename);
    if (!handle) {
        loader_log(VK_DBG_MSG_WARNING, 0, loader_platform_open_library_error(filename));
        return;
    }

#define LOOKUP(func_ptr, func) do {                            \
    func_ptr = (PFN_vk ##func) loader_platform_get_proc_address(handle, "vk" #func); \
    if (!func_ptr) {                                           \
        loader_log(VK_DBG_MSG_WARNING, 0, loader_platform_get_proc_address_error("vk" #func)); \
        return;                                                \
    }                                                          \
} while (0)

    LOOKUP(fp_gpa, GetProcAddr);
    LOOKUP(fp_create_inst, CreateInstance);
    LOOKUP(fp_destroy_inst, DestroyInstance);
    LOOKUP(fp_enumerate, EnumeratePhysicalDevices);
    LOOKUP(fp_get_global_ext_info, GetGlobalExtensionInfo);
#undef LOOKUP

    new_node = (struct loader_scanned_icds *) malloc(sizeof(struct loader_scanned_icds));
    if (!new_node) {
        loader_log(VK_DBG_MSG_WARNING, 0, "Out of memory can't add icd");
        return;
    }

    new_node->handle = handle;
    new_node->GetProcAddr = fp_gpa;
    new_node->CreateInstance = fp_create_inst;
    new_node->DestroyInstance = fp_destroy_inst;
    new_node->EnumeratePhysicalDevices = fp_enumerate;
    new_node->GetGlobalExtensionInfo = fp_get_global_ext_info;
    new_node->extension_count = 0;
    new_node->extensions = NULL;
    new_node->next = loader.scanned_icd_list;

    loader.scanned_icd_list = new_node;

    if (fp_get_global_ext_info) {
        get_global_extensions((PFN_vkGetGlobalExtensionInfo) fp_get_global_ext_info,
                              &new_node->extension_count,
                              &new_node->extensions);
    } else {
        loader_log(VK_DBG_MSG_WARNING, 0, "Couldn't get global extensions from ICD");
    }
}

/**
 * Try to \c loader_icd_scan VK driver(s).
 *
 * This function scans the default system path or path
 * specified by the \c LIBVK_DRIVERS_PATH environment variable in
 * order to find loadable VK ICDs with the name of libVK_*.
 *
 * \returns
 * void; but side effect is to set loader_icd_scanned to true
 */
static void loader_icd_scan(void)
{
    const char *p, *next;
    char *libPaths = NULL;
    DIR *sysdir;
    struct dirent *dent;
    char icd_library[1024];
    char path[1024];
    uint32_t len;
#if defined(WIN32)
    bool must_free_libPaths;
    libPaths = loader_get_registry_and_env(DRIVER_PATH_ENV,
                                           DRIVER_PATH_REGISTRY_VALUE);
    if (libPaths != NULL) {
        must_free_libPaths = true;
    } else {
        must_free_libPaths = false;
        libPaths = DEFAULT_VK_DRIVERS_PATH;
    }
#else  // WIN32
    if (geteuid() == getuid()) {
        /* Don't allow setuid apps to use the DRIVER_PATH_ENV env var: */
        libPaths = getenv(DRIVER_PATH_ENV);
    }
    if (libPaths == NULL) {
        libPaths = DEFAULT_VK_DRIVERS_PATH;
    }
#endif // WIN32

    for (p = libPaths; *p; p = next) {
        next = strchr(p, PATH_SEPERATOR);
        if (next == NULL) {
            len = (uint32_t) strlen(p);
            next = p + len;
        }
        else {
            len = (uint32_t) (next - p);
            sprintf(path, "%.*s", (len > sizeof(path) - 1) ? (int) sizeof(path) - 1 : len, p);
            p = path;
            next++;
        }

        // TODO/TBD: Do we want to do this on Windows, or just let Windows take
        // care of its own search path (which it apparently has)?
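        // Open this directory and check each entry for an ICD shared library
        // name (VK_DRIVER_LIBRARY_PREFIX...VK_LIBRARY_SUFFIX); matches are
        // handed to loader_scanned_icd_add().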
        sysdir = opendir(p);
        if (sysdir) {
            dent = readdir(sysdir);
            while (dent) {
                /* Look for ICDs starting with VK_DRIVER_LIBRARY_PREFIX and
                 * ending with VK_LIBRARY_SUFFIX
                 */
                if (!strncmp(dent->d_name,
                             VK_DRIVER_LIBRARY_PREFIX,
                             VK_DRIVER_LIBRARY_PREFIX_LEN)) {
                    uint32_t nlen = (uint32_t) strlen(dent->d_name);
                    const char *suf = dent->d_name + nlen - VK_LIBRARY_SUFFIX_LEN;
                    if ((nlen > VK_LIBRARY_SUFFIX_LEN) &&
                        !strncmp(suf,
                                 VK_LIBRARY_SUFFIX,
                                 VK_LIBRARY_SUFFIX_LEN)) {
                        snprintf(icd_library, 1024, "%s" DIRECTORY_SYMBOL "%s", p, dent->d_name);
                        loader_scanned_icd_add(icd_library);
                    }
                }

                dent = readdir(sysdir);
            }
            closedir(sysdir);
        }
    }

#if defined(WIN32)
    // Free any allocated memory:
    if (must_free_libPaths) {
        free(libPaths);
    }
#endif // WIN32

    // Note that we've scanned for ICDs:
    loader.icds_scanned = true;
}


static void layer_lib_scan(void)
{
    const char *p, *next;
    char *libPaths = NULL;
    DIR *curdir;
    struct dirent *dent;
    size_t len, i;
    char temp_str[1024];
    uint32_t count;
    PFN_vkGetGlobalExtensionInfo fp_get_ext;

#if defined(WIN32)
    bool must_free_libPaths;
    libPaths = loader_get_registry_and_env(LAYERS_PATH_ENV,
                                           LAYERS_PATH_REGISTRY_VALUE);
    if (libPaths != NULL) {
        must_free_libPaths = true;
    } else {
        must_free_libPaths = false;
        libPaths = DEFAULT_VK_LAYERS_PATH;
    }
#else  // WIN32
    if (geteuid() == getuid()) {
        /* Don't allow setuid apps to use the LAYERS_PATH_ENV env var: */
        libPaths = getenv(LAYERS_PATH_ENV);
    }
    if (libPaths == NULL) {
        libPaths = DEFAULT_VK_LAYERS_PATH;
    }
#endif // WIN32

    if (libPaths == NULL) {
        // Have no paths to search:
        return;
    }
    len = strlen(libPaths);
    loader.layer_dirs = malloc(len + 1);
    if (loader.layer_dirs == NULL) {
        free(libPaths);
        return;
    }
    // Alloc passed, so we know there is enough space to hold the string, don't
    // need strncpy
    strcpy(loader.layer_dirs, libPaths);
#if defined(WIN32)
    // Free any allocated memory:
    if (must_free_libPaths) {
        free(libPaths);
        must_free_libPaths = false;
    }
#endif // WIN32
    libPaths = loader.layer_dirs;

    /* cleanup any previously scanned libraries */
    for (i = 0; i < loader.scanned_layer_count; i++) {
        if (loader.scanned_layers[i].name != NULL)
            free(loader.scanned_layers[i].name);
        if (loader.scanned_layers[i].extensions != NULL)
            free(loader.scanned_layers[i].extensions);
        loader.scanned_layers[i].name = NULL;
        loader.scanned_layers[i].extensions = NULL;
    }
    loader.scanned_layer_count = 0;
    count = 0;

    for (p = libPaths; *p; p = next) {
        next = strchr(p, PATH_SEPERATOR);
        if (next == NULL) {
            len = (uint32_t) strlen(p);
            next = p + len;
        }
        else {
            len = (uint32_t) (next - p);
            *(char *) next = '\0';
            next++;
        }

        curdir = opendir(p);
        if (curdir) {
            dent = readdir(curdir);
            while (dent) {
                /* Look for layers starting with VK_LAYER_LIBRARY_PREFIX and
                 * ending with VK_LIBRARY_SUFFIX
                 */
                if (!strncmp(dent->d_name,
                             VK_LAYER_LIBRARY_PREFIX,
                             VK_LAYER_LIBRARY_PREFIX_LEN)) {
                    uint32_t nlen = (uint32_t) strlen(dent->d_name);
                    const char *suf = dent->d_name + nlen - VK_LIBRARY_SUFFIX_LEN;
                    if ((nlen > VK_LIBRARY_SUFFIX_LEN) &&
                        !strncmp(suf,
                                 VK_LIBRARY_SUFFIX,
                                 VK_LIBRARY_SUFFIX_LEN)) {
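                        // Candidate layer library found: open it, make sure it
                        // exports vkGetGlobalExtensionInfo, and record its name
                        // and global extension list.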
                        loader_platform_dl_handle handle;
                        snprintf(temp_str, sizeof(temp_str),
                                 "%s" DIRECTORY_SYMBOL "%s", p, dent->d_name);
                        // Used to call: dlopen(temp_str, RTLD_LAZY)
                        if ((handle = loader_platform_open_library(temp_str)) == NULL) {
                            dent = readdir(curdir);
                            continue;
                        }
                        if (count == MAX_LAYER_LIBRARIES) {
                            loader_log(VK_DBG_MSG_ERROR, 0,
                                       "%s ignored: max layer libraries exceeded",
                                       temp_str);
                            break;
                        }
                        fp_get_ext = loader_platform_get_proc_address(handle,
                                                   "vkGetGlobalExtensionInfo");

                        if (!fp_get_ext) {
                            loader_log(VK_DBG_MSG_WARNING, 0,
                                       "Couldn't dlsym vkGetGlobalExtensionInfo from library %s",
                                       temp_str);
                            dent = readdir(curdir);
                            loader_platform_close_library(handle);
                            continue;
                        }

                        loader.scanned_layers[count].name =
                                malloc(strlen(temp_str) + 1);
                        if (loader.scanned_layers[count].name == NULL) {
                            loader_log(VK_DBG_MSG_ERROR, 0, "%s ignored: out of memory", temp_str);
                            break;
                        }

                        get_global_extensions(fp_get_ext,
                                              &loader.scanned_layers[count].extension_count,
                                              &loader.scanned_layers[count].extensions);


                        strcpy(loader.scanned_layers[count].name, temp_str);
                        count++;
                        loader_platform_close_library(handle);
                    }
                }

                dent = readdir(curdir);
            } // while (dir_entry)
            if (count == MAX_LAYER_LIBRARIES)
                break;
            closedir(curdir);
        } // if (curdir)
    } // for (libpaths)

    loader.scanned_layer_count = count;
    loader.layer_scanned = true;
}

static void loader_init_dispatch_table(VkLayerDispatchTable *tab, PFN_vkGetProcAddr fpGPA, VkPhysicalDevice gpu)
{
    loader_initialize_dispatch_table(tab, fpGPA, gpu);

    if (tab->EnumerateLayers == NULL)
        tab->EnumerateLayers = vkEnumerateLayers;
}

static void * VKAPI loader_gpa_internal(VkPhysicalDevice gpu, const char * pName)
{
    if (gpu == VK_NULL_HANDLE) {
        return NULL;
    }
    VkBaseLayerObject* gpuw = (VkBaseLayerObject *) gpu;
    VkLayerDispatchTable * disp_table = * (VkLayerDispatchTable **) gpuw->baseObject;
    void *addr;

    if (disp_table == NULL)
        return NULL;

    addr = loader_lookup_dispatch_table(disp_table, pName);
    if (addr)
        return addr;
    else {
        if (disp_table->GetProcAddr == NULL)
            return NULL;
        return disp_table->GetProcAddr(gpuw->nextObject, pName);
    }
}

extern struct loader_icd * loader_get_icd(const VkBaseLayerObject *gpu, uint32_t *gpu_index)
{
    /*
     * NOTE: at this time icd->gpus is pointing to wrapped GPUs, but nowhere else
     * are wrapped gpus used. Should go away. The incoming gpu is NOT wrapped so
     * we need to test it against the wrapped GPU's base object.
     */
    for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
        for (struct loader_icd *icd = inst->icds; icd; icd = icd->next) {
            for (uint32_t i = 0; i < icd->gpu_count; i++)
                if ((icd->gpus + i) == gpu || (void *)(icd->gpus + i)->baseObject == gpu) {
                    *gpu_index = i;
                    return icd;
                }
        }
    }
    return NULL;
}

static bool loader_layers_activated(const struct loader_icd *icd, const uint32_t gpu_index)
{
    if (icd->layer_count[gpu_index])
        return true;
    else
        return false;
}

static void loader_init_layer_libs(struct loader_icd *icd, uint32_t gpu_index,
                                   struct layer_name_pair * pLayerNames,
                                   uint32_t count)
{
    if (!icd)
        return;

    struct loader_layers *obj;
    bool foundLib;
    for (uint32_t i = 0; i < count; i++) {
        foundLib = false;
        for (uint32_t j = 0; j < icd->layer_count[gpu_index]; j++) {
            if (icd->layer_libs[gpu_index][j].lib_handle &&
                !strcmp(icd->layer_libs[gpu_index][j].name,
                        (char *) pLayerNames[i].layer_name) &&
                strcmp("Validation", (char *) pLayerNames[i].layer_name)) {
                foundLib = true;
                break;
            }
        }
        if (!foundLib) {
            obj = &(icd->layer_libs[gpu_index][i]);
            strncpy(obj->name, (char *) pLayerNames[i].layer_name, sizeof(obj->name) - 1);
            obj->name[sizeof(obj->name) - 1] = '\0';
            // Used to call: dlopen(pLayerNames[i].lib_name, RTLD_LAZY | RTLD_DEEPBIND)
            if ((obj->lib_handle = loader_platform_open_library(pLayerNames[i].lib_name)) == NULL) {
                loader_log(VK_DBG_MSG_ERROR, 0, loader_platform_open_library_error(pLayerNames[i].lib_name));
                continue;
            } else {
                loader_log(VK_DBG_MSG_UNKNOWN, 0, "Inserting layer %s from library %s",
                           pLayerNames[i].layer_name, pLayerNames[i].lib_name);
            }
            free(pLayerNames[i].layer_name);
            icd->layer_count[gpu_index]++;
        }
    }
}

static bool find_layer_extension(struct loader_icd *icd, uint32_t gpu_index,
                                 const char *pExtName, uint32_t *out_count,
                                 char *lib_name[MAX_LAYER_LIBRARIES])
{
    char *search_name;
    uint32_t j, found_count = 0;
    bool must_be_hosted;
    bool found = false;

    /*
     * The loader provides the abstraction that makes layers and extensions work via
     * the currently defined extension mechanism. That is, when an app queries for an
     * extension via vkGetGlobalExtensionInfo, the loader will call both the driver as
     * well as any layers to see who implements that extension. Then, if the app enables
     * the extension during vkCreateInstance the loader will find and load any layers
     * that implement that extension.
     */

    // TODO: what about GetPhysicalDeviceExtension for device specific layers/extensions

    for (j = 0; j < loader.scanned_layer_count; j++) {

        if (!strcmp("Validation", pExtName))
            must_be_hosted = false;
        else
            must_be_hosted = true;
        if (has_extension(loader.scanned_layers[j].extensions,
                          loader.scanned_layers[j].extension_count, pExtName,
                          must_be_hosted)) {

            found = true;
            lib_name[found_count] = loader.scanned_layers[j].name;
            found_count++;
        } else {
            // Extension not found in list for the layer, so test the layer name
            // as if it is an extension name.
            // Use default layer name based on
            // library name VK_LAYER_LIBRARY_PREFIX<name>.VK_LIBRARY_SUFFIX
            char *pEnd;
            size_t siz;

            search_name = loader.scanned_layers[j].name;
            search_name = basename(search_name);
            search_name += strlen(VK_LAYER_LIBRARY_PREFIX);
            pEnd = strrchr(search_name, '.');
            siz = (int) (pEnd - search_name);
            if (siz != strlen(pExtName))
                continue;

            if (strncmp(search_name, pExtName, siz) == 0) {
                found = true;
                lib_name[found_count] = loader.scanned_layers[j].name;
                found_count++;
            }
        }
    }

    *out_count = found_count;
    return found;
}

static uint32_t loader_get_layer_env(struct loader_icd *icd, uint32_t gpu_index, struct layer_name_pair *pLayerNames)
{
    char *layerEnv;
    uint32_t i, len, found_count, count = 0;
    char *p, *pOrig, *next, *name;

#if defined(WIN32)
    layerEnv = loader_get_registry_and_env(LAYER_NAMES_ENV,
                                           LAYER_NAMES_REGISTRY_VALUE);
#else  // WIN32
    layerEnv = getenv(LAYER_NAMES_ENV);
#endif // WIN32
    if (layerEnv == NULL) {
        return 0;
    }
    p = malloc(strlen(layerEnv) + 1);
    if (p == NULL) {
#if defined(WIN32)
        free(layerEnv);
#endif // WIN32
        return 0;
    }
    strcpy(p, layerEnv);
#if defined(WIN32)
    free(layerEnv);
#endif // WIN32
    pOrig = p;

    while (p && *p && count < MAX_LAYER_LIBRARIES) {
        char *lib_name[MAX_LAYER_LIBRARIES];
        //memset(&lib_name[0], 0, sizeof(const char *) * MAX_LAYER_LIBRARIES);
        next = strchr(p, PATH_SEPERATOR);
        if (next == NULL) {
            len = (uint32_t) strlen(p);
            next = p + len;
        } else {
            len = (uint32_t) (next - p);
            *(char *) next = '\0';
            next++;
        }
        name = basename(p);
        if (!find_layer_extension(icd, gpu_index, name, &found_count, lib_name)) {
            p = next;
            continue;
        }

        for (i = 0; i < found_count; i++) {
            len = (uint32_t) strlen(name);
            pLayerNames[count].layer_name = malloc(len + 1);
            if (!pLayerNames[count].layer_name) {
                free(pOrig);
                return count;
            }
            strncpy((char *) pLayerNames[count].layer_name, name, len);
            pLayerNames[count].layer_name[len] = '\0';
            pLayerNames[count].lib_name = lib_name[i];
            count++;
        }
        p = next;

    }

    free(pOrig);
    return count;
}

static uint32_t loader_get_layer_libs(struct loader_icd *icd, uint32_t gpu_index, uint32_t ext_count, const char *const* ext_names, struct layer_name_pair **ppLayerNames)
{
    static struct layer_name_pair layerNames[MAX_LAYER_LIBRARIES];
    char *lib_name[MAX_LAYER_LIBRARIES];
    uint32_t found_count, count = 0;
    bool skip;

    *ppLayerNames = &layerNames[0];
    /* Load any layers specified in the environment first */
    count = loader_get_layer_env(icd, gpu_index, layerNames);

    for (uint32_t i = 0; i < ext_count; i++) {
        const char *pExtName = ext_names[i];

        skip = false;
        for (uint32_t j = 0; j < count; j++) {
            if (!strcmp(pExtName, layerNames[j].layer_name)) {
                // Extension / Layer already on the list, skip it
                skip = true;
                break;
            }
        }

        if (!skip && find_layer_extension(icd, gpu_index, pExtName, &found_count, lib_name)) {

            for (uint32_t j = 0; j < found_count; j++) {
                uint32_t len;
                len = (uint32_t) strlen(pExtName);


                layerNames[count].layer_name = malloc(len + 1);
                if (!layerNames[count].layer_name)
                    return count;
                strncpy((char *) layerNames[count].layer_name, pExtName, len);
                layerNames[count].layer_name[len] = '\0';
                layerNames[count].lib_name = lib_name[j];
                count++;
            }
        }
    }

    return count;
}

static void loader_deactivate_layer(const struct loader_instance *instance)
{
    struct loader_icd *icd;
    struct loader_layers *libs;

    for (icd = instance->icds; icd; icd = icd->next) {
        if (icd->gpus)
            free(icd->gpus);
        icd->gpus = NULL;
        if (icd->loader_dispatch)
            free(icd->loader_dispatch);
        icd->loader_dispatch = NULL;
        for (uint32_t j = 0; j < icd->gpu_count; j++) {
            if (icd->layer_count[j] > 0) {
                for (uint32_t i = 0; i < icd->layer_count[j]; i++) {
                    libs = &(icd->layer_libs[j][i]);
                    if (libs->lib_handle)
                        loader_platform_close_library(libs->lib_handle);
                    libs->lib_handle = NULL;
                }
                if (icd->wrappedGpus[j])
                    free(icd->wrappedGpus[j]);
            }
            icd->layer_count[j] = 0;
        }
        icd->gpu_count = 0;
    }
}

extern uint32_t loader_activate_layers(struct loader_icd *icd, uint32_t gpu_index, uint32_t ext_count, const char *const* ext_names)
{
    uint32_t count;
    VkBaseLayerObject *gpu;
    struct layer_name_pair *pLayerNames;
    if (!icd)
        return 0;
    assert(gpu_index < MAX_GPUS_FOR_LAYER);

    gpu = icd->gpus + gpu_index;
    /* activate any layer libraries */
    if (!loader_layers_activated(icd, gpu_index)) {
        VkBaseLayerObject *gpuObj = gpu;
        VkBaseLayerObject *nextGpuObj, *baseObj = (VkBaseLayerObject *) gpuObj->baseObject;
        PFN_vkGetProcAddr nextGPA = loader_gpa_internal;

        count = loader_get_layer_libs(icd, gpu_index, ext_count, ext_names, &pLayerNames);
        if (!count)
            return 0;
        loader_init_layer_libs(icd, gpu_index, pLayerNames, count);

        icd->wrappedGpus[gpu_index] = malloc(sizeof(VkBaseLayerObject) * icd->layer_count[gpu_index]);
        if (!icd->wrappedGpus[gpu_index])
            loader_log(VK_DBG_MSG_ERROR, 0, "Failed to malloc Gpu objects for layer");
        for (int32_t i = icd->layer_count[gpu_index] - 1; i >= 0; i--) {
            nextGpuObj = (icd->wrappedGpus[gpu_index] + i);
            nextGpuObj->pGPA = nextGPA;
            nextGpuObj->baseObject = (VkObject) baseObj;
            nextGpuObj->nextObject = (VkObject) gpuObj;
            gpuObj = nextGpuObj;

            char funcStr[256];
            snprintf(funcStr, 256, "%sGetProcAddr", icd->layer_libs[gpu_index][i].name);
            if ((nextGPA = (PFN_vkGetProcAddr) loader_platform_get_proc_address(icd->layer_libs[gpu_index][i].lib_handle, funcStr)) == NULL)
                nextGPA = (PFN_vkGetProcAddr) loader_platform_get_proc_address(icd->layer_libs[gpu_index][i].lib_handle, "vkGetProcAddr");
            if (!nextGPA) {
                loader_log(VK_DBG_MSG_ERROR, 0, "Failed to find vkGetProcAddr in layer %s", icd->layer_libs[gpu_index][i].name);
                continue;
            }

            if (i == 0) {
                loader_init_dispatch_table(icd->loader_dispatch + gpu_index, nextGPA, (VkPhysicalDevice) gpuObj);
                // Insert the new wrapped objects into the list with loader object at head
                gpu->nextObject = (VkObject) gpuObj;
                gpu->pGPA = nextGPA;
                gpuObj = icd->wrappedGpus[gpu_index] + icd->layer_count[gpu_index] - 1;
                gpuObj->nextObject = (VkObject) baseObj;
                gpuObj->pGPA = icd->scanned_icds->GetProcAddr;
            }

        }
    }
    else {
        // make sure the requested Layers match the currently activated Layers
        count = loader_get_layer_libs(icd, gpu_index, ext_count, ext_names, &pLayerNames);
        for (uint32_t i = 0; i < count; i++) {
            if (strcmp(icd->layer_libs[gpu_index][i].name, pLayerNames[i].layer_name)) {
                loader_log(VK_DBG_MSG_ERROR, 0, "Layers activated != Layers requested");
                break;
            }
        }
        if (count != icd->layer_count[gpu_index]) {
            loader_log(VK_DBG_MSG_ERROR, 0, "Number of Layers activated != number requested");
        }
    }
    return icd->layer_count[gpu_index];
}

LOADER_EXPORT VkResult VKAPI vkCreateInstance(
        const VkInstanceCreateInfo*         pCreateInfo,
        VkInstance*                         pInstance)
{
    struct loader_instance *ptr_instance = NULL;
    struct loader_scanned_icds *scanned_icds;
    struct loader_icd *icd;
    VkResult res = VK_ERROR_INITIALIZATION_FAILED;
    uint32_t i;

    /* Scan/discover all ICD libraries in a single-threaded manner */
    loader_platform_thread_once(&once_icd, loader_icd_scan);

    /* get layer libraries in a single-threaded manner */
    loader_platform_thread_once(&once_layer, layer_lib_scan);

    /* merge any duplicate extensions */
    loader_platform_thread_once(&once_exts, loader_coalesce_extensions);

    ptr_instance = (struct loader_instance*) malloc(sizeof(struct loader_instance));
    if (ptr_instance == NULL) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    memset(ptr_instance, 0, sizeof(struct loader_instance));
    ptr_instance->extension_count = pCreateInfo->extensionCount;
    ptr_instance->extension_names = (ptr_instance->extension_count > 0) ?
                malloc(sizeof (char *) * ptr_instance->extension_count) : NULL;
    if (ptr_instance->extension_names == NULL && (ptr_instance->extension_count > 0))
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    for (i = 0; i < ptr_instance->extension_count; i++) {
        if (!loader_is_extension_scanned(pCreateInfo->ppEnabledExtensionNames[i]))
            return VK_ERROR_INVALID_EXTENSION;
        ptr_instance->extension_names[i] = malloc(strlen(pCreateInfo->ppEnabledExtensionNames[i]) + 1);
        if (ptr_instance->extension_names[i] == NULL)
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        strcpy(ptr_instance->extension_names[i], pCreateInfo->ppEnabledExtensionNames[i]);
    }
    ptr_instance->next = loader.instances;
    loader.instances = ptr_instance;

    scanned_icds = loader.scanned_icd_list;
    while (scanned_icds) {
        icd = loader_icd_add(ptr_instance, scanned_icds);
        if (icd) {
            res = scanned_icds->CreateInstance(pCreateInfo,
                                               &(scanned_icds->instance));
            if (res != VK_SUCCESS)
            {
                ptr_instance->icds = ptr_instance->icds->next;
                loader_icd_destroy(icd);
                scanned_icds->instance = VK_NULL_HANDLE;
                loader_log(VK_DBG_MSG_WARNING, 0,
                           "ICD ignored: failed to CreateInstance on device");
            }
        }
        scanned_icds = scanned_icds->next;
    }

    if (ptr_instance->icds == NULL) {
        return VK_ERROR_INCOMPATIBLE_DRIVER;
    }

    *pInstance = (VkInstance) ptr_instance;
    return VK_SUCCESS;
}

LOADER_EXPORT VkResult VKAPI vkDestroyInstance(
        VkInstance                          instance)
{
    struct loader_instance *ptr_instance = (struct loader_instance *) instance;
    struct loader_scanned_icds *scanned_icds;
    VkResult res;
    uint32_t i;

    // Remove this instance from the list of instances:
    struct loader_instance *prev = NULL;
    struct loader_instance *next = loader.instances;
    while (next != NULL) {
        if (next == ptr_instance) {
            // Remove this instance from the list:
            for (i = 0; i < ptr_instance->extension_count; i++) {
                free(ptr_instance->extension_names[i]);
            }
            if (prev)
                prev->next = next->next;
            else
                loader.instances = next->next;
            break;
        }
        prev = next;
        next = next->next;
    }
    if (next == NULL) {
        // This must be an invalid instance handle or empty list
        return VK_ERROR_INVALID_HANDLE;
    }

    // cleanup any prior layer initializations
    loader_deactivate_layer(ptr_instance);

    scanned_icds = loader.scanned_icd_list;
    while (scanned_icds) {
        if (scanned_icds->instance)
            res = scanned_icds->DestroyInstance(scanned_icds->instance);
        if (res != VK_SUCCESS)
            loader_log(VK_DBG_MSG_WARNING, 0,
                       "ICD ignored: failed to DestroyInstance on device");
        scanned_icds->instance = VK_NULL_HANDLE;
        scanned_icds = scanned_icds->next;
    }

    free(ptr_instance);

    return VK_SUCCESS;
}

LOADER_EXPORT VkResult VKAPI vkEnumeratePhysicalDevices(
        VkInstance                          instance,
        uint32_t*                           pPhysicalDeviceCount,
        VkPhysicalDevice*                   pPhysicalDevices)
{
    struct loader_instance *ptr_instance = (struct loader_instance *) instance;
    struct loader_icd *icd;
    uint32_t n, count = 0;
    VkResult res;

    // in spirit of VK don't error check on the instance parameter
    icd = ptr_instance->icds;
    if (pPhysicalDevices == NULL) {
        while (icd) {
            res = icd->scanned_icds->EnumeratePhysicalDevices(
                                            icd->scanned_icds->instance,
                                            &n, NULL);
            if (res != VK_SUCCESS)
                return res;
            icd->gpu_count = n;
            count += n;
            icd = icd->next;
        }

        ptr_instance->total_gpu_count = count;

    } else
    {
        VkPhysicalDevice* gpus;
        if (*pPhysicalDeviceCount < ptr_instance->total_gpu_count)
            return VK_ERROR_INVALID_VALUE;
        gpus = malloc(sizeof(VkPhysicalDevice) * *pPhysicalDeviceCount);
        if (!gpus)
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        while (icd) {
            VkBaseLayerObject * wrapped_gpus;
            PFN_vkGetProcAddr get_proc_addr = icd->scanned_icds->GetProcAddr;

            res = icd->scanned_icds->EnumeratePhysicalDevices(
                                            icd->scanned_icds->instance,
                                            &n,
                                            gpus);
            if (res == VK_SUCCESS && n) {
                wrapped_gpus = (VkBaseLayerObject*) malloc(n *
                                            sizeof(VkBaseLayerObject));
                icd->gpus = wrapped_gpus;
                icd->gpu_count = n;
                icd->loader_dispatch = (VkLayerDispatchTable *) malloc(n *
                                        sizeof(VkLayerDispatchTable));
                for (unsigned int i = 0; i < n; i++) {
                    (wrapped_gpus + i)->baseObject = gpus[i];
                    (wrapped_gpus + i)->pGPA = get_proc_addr;
                    (wrapped_gpus + i)->nextObject = gpus[i];
                    memcpy(pPhysicalDevices + count, gpus, sizeof(*pPhysicalDevices));
                    loader_init_dispatch_table(icd->loader_dispatch + i,
                                               get_proc_addr, gpus[i]);

                    /* Verify ICD compatibility */
                    if (!valid_loader_magic_value((void*) gpus[i])) {
                        loader_log(VK_DBG_MSG_WARNING, 0,
                            "Loader: Incompatible ICD, first dword must be initialized to ICD_LOADER_MAGIC. See loader/README.md for details.\n");
                        assert(0);
                    }

                    const VkLayerDispatchTable **disp;
                    disp = (const VkLayerDispatchTable **) gpus[i];
                    *disp = icd->loader_dispatch + i;
                    loader_activate_layers(icd, i, ptr_instance->extension_count,
                                           (const char *const*) ptr_instance->extension_names);
                }

                count += n;

                if (count >= *pPhysicalDeviceCount) {
                    break;
                }
            }

            icd = icd->next;
        }
    }

    *pPhysicalDeviceCount = count;

    return (count > 0) ? VK_SUCCESS : res;
}

LOADER_EXPORT void * VKAPI vkGetProcAddr(VkPhysicalDevice gpu, const char * pName)
{
    if (gpu == VK_NULL_HANDLE) {

        /* return entrypoint addresses that are global (in the loader) */
        return globalGetProcAddr(pName);
    }

    void *addr;

    /* for entrypoints that the loader must handle (i.e. non-dispatchable or create object)
       make sure the loader entrypoint is returned */
    addr = loader_non_passthrough_gpa(pName);
    if (addr) {
        return addr;
    }

    /* return the dispatch table entrypoint for the fastest case */
    const VkLayerDispatchTable *disp_table = * (VkLayerDispatchTable **) gpu;
    if (disp_table == NULL)
        return NULL;

    addr = loader_lookup_dispatch_table(disp_table, pName);
    if (addr)
        return addr;
    else {
        if (disp_table->GetProcAddr == NULL)
            return NULL;
        return disp_table->GetProcAddr(gpu, pName);
    }
}

// TODO: make sure createInstance enables extensions that are valid (loader does)
// TODO: make sure CreateDevice enables extensions that are valid (left for layers/drivers to do)

// TODO: how is layer extension going to be enabled?
// Need to call createInstance on the layer or something

LOADER_EXPORT VkResult VKAPI vkGetGlobalExtensionInfo(
        VkExtensionInfoType                 infoType,
        uint32_t                            extensionIndex,
        size_t*                             pDataSize,
        void*                               pData)
{
    VkExtensionProperties *ext_props;
    uint32_t *count;
    /* Scan/discover all ICD libraries in a single-threaded manner */
    loader_platform_thread_once(&once_icd, loader_icd_scan);

    /* get layer libraries in a single-threaded manner */
    loader_platform_thread_once(&once_layer, layer_lib_scan);

    /* merge any duplicate extensions */
    loader_platform_thread_once(&once_exts, loader_coalesce_extensions);


    if (pDataSize == NULL)
        return VK_ERROR_INVALID_POINTER;

    switch (infoType) {
        case VK_EXTENSION_INFO_TYPE_COUNT:
            *pDataSize = sizeof(uint32_t);
            if (pData == NULL)
                return VK_SUCCESS;
            count = (uint32_t *) pData;
            *count = loader.scanned_ext_list_count;
            break;
        case VK_EXTENSION_INFO_TYPE_PROPERTIES:
            *pDataSize = sizeof(VkExtensionProperties);
            if (pData == NULL)
                return VK_SUCCESS;
            if (extensionIndex >= loader.scanned_ext_list_count)
                return VK_ERROR_INVALID_VALUE;
            ext_props = (VkExtensionProperties *) pData;
            ext_props->version = loader.scanned_ext_list[extensionIndex]->version;
            strncpy(ext_props->extName, loader.scanned_ext_list[extensionIndex]->extName,
                    VK_MAX_EXTENSION_NAME);
            ext_props->extName[VK_MAX_EXTENSION_NAME - 1] = '\0';
            break;
        default:
            loader_log(VK_DBG_MSG_WARNING, 0, "Invalid infoType in vkGetGlobalExtensionInfo");
            return VK_ERROR_INVALID_VALUE;
    };

    return VK_SUCCESS;
}

LOADER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalDevice gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
{
    uint32_t gpu_index;
    size_t count = 0;
    char *lib_name;
    struct loader_icd *icd = loader_get_icd((const VkBaseLayerObject *) gpu, &gpu_index);
    loader_platform_dl_handle handle;
    PFN_vkEnumerateLayers fpEnumerateLayers;
    char layer_buf[16][256];
    char * layers[16];

    if (pOutLayerCount == NULL || pOutLayers == NULL)
        return VK_ERROR_INVALID_POINTER;

    if (!icd)
        return VK_ERROR_UNAVAILABLE;

    for (int i = 0; i < 16; i++)
        layers[i] = &layer_buf[i][0];

    for (unsigned int j = 0; j < loader.scanned_layer_count && count < maxLayerCount; j++) {
        lib_name = loader.scanned_layers[j].name;
        // Used to call: dlopen(*lib_name, RTLD_LAZY)
        if ((handle = loader_platform_open_library(lib_name)) == NULL)
            continue;
        if ((fpEnumerateLayers = loader_platform_get_proc_address(handle, "vkEnumerateLayers")) == NULL) {
            // use default layer name based on library name VK_LAYER_LIBRARY_PREFIX<name>.VK_LIBRARY_SUFFIX
            char *pEnd, *cpyStr;
            size_t siz;
            loader_platform_close_library(handle);
            lib_name = basename(lib_name);
            pEnd = strrchr(lib_name, '.');
            siz = (int) (pEnd - lib_name - strlen(VK_LAYER_LIBRARY_PREFIX) + 1);
            if (pEnd == NULL || siz <= 0)
                continue;
            cpyStr = malloc(siz);
            if (cpyStr == NULL) {
                free(cpyStr);
                continue;
            }
            strncpy(cpyStr, lib_name + strlen(VK_LAYER_LIBRARY_PREFIX), siz);
            cpyStr[siz - 1] = '\0';
            if (siz > maxStringSize)
                siz = (int) maxStringSize;
            strncpy((char *) (pOutLayers[count]), cpyStr, siz);
            pOutLayers[count][siz - 1] = '\0';
            count++;
            free(cpyStr);
        } else {
            size_t cnt;
            uint32_t n;
            VkResult res;
            n = (uint32_t) ((maxStringSize < 256) ? maxStringSize : 256);
            res = fpEnumerateLayers((VkPhysicalDevice) NULL, 16, n, &cnt, layers, (char *) icd->gpus + gpu_index);
            loader_platform_close_library(handle);
            if (res != VK_SUCCESS)
                continue;
            if (cnt + count > maxLayerCount)
                cnt = maxLayerCount - count;
            for (uint32_t i = (uint32_t) count; i < cnt + count; i++) {
                strncpy((char *) (pOutLayers[i]), (char *) layers[i - count], n);
                if (n > 0)
                    pOutLayers[i - count][n - 1] = '\0';
            }
            count += cnt;
        }
    }

    *pOutLayerCount = count;

    return VK_SUCCESS;
}

LOADER_EXPORT VkResult VKAPI vkDbgRegisterMsgCallback(VkInstance instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback, void* pUserData)
{
    const struct loader_icd *icd;
    struct loader_instance *inst;
    VkResult res;
    uint32_t gpu_idx;

    if (instance == VK_NULL_HANDLE)
        return VK_ERROR_INVALID_HANDLE;

    assert(loader.icds_scanned);

    for (inst = loader.instances; inst; inst = inst->next) {
        if ((VkInstance) inst == instance)
            break;
    }

    if (inst == VK_NULL_HANDLE)
        return VK_ERROR_INVALID_HANDLE;

    for (icd = inst->icds; icd; icd = icd->next) {
        for (uint32_t i = 0; i < icd->gpu_count; i++) {
            res = (icd->loader_dispatch + i)->DbgRegisterMsgCallback(icd->scanned_icds->instance,
                                                                     pfnMsgCallback, pUserData);
            if (res != VK_SUCCESS) {
                gpu_idx = i;
                break;
            }
        }
        if (res != VK_SUCCESS)
            break;
    }


    /* roll back on errors */
    if (icd) {
        for (const struct loader_icd *tmp = inst->icds; tmp != icd;
                                                        tmp = tmp->next) {
            for (uint32_t i = 0; i < icd->gpu_count; i++)
                (tmp->loader_dispatch + i)->DbgUnregisterMsgCallback(tmp->scanned_icds->instance, pfnMsgCallback);
        }
        /* and gpus on current icd */
        for (uint32_t i = 0; i < gpu_idx; i++)
            (icd->loader_dispatch + i)->DbgUnregisterMsgCallback(icd->scanned_icds->instance, pfnMsgCallback);

        return res;
    }

    return VK_SUCCESS;
}

LOADER_EXPORT VkResult VKAPI vkDbgUnregisterMsgCallback(VkInstance instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback)
{
    VkResult res = VK_SUCCESS;
    struct loader_instance *inst;
    if (instance == VK_NULL_HANDLE)
        return VK_ERROR_INVALID_HANDLE;

    assert(loader.icds_scanned);

    for (inst = loader.instances; inst; inst = inst->next) {
        if ((VkInstance) inst == instance)
            break;
    }

    if (inst == VK_NULL_HANDLE)
        return VK_ERROR_INVALID_HANDLE;

    for (const struct loader_icd * icd = inst->icds; icd; icd = icd->next) {
        for (uint32_t i = 0; i < icd->gpu_count; i++) {
            VkResult r;
            r = (icd->loader_dispatch + i)->DbgUnregisterMsgCallback(icd->scanned_icds->instance, pfnMsgCallback);
            if (r != VK_SUCCESS) {
                res = r;
            }
        }
    }
    return res;
}

LOADER_EXPORT VkResult VKAPI vkDbgSetGlobalOption(VkInstance instance, VK_DBG_GLOBAL_OPTION dbgOption, size_t dataSize, const void* pData)
{
    VkResult res = VK_SUCCESS;
    struct loader_instance *inst;
    if (instance == VK_NULL_HANDLE)
        return VK_ERROR_INVALID_HANDLE;

    assert(loader.icds_scanned);

    for (inst = loader.instances; inst; inst = inst->next) {
        if ((VkInstance) inst == instance)
            break;
    }

    if (inst == VK_NULL_HANDLE)
        return VK_ERROR_INVALID_HANDLE;
    for (const struct loader_icd * icd = inst->icds; icd; icd = icd->next) {
        for (uint32_t i = 0; i < icd->gpu_count; i++) {
            VkResult r;
            r = (icd->loader_dispatch + i)->DbgSetGlobalOption(icd->scanned_icds->instance, dbgOption,
                                                               dataSize, pData);
            /* unfortunately we cannot roll back */
            if (r != VK_SUCCESS) {
                res = r;
            }
        }
    }

    return res;
}