/* vulkaninfo.c — revision ef72e2a7cf4fa5708fdbfbc1c972a24ad0dbf809 */
1/* 2 * Vulkan 3 * 4 * Copyright (C) 2014 LunarG, Inc. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included 14 * in all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 
23 */ 24#include <stdlib.h> 25#include <stdio.h> 26#include <stdbool.h> 27#include <string.h> 28#include <assert.h> 29 30#ifdef _WIN32 31#include <Windows.h> 32#include <fcntl.h> 33#include <io.h> 34#endif 35 36#include "vk_wsi_swapchain.h" 37#include "vk_wsi_device_swapchain.h" 38 39#include <vulkan.h> 40 41#define ERR(err) printf("%s:%d: failed with %s\n", \ 42 __FILE__, __LINE__, vk_result_string(err)); 43 44#ifdef _WIN32 45 46#define snprintf _snprintf 47 48bool consoleCreated = false; 49 50#define WAIT_FOR_CONSOLE_DESTROY \ 51 do { \ 52 if (consoleCreated) \ 53 Sleep(INFINITE); \ 54 } while (0) 55#else 56 #define WAIT_FOR_CONSOLE_DESTROY 57#endif 58 59 60#define ERR_EXIT(err) \ 61 do { \ 62 ERR(err); \ 63 fflush(stdout); \ 64 WAIT_FOR_CONSOLE_DESTROY; \ 65 exit(-1); \ 66 } while (0) 67 68#if defined(NDEBUG) && defined(__GNUC__) 69#define U_ASSERT_ONLY __attribute__((unused)) 70#else 71#define U_ASSERT_ONLY 72#endif 73 74#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) 75 76#define MAX_GPUS 8 77 78#define MAX_QUEUE_TYPES 5 79#define APP_SHORT_NAME "vulkaninfo" 80 81struct app_gpu; 82 83struct app_dev { 84 struct app_gpu *gpu; /* point back to the GPU */ 85 86 VkDevice obj; 87 88 89 VkFormatProperties format_props[VK_FORMAT_NUM]; 90}; 91 92struct layer_extension_list { 93 VkLayerProperties layer_properties; 94 uint32_t extension_count; 95 VkExtensionProperties *extension_properties; 96}; 97 98struct app_instance { 99 VkInstance instance; 100 uint32_t global_layer_count; 101 struct layer_extension_list *global_layers; 102 uint32_t global_extension_count; 103 VkExtensionProperties *global_extensions; 104}; 105 106struct app_gpu { 107 uint32_t id; 108 VkPhysicalDevice obj; 109 110 VkPhysicalDeviceProperties props; 111 112 uint32_t queue_count; 113 VkQueueFamilyProperties *queue_props; 114 VkDeviceQueueCreateInfo *queue_reqs; 115 116 VkPhysicalDeviceMemoryProperties memory_props; 117 VkPhysicalDeviceFeatures features; 118 VkPhysicalDeviceLimits limits; 119 120 
uint32_t device_layer_count; 121 struct layer_extension_list *device_layers; 122 123 uint32_t device_extension_count; 124 VkExtensionProperties *device_extensions; 125 126 struct app_dev dev; 127}; 128 129static const char *vk_result_string(VkResult err) 130{ 131 switch (err) { 132#define STR(r) case r: return #r 133 STR(VK_SUCCESS); 134 STR(VK_UNSUPPORTED); 135 STR(VK_NOT_READY); 136 STR(VK_TIMEOUT); 137 STR(VK_EVENT_SET); 138 STR(VK_EVENT_RESET); 139 STR(VK_ERROR_UNKNOWN); 140 STR(VK_ERROR_UNAVAILABLE); 141 STR(VK_ERROR_INITIALIZATION_FAILED); 142 STR(VK_ERROR_OUT_OF_HOST_MEMORY); 143 STR(VK_ERROR_OUT_OF_DEVICE_MEMORY); 144 STR(VK_ERROR_DEVICE_ALREADY_CREATED); 145 STR(VK_ERROR_DEVICE_LOST); 146 STR(VK_ERROR_INVALID_POINTER); 147 STR(VK_ERROR_INVALID_VALUE); 148 STR(VK_ERROR_INVALID_HANDLE); 149 STR(VK_ERROR_INVALID_ORDINAL); 150 STR(VK_ERROR_INVALID_MEMORY_SIZE); 151 STR(VK_ERROR_INVALID_EXTENSION); 152 STR(VK_ERROR_INVALID_FLAGS); 153 STR(VK_ERROR_INVALID_ALIGNMENT); 154 STR(VK_ERROR_INVALID_FORMAT); 155 STR(VK_ERROR_INVALID_IMAGE); 156 STR(VK_ERROR_INVALID_DESCRIPTOR_SET_DATA); 157 STR(VK_ERROR_INVALID_QUEUE_TYPE); 158 STR(VK_ERROR_UNSUPPORTED_SHADER_IL_VERSION); 159 STR(VK_ERROR_BAD_SHADER_CODE); 160 STR(VK_ERROR_BAD_PIPELINE_DATA); 161 STR(VK_ERROR_NOT_MAPPABLE); 162 STR(VK_ERROR_MEMORY_MAP_FAILED); 163 STR(VK_ERROR_MEMORY_UNMAP_FAILED); 164 STR(VK_ERROR_INCOMPATIBLE_DEVICE); 165 STR(VK_ERROR_INCOMPATIBLE_DRIVER); 166 STR(VK_ERROR_INCOMPLETE_COMMAND_BUFFER); 167 STR(VK_ERROR_BUILDING_COMMAND_BUFFER); 168 STR(VK_ERROR_MEMORY_NOT_BOUND); 169 STR(VK_ERROR_INCOMPATIBLE_QUEUE); 170#undef STR 171 default: return "UNKNOWN_RESULT"; 172 } 173} 174 175static const char *vk_physical_device_type_string(VkPhysicalDeviceType type) 176{ 177 switch (type) { 178#define STR(r) case VK_PHYSICAL_DEVICE_TYPE_ ##r: return #r 179 STR(OTHER); 180 STR(INTEGRATED_GPU); 181 STR(DISCRETE_GPU); 182 STR(VIRTUAL_GPU); 183#undef STR 184 default: return "UNKNOWN_DEVICE"; 185 } 186} 187 
188static const char *vk_format_string(VkFormat fmt) 189{ 190 switch (fmt) { 191#define STR(r) case VK_FORMAT_ ##r: return #r 192 STR(UNDEFINED); 193 STR(R4G4_UNORM); 194 STR(R4G4_USCALED); 195 STR(R4G4B4A4_UNORM); 196 STR(R4G4B4A4_USCALED); 197 STR(R5G6B5_UNORM); 198 STR(R5G6B5_USCALED); 199 STR(R5G5B5A1_UNORM); 200 STR(R5G5B5A1_USCALED); 201 STR(R8_UNORM); 202 STR(R8_SNORM); 203 STR(R8_USCALED); 204 STR(R8_SSCALED); 205 STR(R8_UINT); 206 STR(R8_SINT); 207 STR(R8_SRGB); 208 STR(R8G8_UNORM); 209 STR(R8G8_SNORM); 210 STR(R8G8_USCALED); 211 STR(R8G8_SSCALED); 212 STR(R8G8_UINT); 213 STR(R8G8_SINT); 214 STR(R8G8_SRGB); 215 STR(R8G8B8_UNORM); 216 STR(R8G8B8_SNORM); 217 STR(R8G8B8_USCALED); 218 STR(R8G8B8_SSCALED); 219 STR(R8G8B8_UINT); 220 STR(R8G8B8_SINT); 221 STR(R8G8B8_SRGB); 222 STR(R8G8B8A8_UNORM); 223 STR(R8G8B8A8_SNORM); 224 STR(R8G8B8A8_USCALED); 225 STR(R8G8B8A8_SSCALED); 226 STR(R8G8B8A8_UINT); 227 STR(R8G8B8A8_SINT); 228 STR(R8G8B8A8_SRGB); 229 STR(R10G10B10A2_UNORM); 230 STR(R10G10B10A2_SNORM); 231 STR(R10G10B10A2_USCALED); 232 STR(R10G10B10A2_SSCALED); 233 STR(R10G10B10A2_UINT); 234 STR(R10G10B10A2_SINT); 235 STR(R16_UNORM); 236 STR(R16_SNORM); 237 STR(R16_USCALED); 238 STR(R16_SSCALED); 239 STR(R16_UINT); 240 STR(R16_SINT); 241 STR(R16_SFLOAT); 242 STR(R16G16_UNORM); 243 STR(R16G16_SNORM); 244 STR(R16G16_USCALED); 245 STR(R16G16_SSCALED); 246 STR(R16G16_UINT); 247 STR(R16G16_SINT); 248 STR(R16G16_SFLOAT); 249 STR(R16G16B16_UNORM); 250 STR(R16G16B16_SNORM); 251 STR(R16G16B16_USCALED); 252 STR(R16G16B16_SSCALED); 253 STR(R16G16B16_UINT); 254 STR(R16G16B16_SINT); 255 STR(R16G16B16_SFLOAT); 256 STR(R16G16B16A16_UNORM); 257 STR(R16G16B16A16_SNORM); 258 STR(R16G16B16A16_USCALED); 259 STR(R16G16B16A16_SSCALED); 260 STR(R16G16B16A16_UINT); 261 STR(R16G16B16A16_SINT); 262 STR(R16G16B16A16_SFLOAT); 263 STR(R32_UINT); 264 STR(R32_SINT); 265 STR(R32_SFLOAT); 266 STR(R32G32_UINT); 267 STR(R32G32_SINT); 268 STR(R32G32_SFLOAT); 269 STR(R32G32B32_UINT); 270 
STR(R32G32B32_SINT); 271 STR(R32G32B32_SFLOAT); 272 STR(R32G32B32A32_UINT); 273 STR(R32G32B32A32_SINT); 274 STR(R32G32B32A32_SFLOAT); 275 STR(R64_SFLOAT); 276 STR(R64G64_SFLOAT); 277 STR(R64G64B64_SFLOAT); 278 STR(R64G64B64A64_SFLOAT); 279 STR(R11G11B10_UFLOAT); 280 STR(R9G9B9E5_UFLOAT); 281 STR(D16_UNORM); 282 STR(D24_UNORM); 283 STR(D32_SFLOAT); 284 STR(S8_UINT); 285 STR(D16_UNORM_S8_UINT); 286 STR(D24_UNORM_S8_UINT); 287 STR(D32_SFLOAT_S8_UINT); 288 STR(BC1_RGB_UNORM); 289 STR(BC1_RGB_SRGB); 290 STR(BC2_UNORM); 291 STR(BC2_SRGB); 292 STR(BC3_UNORM); 293 STR(BC3_SRGB); 294 STR(BC4_UNORM); 295 STR(BC4_SNORM); 296 STR(BC5_UNORM); 297 STR(BC5_SNORM); 298 STR(BC6H_UFLOAT); 299 STR(BC6H_SFLOAT); 300 STR(BC7_UNORM); 301 STR(BC7_SRGB); 302 STR(ETC2_R8G8B8_UNORM); 303 STR(ETC2_R8G8B8A1_UNORM); 304 STR(ETC2_R8G8B8A8_UNORM); 305 STR(EAC_R11_UNORM); 306 STR(EAC_R11_SNORM); 307 STR(EAC_R11G11_UNORM); 308 STR(EAC_R11G11_SNORM); 309 STR(ASTC_4x4_UNORM); 310 STR(ASTC_4x4_SRGB); 311 STR(ASTC_5x4_UNORM); 312 STR(ASTC_5x4_SRGB); 313 STR(ASTC_5x5_UNORM); 314 STR(ASTC_5x5_SRGB); 315 STR(ASTC_6x5_UNORM); 316 STR(ASTC_6x5_SRGB); 317 STR(ASTC_6x6_UNORM); 318 STR(ASTC_6x6_SRGB); 319 STR(ASTC_8x5_UNORM); 320 STR(ASTC_8x5_SRGB); 321 STR(ASTC_8x6_UNORM); 322 STR(ASTC_8x6_SRGB); 323 STR(ASTC_8x8_UNORM); 324 STR(ASTC_8x8_SRGB); 325 STR(ASTC_10x5_UNORM); 326 STR(ASTC_10x5_SRGB); 327 STR(ASTC_10x6_UNORM); 328 STR(ASTC_10x6_SRGB); 329 STR(ASTC_10x8_UNORM); 330 STR(ASTC_10x8_SRGB); 331 STR(ASTC_10x10_UNORM); 332 STR(ASTC_10x10_SRGB); 333 STR(ASTC_12x10_UNORM); 334 STR(ASTC_12x10_SRGB); 335 STR(ASTC_12x12_UNORM); 336 STR(ASTC_12x12_SRGB); 337 STR(B5G6R5_UNORM); 338 STR(B5G6R5_USCALED); 339 STR(B8G8R8_UNORM); 340 STR(B8G8R8_SNORM); 341 STR(B8G8R8_USCALED); 342 STR(B8G8R8_SSCALED); 343 STR(B8G8R8_UINT); 344 STR(B8G8R8_SINT); 345 STR(B8G8R8_SRGB); 346 STR(B8G8R8A8_UNORM); 347 STR(B8G8R8A8_SNORM); 348 STR(B8G8R8A8_USCALED); 349 STR(B8G8R8A8_SSCALED); 350 STR(B8G8R8A8_UINT); 351 STR(B8G8R8A8_SINT); 
352 STR(B8G8R8A8_SRGB); 353 STR(B10G10R10A2_UNORM); 354 STR(B10G10R10A2_SNORM); 355 STR(B10G10R10A2_USCALED); 356 STR(B10G10R10A2_SSCALED); 357 STR(B10G10R10A2_UINT); 358 STR(B10G10R10A2_SINT); 359#undef STR 360 default: return "UNKNOWN_FORMAT"; 361 } 362} 363 364static void app_dev_init_formats(struct app_dev *dev) 365{ 366 VkFormat f; 367 368 for (f = 0; f < VK_FORMAT_NUM; f++) { 369 const VkFormat fmt = f; 370 VkResult err; 371 372 err = vkGetPhysicalDeviceFormatProperties(dev->gpu->obj, fmt, &dev->format_props[f]); 373 if (err) { 374 memset(&dev->format_props[f], 0, 375 sizeof(dev->format_props[f])); 376 } 377 } 378} 379 380static void extract_version(uint32_t version, uint32_t *major, uint32_t *minor, uint32_t *patch) 381{ 382 *major = version >> 22; 383 *minor = (version >> 12) & 0x3ff; 384 *patch = version & 0xfff; 385} 386 387static void app_get_physical_device_layer_extensions( 388 struct app_gpu *gpu, 389 char *layer_name, 390 uint32_t *extension_count, 391 VkExtensionProperties **extension_properties) 392{ 393 VkResult err; 394 uint32_t ext_count = 0; 395 VkExtensionProperties *ext_ptr = NULL; 396 397 /* repeat get until VK_INCOMPLETE goes away */ 398 do { 399 err = vkGetPhysicalDeviceExtensionProperties(gpu->obj, layer_name, &ext_count, NULL); 400 assert(!err); 401 402 if (ext_ptr) { 403 free(ext_ptr); 404 } 405 ext_ptr = malloc(ext_count * sizeof(VkExtensionProperties)); 406 err = vkGetPhysicalDeviceExtensionProperties(gpu->obj, layer_name, &ext_count, ext_ptr); 407 } while (err == VK_INCOMPLETE); 408 assert(!err); 409 410 *extension_count = ext_count; 411 *extension_properties = ext_ptr; 412} 413 414static void app_dev_init(struct app_dev *dev, struct app_gpu *gpu) 415{ 416 VkDeviceCreateInfo info = { 417 .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, 418 .pNext = NULL, 419 .queueRecordCount = 0, 420 .pRequestedQueues = NULL, 421 .layerCount = 0, 422 .ppEnabledLayerNames = NULL, 423 .extensionCount = 0, 424 .ppEnabledExtensionNames = NULL, 425 .flags 
= 0, 426 }; 427 VkResult U_ASSERT_ONLY err; 428 // Extensions to enable 429 static const char *known_extensions[] = { 430 VK_WSI_DEVICE_SWAPCHAIN_EXTENSION_NAME, 431 }; 432 433 uint32_t count = 0; 434 435 /* Scan layers */ 436 VkLayerProperties *device_layer_properties = NULL; 437 struct layer_extension_list *device_layers = NULL; 438 439 do { 440 err = vkGetPhysicalDeviceLayerProperties(gpu->obj, &count, NULL); 441 assert(!err); 442 443 if (device_layer_properties) { 444 free(device_layer_properties); 445 } 446 device_layer_properties = malloc(sizeof(VkLayerProperties) * count); 447 assert(device_layer_properties); 448 449 if (device_layers) { 450 free(device_layers); 451 } 452 device_layers = malloc(sizeof(struct layer_extension_list) * count); 453 assert(device_layers); 454 455 err = vkGetPhysicalDeviceLayerProperties(gpu->obj, &count, device_layer_properties); 456 } while (err == VK_INCOMPLETE); 457 assert(!err); 458 459 gpu->device_layer_count = count; 460 gpu->device_layers = device_layers; 461 462 for (uint32_t i = 0; i < gpu->device_layer_count; i++) { 463 VkLayerProperties *src_info = &device_layer_properties[i]; 464 struct layer_extension_list *dst_info = &gpu->device_layers[i]; 465 memcpy(&dst_info->layer_properties, src_info, sizeof(VkLayerProperties)); 466 467 /* Save away layer extension info for report */ 468 app_get_physical_device_layer_extensions( 469 gpu, 470 src_info->layerName, 471 &dst_info->extension_count, 472 &dst_info->extension_properties); 473 } 474 free(device_layer_properties); 475 476 app_get_physical_device_layer_extensions( 477 gpu, 478 NULL, 479 &gpu->device_extension_count, 480 &gpu->device_extensions); 481 482 fflush(stdout); 483 484 uint32_t enabled_extension_count = 0; 485 uint32_t known_extension_count = ARRAY_SIZE(known_extensions); 486 487 for (uint32_t i = 0; i < known_extension_count; i++) { 488 VkBool32 extension_found = 0; 489 for (uint32_t j = 0; j < gpu->device_extension_count; j++) { 490 VkExtensionProperties 
*ext_prop = &gpu->device_extensions[j]; 491 if (!strcmp(known_extensions[i], ext_prop->extName)) { 492 493 extension_found = 1; 494 enabled_extension_count++; 495 } 496 } 497 if (!extension_found) { 498 printf("Cannot find extension: %s\n", known_extensions[i]); 499 ERR_EXIT(VK_ERROR_INVALID_EXTENSION); 500 } 501 } 502 503 /* request all queues */ 504 info.queueRecordCount = gpu->queue_count; 505 info.pRequestedQueues = gpu->queue_reqs; 506 507 info.layerCount = 0; 508 info.ppEnabledLayerNames = NULL; 509 info.extensionCount = enabled_extension_count; 510 info.ppEnabledExtensionNames = (const char*const*) known_extensions; 511 dev->gpu = gpu; 512 err = vkCreateDevice(gpu->obj, &info, &dev->obj); 513 if (err) 514 ERR_EXIT(err); 515 516} 517 518static void app_dev_destroy(struct app_dev *dev) 519{ 520 vkDestroyDevice(dev->obj); 521} 522 523static void app_get_global_layer_extensions( 524 char *layer_name, 525 uint32_t *extension_count, 526 VkExtensionProperties **extension_properties) 527{ 528 VkResult err; 529 uint32_t ext_count = 0; 530 VkExtensionProperties *ext_ptr = NULL; 531 532 /* repeat get until VK_INCOMPLETE goes away */ 533 do { 534 err = vkGetGlobalExtensionProperties(layer_name, &ext_count, NULL); 535 assert(!err); 536 537 if (ext_ptr) { 538 free(ext_ptr); 539 } 540 ext_ptr = malloc(ext_count * sizeof(VkExtensionProperties)); 541 err = vkGetGlobalExtensionProperties(layer_name, &ext_count, ext_ptr); 542 } while (err == VK_INCOMPLETE); 543 assert(!err); 544 545 *extension_count = ext_count; 546 *extension_properties = ext_ptr; 547} 548 549static void app_create_instance(struct app_instance *inst) 550{ 551 const VkApplicationInfo app_info = { 552 .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO, 553 .pNext = NULL, 554 .pAppName = APP_SHORT_NAME, 555 .appVersion = 1, 556 .pEngineName = APP_SHORT_NAME, 557 .engineVersion = 1, 558 .apiVersion = VK_API_VERSION, 559 }; 560 VkInstanceCreateInfo inst_info = { 561 .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, 562 
.pNext = NULL, 563 .pAppInfo = &app_info, 564 .pAllocCb = NULL, 565 .layerCount = 0, 566 .ppEnabledLayerNames = NULL, 567 .extensionCount = 0, 568 .ppEnabledExtensionNames = NULL, 569 }; 570 VkResult U_ASSERT_ONLY err; 571 // Global Extensions to enable 572 static char *known_extensions[] = { 573 "VK_WSI_swapchain", 574 }; 575 576 uint32_t global_extension_count = 0; 577 uint32_t count = 0; 578 579 /* Scan layers */ 580 VkLayerProperties *global_layer_properties = NULL; 581 struct layer_extension_list *global_layers = NULL; 582 583 do { 584 err = vkGetGlobalLayerProperties(&count, NULL); 585 assert(!err); 586 587 if (global_layer_properties) { 588 free(global_layer_properties); 589 } 590 global_layer_properties = malloc(sizeof(VkLayerProperties) * count); 591 assert(global_layer_properties); 592 593 if (global_layers) { 594 free(global_layers); 595 } 596 global_layers = malloc(sizeof(struct layer_extension_list) * count); 597 assert(global_layers); 598 599 err = vkGetGlobalLayerProperties(&count, global_layer_properties); 600 } while (err == VK_INCOMPLETE); 601 assert(!err); 602 603 inst->global_layer_count = count; 604 inst->global_layers = global_layers; 605 606 for (uint32_t i = 0; i < inst->global_layer_count; i++) { 607 VkLayerProperties *src_info = &global_layer_properties[i]; 608 struct layer_extension_list *dst_info = &inst->global_layers[i]; 609 memcpy(&dst_info->layer_properties, src_info, sizeof(VkLayerProperties)); 610 611 /* Save away layer extension info for report */ 612 app_get_global_layer_extensions( 613 src_info->layerName, 614 &dst_info->extension_count, 615 &dst_info->extension_properties); 616 } 617 free(global_layer_properties); 618 619 /* Collect global extensions */ 620 inst->global_extension_count = 0; 621 app_get_global_layer_extensions( 622 NULL, 623 &inst->global_extension_count, 624 &inst->global_extensions); 625 626 for (uint32_t i = 0; i < ARRAY_SIZE(known_extensions); i++) { 627 VkBool32 extension_found = 0; 628 for (uint32_t j = 0; 
j < inst->global_extension_count; j++) { 629 VkExtensionProperties *extension_prop = &inst->global_extensions[j]; 630 if (!strcmp(known_extensions[i], extension_prop->extName)) { 631 632 extension_found = 1; 633 global_extension_count++; 634 } 635 } 636 if (!extension_found) { 637 printf("Cannot find extension: %s\n", known_extensions[i]); 638 ERR_EXIT(VK_ERROR_INVALID_EXTENSION); 639 } 640 } 641 642 inst_info.extensionCount = global_extension_count; 643 inst_info.ppEnabledExtensionNames = (const char * const *) known_extensions; 644 645 err = vkCreateInstance(&inst_info, &inst->instance); 646 if (err == VK_ERROR_INCOMPATIBLE_DRIVER) { 647 printf("Cannot create Vulkan instance.\n"); 648 ERR_EXIT(err); 649 } else if (err) { 650 ERR_EXIT(err); 651 } 652} 653 654static void app_destroy_instance(struct app_instance *inst) 655{ 656 free(inst->global_extensions); 657 vkDestroyInstance(inst->instance); 658} 659 660 661static void app_gpu_init(struct app_gpu *gpu, uint32_t id, VkPhysicalDevice obj) 662{ 663 VkResult err; 664 uint32_t i; 665 666 memset(gpu, 0, sizeof(*gpu)); 667 668 gpu->id = id; 669 gpu->obj = obj; 670 671 err = vkGetPhysicalDeviceProperties(gpu->obj, &gpu->props); 672 if (err) 673 ERR_EXIT(err); 674 675 /* get queue count */ 676 err = vkGetPhysicalDeviceQueueFamilyProperties(gpu->obj, &gpu->queue_count, NULL); 677 if (err) 678 ERR_EXIT(err); 679 680 gpu->queue_props = 681 malloc(sizeof(gpu->queue_props[0]) * gpu->queue_count); 682 683 if (!gpu->queue_props) 684 ERR_EXIT(VK_ERROR_OUT_OF_HOST_MEMORY); 685 err = vkGetPhysicalDeviceQueueFamilyProperties(gpu->obj, &gpu->queue_count, gpu->queue_props); 686 if (err) 687 ERR_EXIT(err); 688 689 /* set up queue requests */ 690 gpu->queue_reqs = malloc(sizeof(*gpu->queue_reqs) * gpu->queue_count); 691 if (!gpu->queue_reqs) 692 ERR_EXIT(VK_ERROR_OUT_OF_HOST_MEMORY); 693 for (i = 0; i < gpu->queue_count; i++) { 694 gpu->queue_reqs[i].queueFamilyIndex = i; 695 gpu->queue_reqs[i].queueCount = 
gpu->queue_props[i].queueCount; 696 } 697 698 err = vkGetPhysicalDeviceMemoryProperties(gpu->obj, &gpu->memory_props); 699 if (err) 700 ERR_EXIT(err); 701 702 err = vkGetPhysicalDeviceFeatures(gpu->obj, &gpu->features); 703 if (err) 704 ERR_EXIT(err); 705 706 err = vkGetPhysicalDeviceLimits(gpu->obj, &gpu->limits); 707 if (err) 708 ERR_EXIT(err); 709 710 app_dev_init(&gpu->dev, gpu); 711 app_dev_init_formats(&gpu->dev); 712} 713 714static void app_gpu_destroy(struct app_gpu *gpu) 715{ 716 app_dev_destroy(&gpu->dev); 717 free(gpu->device_extensions); 718 free(gpu->queue_reqs); 719 free(gpu->queue_props); 720} 721 722static void app_dev_dump_format_props(const struct app_dev *dev, VkFormat fmt) 723{ 724 const VkFormatProperties *props = &dev->format_props[fmt]; 725 struct { 726 const char *name; 727 VkFlags flags; 728 } tilings[2]; 729 uint32_t i; 730 731 if (!props->linearTilingFeatures && !props->optimalTilingFeatures) 732 return; 733 734 tilings[0].name = "linear"; 735 tilings[0].flags = props->linearTilingFeatures; 736 tilings[1].name = "optimal"; 737 tilings[1].flags = props->optimalTilingFeatures; 738 739 printf("FORMAT_%s\n", vk_format_string(fmt)); 740 for (i = 0; i < ARRAY_SIZE(tilings); i++) { 741 if (!tilings[i].flags) 742 continue; 743 744 printf("\t%s tiling image =%s%s%s\n", tilings[i].name, 745 (tilings[i].flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) ? " sampled" : "", 746 (tilings[i].flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) ? " storage" : "", 747 (tilings[i].flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT) ? " atomic" : ""); 748 printf("\t%s tiling texel =%s%s%s\n", tilings[i].name, 749 (tilings[i].flags & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT) ? " TBO" : "", 750 (tilings[i].flags & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT) ? " IBO" : "", 751 (tilings[i].flags & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT) ? 
" atomic" : ""); 752 printf("\t%s tiling attachment =%s%s%s\n", tilings[i].name, 753 (tilings[i].flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) ? " color" : "", 754 (tilings[i].flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT) ? " blend" : "", 755 (tilings[i].flags & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) ? " depth/stencil" : ""); 756 printf("\t%s tiling vertex = %u\n", tilings[i].name, 757 (bool) (tilings[i].flags & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT)); 758 printf("\t%s tiling conversion = %u\n", tilings[i].name, 759 (bool) (tilings[i].flags & VK_FORMAT_FEATURE_CONVERSION_BIT)); 760 } 761} 762 763 764static void 765app_dev_dump(const struct app_dev *dev) 766{ 767 VkFormat fmt; 768 769 for (fmt = 0; fmt < VK_FORMAT_NUM; fmt++) { 770 app_dev_dump_format_props(dev, fmt); 771 } 772} 773 774#ifdef _WIN32 775#define PRINTF_SIZE_T_SPECIFIER "%Iu" 776#else 777#define PRINTF_SIZE_T_SPECIFIER "%zu" 778#endif 779 780static void app_gpu_dump_features(const struct app_gpu *gpu) 781{ 782 const VkPhysicalDeviceFeatures *features = &gpu->features; 783 784 printf("VkPhysicalDeviceFeatures\n"); 785 /* TODO: add interesting features */ 786 printf("\tgeometryShader = %u\n", features->geometryShader); 787} 788 789static void app_gpu_dump_limits(const struct app_gpu *gpu) 790{ 791 const VkPhysicalDeviceLimits *limits = &gpu->limits; 792 793 printf("VkPhysicalDeviceLimits\n"); 794 /* TODO: add interesting limits */ 795 printf("\tmaxBoundDescriptorSets = %u\n", limits->maxBoundDescriptorSets); 796 printf("\tmaxComputeWorkGroupInvocations = %u\n", limits->maxComputeWorkGroupInvocations); 797 printf("\ttimestampFrequency = %lu\n", limits->timestampFrequency); 798} 799 800static void app_gpu_dump_props(const struct app_gpu *gpu) 801{ 802 const VkPhysicalDeviceProperties *props = &gpu->props; 803 804 printf("VkPhysicalDeviceProperties\n"); 805 printf("\tapiVersion = %u\n", props->apiVersion); 806 printf("\tdriverVersion = %u\n", props->driverVersion); 807 printf("\tvendorId = 
0x%04x\n", props->vendorId); 808 printf("\tdeviceId = 0x%04x\n", props->deviceId); 809 printf("\tdeviceType = %s\n", vk_physical_device_type_string(props->deviceType)); 810 printf("\tdeviceName = %s\n", props->deviceName); 811 fflush(stdout); 812} 813 814static void app_dump_extensions( 815 const char *indent, 816 const char *layer_name, 817 const uint32_t extension_count, 818 const VkExtensionProperties *extension_properties) 819{ 820 uint32_t i; 821 if (layer_name && (strlen(layer_name) > 0)) { 822 printf("%s%s Extensions", indent, layer_name); 823 } else { 824 printf("Extensions"); 825 } 826 printf("\tcount = %d\n", extension_count); 827 for (i=0; i< extension_count; i++) { 828 uint32_t major, minor, patch; 829 char spec_version[64]; 830 VkExtensionProperties const *ext_prop = &extension_properties[i]; 831 832 if (i>0) 833 printf("\n"); // separator between extensions 834 835 printf("%s\t", indent); 836 extract_version(ext_prop->specVersion, &major, &minor, &patch); 837 snprintf(spec_version, sizeof(spec_version), "%d.%d.%d", major, minor, patch); 838 printf("%s: extension version %s", 839 ext_prop->extName, spec_version); 840 } 841 printf("\n"); 842 fflush(stdout); 843} 844 845static void app_gpu_dump_queue_props(const struct app_gpu *gpu, uint32_t id) 846{ 847 const VkQueueFamilyProperties *props = &gpu->queue_props[id]; 848 849 printf("VkQueueFamilyProperties[%d]\n", id); 850 printf("\tqueueFlags = %c%c%c%c\n", 851 (props->queueFlags & VK_QUEUE_GRAPHICS_BIT) ? 'G' : '.', 852 (props->queueFlags & VK_QUEUE_COMPUTE_BIT) ? 'C' : '.', 853 (props->queueFlags & VK_QUEUE_DMA_BIT) ? 'D' : '.', 854 (props->queueFlags & VK_QUEUE_EXTENDED_BIT) ? 
'X' : '.'); 855 printf("\tqueueCount = %u\n", props->queueCount); 856 printf("\tsupportsTimestamps = %u\n", props->supportsTimestamps); 857 fflush(stdout); 858} 859 860static void app_gpu_dump_memory_props(const struct app_gpu *gpu) 861{ 862 const VkPhysicalDeviceMemoryProperties *props = &gpu->memory_props; 863 864 printf("VkPhysicalDeviceMemoryProperties\n"); 865 printf("\tmemoryTypeCount = %u\n", props->memoryTypeCount); 866 for (uint32_t i = 0; i < props->memoryTypeCount; i++) { 867 printf("\tmemoryTypes[%u] : \n", i); 868 printf("\t\tpropertyFlags = %u\n", props->memoryTypes[i].propertyFlags); 869 printf("\t\theapIndex = %u\n", props->memoryTypes[i].heapIndex); 870 } 871 printf("\tmemoryHeapCount = %u\n", props->memoryHeapCount); 872 for (uint32_t i = 0; i < props->memoryHeapCount; i++) { 873 printf("\tmemoryHeaps[%u] : \n", i); 874 printf("\t\tsize = " PRINTF_SIZE_T_SPECIFIER "\n", props->memoryHeaps[i].size); 875 } 876 fflush(stdout); 877} 878 879static void app_gpu_dump(const struct app_gpu *gpu) 880{ 881 uint32_t i; 882 883 printf("GPU%u\n", gpu->id); 884 app_gpu_dump_props(gpu); 885 printf("\n"); 886 app_dump_extensions("", "", gpu->device_extension_count, gpu->device_extensions); 887 printf("\n"); 888 printf("Layers\tcount = %d\n", gpu->device_layer_count); 889 for (uint32_t i = 0; i < gpu->device_layer_count; i++) { 890 uint32_t major, minor, patch; 891 char spec_version[64], layer_version[64]; 892 struct layer_extension_list const *layer_info = &gpu->device_layers[i]; 893 894 extract_version(layer_info->layer_properties.specVersion, &major, &minor, &patch); 895 snprintf(spec_version, sizeof(spec_version), "%d.%d.%d", major, minor, patch); 896 extract_version(layer_info->layer_properties.implVersion, &major, &minor, &patch); 897 snprintf(layer_version, sizeof(layer_version), "%d.%d.%d", major, minor, patch); 898 printf("\t%s (%s) Vulkan version %s, layer version %s\n", 899 layer_info->layer_properties.layerName, 900 (char*) 
layer_info->layer_properties.description, 901 spec_version, layer_version); 902 903 app_dump_extensions("\t", 904 layer_info->layer_properties.layerName, 905 layer_info->extension_count, 906 layer_info->extension_properties); 907 fflush(stdout); 908 } 909 printf("\n"); 910 for (i = 0; i < gpu->queue_count; i++) { 911 app_gpu_dump_queue_props(gpu, i); 912 printf("\n"); 913 } 914 app_gpu_dump_memory_props(gpu); 915 printf("\n"); 916 app_gpu_dump_features(gpu); 917 printf("\n"); 918 app_gpu_dump_limits(gpu); 919 printf("\n"); 920 app_dev_dump(&gpu->dev); 921} 922 923int main(int argc, char **argv) 924{ 925 struct app_gpu gpus[MAX_GPUS]; 926 VkPhysicalDevice objs[MAX_GPUS]; 927 uint32_t gpu_count, i; 928 VkResult err; 929 struct app_instance inst; 930 931 app_create_instance(&inst); 932 app_dump_extensions("", "Global", inst.global_extension_count, inst.global_extensions); 933 934 printf("Global Layers\tcount = %d\n", inst.global_layer_count); 935 for (uint32_t i = 0; i < inst.global_layer_count; i++) { 936 uint32_t major, minor, patch; 937 char spec_version[64], layer_version[64]; 938 VkLayerProperties const *layer_prop = &inst.global_layers[i].layer_properties; 939 940 extract_version(layer_prop->specVersion, &major, &minor, &patch); 941 snprintf(spec_version, sizeof(spec_version), "%d.%d.%d", major, minor, patch); 942 extract_version(layer_prop->implVersion, &major, &minor, &patch); 943 snprintf(layer_version, sizeof(layer_version), "%d.%d.%d", major, minor, patch); 944 printf("\t%s (%s) Vulkan version %s, layer version %s\n", 945 layer_prop->layerName, (char*) layer_prop->description, spec_version, layer_version); 946 947 app_dump_extensions("\t", 948 inst.global_layers[i].layer_properties.layerName, 949 inst.global_layers[i].extension_count, 950 inst.global_layers[i].extension_properties); 951 } 952 953 err = vkEnumeratePhysicalDevices(inst.instance, &gpu_count, NULL); 954 if (err) 955 ERR_EXIT(err); 956 if (gpu_count > MAX_GPUS) { 957 printf("Too many GPUS found 
\n"); 958 ERR_EXIT(VK_ERROR_UNKNOWN); 959 } 960 err = vkEnumeratePhysicalDevices(inst.instance, &gpu_count, objs); 961 if (err) 962 ERR_EXIT(err); 963 964 for (i = 0; i < gpu_count; i++) { 965 app_gpu_init(&gpus[i], i, objs[i]); 966 app_gpu_dump(&gpus[i]); 967 printf("\n\n"); 968 } 969 970 for (i = 0; i < gpu_count; i++) 971 app_gpu_destroy(&gpus[i]); 972 973 app_destroy_instance(&inst); 974 975 return 0; 976} 977 978#ifdef _WIN32 979 980// Create a console window with a large scrollback size to which to send stdout. 981// Returns true if console window was successfully created, false otherwise. 982bool SetStdOutToNewConsole() 983{ 984 // don't do anything if we already have a console 985 if (GetStdHandle(STD_OUTPUT_HANDLE)) 986 return false; 987 988 // allocate a console for this app 989 AllocConsole(); 990 991 // redirect unbuffered STDOUT to the console 992 HANDLE consoleHandle = GetStdHandle(STD_OUTPUT_HANDLE); 993 int fileDescriptor = _open_osfhandle((intptr_t)consoleHandle, _O_TEXT); 994 FILE *fp = _fdopen( fileDescriptor, "w" ); 995 *stdout = *fp; 996 setvbuf( stdout, NULL, _IONBF, 0 ); 997 998 // make the console window bigger 999 CONSOLE_SCREEN_BUFFER_INFO csbi; 1000 SMALL_RECT r; 1001 COORD bufferSize; 1002 if (!GetConsoleScreenBufferInfo(consoleHandle, &csbi)) 1003 return false; 1004 bufferSize.X = csbi.dwSize.X; 1005 bufferSize.Y = 1000; 1006 if (!SetConsoleScreenBufferSize(consoleHandle, bufferSize)) 1007 return false; 1008 r.Left = r.Top = 0; 1009 r.Right = csbi.dwSize.X-1; 1010 r.Bottom = 60; 1011 if (!SetConsoleWindowInfo(consoleHandle, true, &r)) 1012 return false; 1013 1014 // change the console window title 1015 if (!SetConsoleTitle(TEXT(APP_SHORT_NAME))) 1016 return false; 1017 1018 return true; 1019} 1020 1021int WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, PSTR pCmdLine, int nCmdShow) 1022{ 1023 char *argv = pCmdLine; 1024 consoleCreated = SetStdOutToNewConsole(); 1025 main(1, &argv); 1026 fflush(stdout); 1027 if (consoleCreated) 1028 
Sleep(INFINITE); 1029} 1030#endif 1031