vktestbinding.cpp revision ba0836f623e3a756bf0cac72d5a688430d2a3420
// VK tests
//
// Copyright (C) 2014 LunarG, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

#include <iostream>
#include <string.h> // memset(), memcmp()
#include <assert.h>
#include "vktestbinding.h"

namespace {

#define NON_DISPATCHABLE_HANDLE_INIT(create_func, dev, ...)                        \
    do {                                                                           \
        handle_type handle;                                                        \
        if (EXPECT(create_func(dev.handle(), __VA_ARGS__, &handle) == VK_SUCCESS)) \
            NonDispHandle::init(dev.handle(), handle);                             \
    } while (0)

#define DERIVED_OBJECT_TYPE_INIT(create_func, dev, vk_object_type, ...)         \
    do {                                                                        \
        obj_type obj;                                                           \
        dev_ = &dev;                                                            \
        if (EXPECT(create_func(dev.handle(), __VA_ARGS__, &obj) == VK_SUCCESS)) \
            base_type::init(obj, vk_object_type);                               \
    } while (0)

#define STRINGIFY(x) #x
#define EXPECT(expr) ((expr) ? true : expect_failure(STRINGIFY(expr), __FILE__, __LINE__, __FUNCTION__))
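
// Usage sketch (illustrative, not part of the bindings): EXPECT() evaluates
// the expression, reports a failure through the registered ErrorCallback (or
// stderr) when it is false, and yields the boolean result, so call sites can
// both log and branch in one step:
//
//     VkFence fence;
//     if (EXPECT(vkCreateFence(device, &fence_info, &fence) == VK_SUCCESS)) {
//         /* safe to use fence */
//     }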

vk_testing::ErrorCallback error_callback;

bool expect_failure(const char *expr, const char *file, unsigned int line, const char *function)
{
    if (error_callback) {
        error_callback(expr, file, line, function);
    } else {
        std::cerr << file << ":" << line << ": " << function <<
            ": Expectation `" << expr << "' failed.\n";
    }

    return false;
}

template<class T, class S>
std::vector<T> make_objects(const std::vector<S> &v)
{
    std::vector<T> objs;
    objs.reserve(v.size());
    for (typename std::vector<S>::const_iterator it = v.begin(); it != v.end(); it++)
        objs.push_back((*it)->obj());
    return objs;
}

template<typename T>
std::vector<T> get_memory_reqs(VkDevice device, VkObjectType obj_type, VkObject obj, size_t min_elems)
{
    std::vector<T> info;

    info.resize((min_elems > 0) ? min_elems : 1);
    if (!EXPECT(vkGetObjectMemoryRequirements(device, obj_type, obj, &info[0]) == VK_SUCCESS))
        info.clear();

    if (info.size() < min_elems)
        info.resize(min_elems);

    return info;
}

} // namespace

namespace vk_testing {

void set_error_callback(ErrorCallback callback)
{
    error_callback = callback;
}

VkPhysicalDeviceProperties PhysicalDevice::properties() const
{
    VkPhysicalDeviceProperties info;

    EXPECT(vkGetPhysicalDeviceProperties(handle(), &info) == VK_SUCCESS);

    return info;
}

VkPhysicalDevicePerformance PhysicalDevice::performance() const
{
    VkPhysicalDevicePerformance info;

    EXPECT(vkGetPhysicalDevicePerformance(handle(), &info) == VK_SUCCESS);

    return info;
}

std::vector<VkPhysicalDeviceQueueProperties> PhysicalDevice::queue_properties() const
{
    std::vector<VkPhysicalDeviceQueueProperties> info;
    uint32_t count;

    if (EXPECT(vkGetPhysicalDeviceQueueCount(handle(), &count) == VK_SUCCESS)) {
        info.resize(count);
        if (!EXPECT(vkGetPhysicalDeviceQueueProperties(handle(), count, &info[0]) == VK_SUCCESS))
            info.clear();
    }

    return info;
}

VkPhysicalDeviceMemoryProperties PhysicalDevice::memory_properties() const
{
    VkPhysicalDeviceMemoryProperties info;

    EXPECT(vkGetPhysicalDeviceMemoryProperties(handle(), &info) == VK_SUCCESS);

    return info;
}

/*
 * Return list of global layers available
 */
std::vector<VkLayerProperties> GetGlobalLayers()
{
    VkResult err;
    std::vector<VkLayerProperties> layers;
    uint32_t layer_count;

    do {
        layer_count = 0;
        err = vkGetGlobalLayerProperties(&layer_count, NULL);

        if (err == VK_SUCCESS) {
            layers.resize(layer_count); // resize, not reserve: the call writes layer_count elements
            err = vkGetGlobalLayerProperties(&layer_count, &layers[0]);
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return layers;
}

/*
 * Return list of global extensions provided by the ICD / loader
 */
std::vector<VkExtensionProperties> GetGlobalExtensions()
{
    return GetGlobalExtensions(NULL);
}

/*
 * Return list of global extensions provided by the specified layer.
 * If pLayerName is NULL, returns extensions implemented by the loader / ICDs.
 */
std::vector<VkExtensionProperties> GetGlobalExtensions(const char *pLayerName)
{
    std::vector<VkExtensionProperties> exts;
    uint32_t ext_count;
    VkResult err;

    do {
        ext_count = 0;
        err = vkGetGlobalExtensionProperties(pLayerName, &ext_count, NULL);

        if (err == VK_SUCCESS) {
            exts.resize(ext_count);
            err = vkGetGlobalExtensionProperties(pLayerName, &ext_count, &exts[0]);
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return exts;
}
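
// The enumeration helpers above use the standard two-call Vulkan pattern:
// query the count with a NULL output array, size the vector, then fetch the
// data, looping while the implementation reports VK_INCOMPLETE because the
// count changed between the two calls.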

/*
 * Return list of PhysicalDevice extensions provided by the ICD / loader
 */
std::vector<VkExtensionProperties> PhysicalDevice::extensions() const
{
    return extensions(NULL);
}

/*
 * Return list of PhysicalDevice extensions provided by the specified layer.
 * If pLayerName is NULL, returns extensions for the ICD / loader.
 */
std::vector<VkExtensionProperties> PhysicalDevice::extensions(const char *pLayerName) const
{
    std::vector<VkExtensionProperties> exts;
    VkResult err;

    do {
        uint32_t extCount = 0;
        err = vkGetPhysicalDeviceExtensionProperties(handle(), pLayerName, &extCount, NULL);

        if (err == VK_SUCCESS) {
            exts.resize(extCount); // resize, not reserve: the call writes extCount elements
            err = vkGetPhysicalDeviceExtensionProperties(handle(), pLayerName, &extCount, &exts[0]);
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return exts;
}

VkResult PhysicalDevice::set_memory_type(const uint32_t type_bits, VkMemoryAllocInfo *info, const VkFlags properties) const
{
    uint32_t type_mask = type_bits;
    // Search memtypes to find the first index with the requested properties
    for (uint32_t i = 0; i < 32; i++) {
        if ((type_mask & 1) == 1) {
            // Type is available; does it match the user properties?
            if ((memory_properties_.memoryTypes[i].propertyFlags & properties) == properties) {
                info->memoryTypeIndex = i;
                return VK_SUCCESS;
            }
        }
        type_mask >>= 1;
    }
    // No memory types matched; return failure
    return VK_UNSUPPORTED;
}
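
// Usage sketch (illustrative): callers feed set_memory_type() the
// memoryTypeBits from a VkMemoryRequirements query plus the property flags
// they need; the chosen type index is written into the alloc info:
//
//     VkMemoryAllocInfo alloc_info = DeviceMemory::alloc_info(reqs.size, 0);
//     dev.phy().set_memory_type(reqs.memoryTypeBits, &alloc_info, desired_props);
//     // desired_props: caller-chosen VkMemoryPropertyFlags (0 = any type)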

/*
 * Return list of PhysicalDevice layers
 */
std::vector<VkLayerProperties> PhysicalDevice::layers() const
{
    std::vector<VkLayerProperties> layer_props;
    VkResult err;

    do {
        uint32_t layer_count = 0;
        err = vkGetPhysicalDeviceLayerProperties(handle(), &layer_count, NULL);

        if (err == VK_SUCCESS) {
            layer_props.resize(layer_count); // resize, not reserve: the call writes layer_count elements
            err = vkGetPhysicalDeviceLayerProperties(handle(), &layer_count, &layer_props[0]);
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return layer_props;
}

void BaseObject::init(VkObject obj, VkObjectType type, bool own)
{
    EXPECT(!initialized());
    reinit(obj, type, own);
}

void BaseObject::reinit(VkObject obj, VkObjectType type, bool own)
{
    obj_ = obj;
    object_type_ = type;
    own_obj_ = own;
}

uint32_t Object::memory_allocation_count() const
{
    return 1;
}

std::vector<VkMemoryRequirements> Object::memory_requirements() const
{
    const uint32_t num_allocations = 1;
    std::vector<VkMemoryRequirements> info =
        get_memory_reqs<VkMemoryRequirements>(dev_->handle(), type(), obj(), 0);
    EXPECT(info.size() == num_allocations);
    if (info.size() == 1 && !info[0].size)
        info.clear();

    return info;
}

void Object::init(VkObject obj, VkObjectType object_type, bool own)
{
    BaseObject::init(obj, object_type, own);
    mem_alloc_count_ = memory_allocation_count();
}

void Object::reinit(VkObject obj, VkObjectType object_type, bool own)
{
    cleanup();
    BaseObject::reinit(obj, object_type, own);
    mem_alloc_count_ = memory_allocation_count();
}

void Object::cleanup()
{
    if (!initialized())
        return;

    if (own())
        EXPECT(vkDestroyObject(dev_->handle(), type(), obj()) == VK_SUCCESS);

    if (internal_mems_) {
        delete[] internal_mems_;
        internal_mems_ = NULL;
        primary_mem_ = NULL;
    }

    mem_alloc_count_ = 0;
}

void Object::bind_memory(const DeviceMemory &mem, VkDeviceSize mem_offset)
{
    bound = true;
    EXPECT(vkBindObjectMemory(dev_->handle(), type(), obj(), mem.handle(), mem_offset) == VK_SUCCESS);
}

void Object::alloc_memory()
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new DeviceMemory[mem_alloc_count_];

    const std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    VkMemoryAllocInfo info;

    for (size_t i = 0; i < mem_reqs.size(); i++) {
        info = DeviceMemory::alloc_info(mem_reqs[i].size, 0);
        dev_->phy().set_memory_type(mem_reqs[i].memoryTypeBits, &info, 0);
        primary_mem_ = &internal_mems_[i];
        internal_mems_[i].init(*dev_, info);
        bind_memory(internal_mems_[i], 0);
    }
}

void Object::alloc_memory(VkMemoryPropertyFlags &reqs)
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new DeviceMemory[mem_alloc_count_];

    std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    VkMemoryAllocInfo info;

    for (size_t i = 0; i < mem_reqs.size(); i++) {
        info = DeviceMemory::alloc_info(mem_reqs[i].size, 0);
        dev_->phy().set_memory_type(mem_reqs[i].memoryTypeBits, &info, reqs);
        primary_mem_ = &internal_mems_[i];
        internal_mems_[i].init(*dev_, info);
        bind_memory(internal_mems_[i], 0);
    }
}

std::vector<VkDeviceMemory> Object::memories() const
{
    std::vector<VkDeviceMemory> mems;
    if (internal_mems_) {
        mems.reserve(mem_alloc_count_);
        for (uint32_t i = 0; i < mem_alloc_count_; i++)
            mems.push_back(internal_mems_[i].handle());
    }

    return mems;
}

Device::~Device()
{
    if (!initialized())
        return;

    for (int i = 0; i < QUEUE_COUNT; i++) {
        for (std::vector<Queue *>::iterator it = queues_[i].begin(); it != queues_[i].end(); it++)
            delete *it;
        queues_[i].clear();
    }

    EXPECT(vkDestroyDevice(handle()) == VK_SUCCESS);
}

void Device::init(std::vector<const char *> &layers, std::vector<const char *> &extensions)
{
    // request all queues
    const std::vector<VkPhysicalDeviceQueueProperties> queue_props = phy_.queue_properties();
    std::vector<VkDeviceQueueCreateInfo> queue_info;
    queue_info.reserve(queue_props.size());
    for (uint32_t i = 0; i < queue_props.size(); i++) {
        VkDeviceQueueCreateInfo qi = {};
        qi.queueNodeIndex = i;
        qi.queueCount = queue_props[i].queueCount;
        if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphics_queue_node_index_ = i;
        }
        queue_info.push_back(qi);
    }

    VkDeviceCreateInfo dev_info = {};
    dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    dev_info.pNext = NULL;
    dev_info.queueRecordCount = queue_info.size();
    dev_info.pRequestedQueues = &queue_info[0];
    dev_info.layerCount = layers.size();
    dev_info.ppEnabledLayerNames = &layers[0];
    dev_info.extensionCount = extensions.size();
    dev_info.ppEnabledExtensionNames = &extensions[0];
    dev_info.flags = 0;

    init(dev_info);
}

void Device::init(const VkDeviceCreateInfo &info)
{
    VkDevice dev;

    if (EXPECT(vkCreateDevice(phy_.handle(), &info, &dev) == VK_SUCCESS))
        Handle::init(dev);

    init_queues();
    init_formats();
}
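
// init_queues() below walks every queue family reported by the physical
// device, retrieves each VkQueue, and files it under GRAPHICS, COMPUTE,
// and/or DMA; a queue whose family advertises several capability bits is
// registered in several buckets.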

void Device::init_queues()
{
    VkResult err;
    uint32_t queue_node_count;

    err = vkGetPhysicalDeviceQueueCount(phy_.handle(), &queue_node_count);
    EXPECT(err == VK_SUCCESS);
    EXPECT(queue_node_count >= 1);

    VkPhysicalDeviceQueueProperties *queue_props = new VkPhysicalDeviceQueueProperties[queue_node_count];

    err = vkGetPhysicalDeviceQueueProperties(phy_.handle(), queue_node_count, queue_props);
    EXPECT(err == VK_SUCCESS);

    for (uint32_t i = 0; i < queue_node_count; i++) {
        VkQueue queue;

        for (uint32_t j = 0; j < queue_props[i].queueCount; j++) {
            // TODO: Need to add support for separate MEMMGR and work queues, including synchronization
            err = vkGetDeviceQueue(handle(), i, j, &queue);
            EXPECT(err == VK_SUCCESS);

            if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
                queues_[GRAPHICS].push_back(new Queue(queue));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
                queues_[COMPUTE].push_back(new Queue(queue));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_DMA_BIT) {
                queues_[DMA].push_back(new Queue(queue));
            }
        }
    }

    delete[] queue_props;

    EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
}

void Device::init_formats()
{
    for (int f = VK_FORMAT_BEGIN_RANGE; f <= VK_FORMAT_END_RANGE; f++) {
        const VkFormat fmt = static_cast<VkFormat>(f);
        const VkFormatProperties props = format_properties(fmt);

        if (props.linearTilingFeatures) {
            const Format tmp = { fmt, VK_IMAGE_TILING_LINEAR, props.linearTilingFeatures };
            formats_.push_back(tmp);
        }

        if (props.optimalTilingFeatures) {
            const Format tmp = { fmt, VK_IMAGE_TILING_OPTIMAL, props.optimalTilingFeatures };
            formats_.push_back(tmp);
        }
    }

    EXPECT(!formats_.empty());
}

VkFormatProperties Device::format_properties(VkFormat format)
{
    VkFormatProperties data;
    if (!EXPECT(vkGetPhysicalDeviceFormatInfo(phy().handle(), format, &data) == VK_SUCCESS))
        memset(&data, 0, sizeof(data));

    return data;
}

void Device::wait()
{
    EXPECT(vkDeviceWaitIdle(handle()) == VK_SUCCESS);
}

VkResult Device::wait(const std::vector<const Fence *> &fences, bool wait_all, uint64_t timeout)
{
    const std::vector<VkFence> fence_objs = make_objects<VkFence>(fences);
    VkResult err = vkWaitForFences(handle(), fence_objs.size(), &fence_objs[0], wait_all, timeout);
    EXPECT(err == VK_SUCCESS || err == VK_TIMEOUT);

    return err;
}

VkResult Device::update_descriptor_sets(const std::vector<VkWriteDescriptorSet> &writes, const std::vector<VkCopyDescriptorSet> &copies)
{
    return vkUpdateDescriptorSets(handle(), writes.size(), &writes[0], copies.size(), &copies[0]);
}

void Queue::submit(const std::vector<const CmdBuffer *> &cmds, Fence &fence)
{
    const std::vector<VkCmdBuffer> cmd_objs = make_objects<VkCmdBuffer>(cmds);
    EXPECT(vkQueueSubmit(handle(), cmd_objs.size(), &cmd_objs[0], fence.obj()) == VK_SUCCESS);
}

void Queue::submit(const CmdBuffer &cmd, Fence &fence)
{
    submit(std::vector<const CmdBuffer *>(1, &cmd), fence);
}

void Queue::submit(const CmdBuffer &cmd)
{
    Fence fence;
    submit(cmd, fence);
}

void Queue::wait()
{
    EXPECT(vkQueueWaitIdle(handle()) == VK_SUCCESS);
}
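
// Usage sketch (illustrative): work is submitted through a Queue wrapper,
// optionally with a Fence the host can wait on afterwards:
//
//     queue.submit(cmd_buf, fence);
//     std::vector<const vk_testing::Fence *> fences(1, &fence);
//     dev.wait(fences, true, ~0ULL); // wait_all = true, effectively no timeout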

void Queue::signal_semaphore(Semaphore &sem)
{
    EXPECT(vkQueueSignalSemaphore(handle(), sem.obj()) == VK_SUCCESS);
}

void Queue::wait_semaphore(Semaphore &sem)
{
    EXPECT(vkQueueWaitSemaphore(handle(), sem.obj()) == VK_SUCCESS);
}

DeviceMemory::~DeviceMemory()
{
    if (initialized())
        EXPECT(vkFreeMemory(device(), handle()) == VK_SUCCESS);
}

void DeviceMemory::init(const Device &dev, const VkMemoryAllocInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkAllocMemory, dev, &info);
}

const void *DeviceMemory::map(VkFlags flags) const
{
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0, 0, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void *DeviceMemory::map(VkFlags flags)
{
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0, 0, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void DeviceMemory::unmap() const
{
    EXPECT(vkUnmapMemory(device(), handle()) == VK_SUCCESS);
}

void Fence::init(const Device &dev, const VkFenceCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateFence, dev, VK_OBJECT_TYPE_FENCE, &info);
    alloc_memory();
}

void Semaphore::init(const Device &dev, const VkSemaphoreCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateSemaphore, dev, VK_OBJECT_TYPE_SEMAPHORE, &info);
    alloc_memory();
}

void Event::init(const Device &dev, const VkEventCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateEvent, dev, VK_OBJECT_TYPE_EVENT, &info);
    alloc_memory();
}

void Event::set()
{
    EXPECT(vkSetEvent(dev_->handle(), obj()) == VK_SUCCESS);
}

void Event::reset()
{
    EXPECT(vkResetEvent(dev_->handle(), obj()) == VK_SUCCESS);
}

void QueryPool::init(const Device &dev, const VkQueryPoolCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateQueryPool, dev, VK_OBJECT_TYPE_QUERY_POOL, &info);
    alloc_memory();
}

VkResult QueryPool::results(uint32_t start, uint32_t count, size_t size, void *data)
{
    size_t tmp = size;
    VkResult err = vkGetQueryPoolResults(dev_->handle(), obj(), start, count, &tmp, data, 0);
    if (err == VK_SUCCESS) {
        if (!EXPECT(tmp == size))
            memset(data, 0, size);
    } else {
        EXPECT(err == VK_NOT_READY);
    }

    return err;
}

void Buffer::init(const Device &dev, const VkBufferCreateInfo &info)
{
    init_no_mem(dev, info);
    alloc_memory();
}

void Buffer::init(const Device &dev, const VkBufferCreateInfo &info, VkMemoryPropertyFlags &reqs)
{
    init_no_mem(dev, info);
    alloc_memory(reqs);
}

void Buffer::init_no_mem(const Device &dev, const VkBufferCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateBuffer, dev, VK_OBJECT_TYPE_BUFFER, &info);
    create_info_ = info;
}

void Buffer::bind_memory(VkDeviceSize offset, VkDeviceSize size,
                         const DeviceMemory &mem, VkDeviceSize mem_offset)
{
    // NOTE: the size parameter is currently unused
    VkQueue queue = dev_->graphics_queues()[0]->handle();
    VkSparseMemoryBindInfo bindInfo;
    memset(&bindInfo, 0, sizeof(VkSparseMemoryBindInfo));
    bindInfo.offset = offset;
    bindInfo.memOffset = mem_offset;
    bindInfo.mem = mem.handle();
    EXPECT(vkQueueBindSparseBufferMemory(queue, obj(), 1, &bindInfo) == VK_SUCCESS);
}

void BufferView::init(const Device &dev, const VkBufferViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateBufferView, dev, VK_OBJECT_TYPE_BUFFER_VIEW, &info);
    alloc_memory();
}
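
// Usage sketch (illustrative; assumes the header exposes a memory() accessor
// for an object's primary internal allocation): host-visible memory is filled
// through map()/unmap() on the owning DeviceMemory:
//
//     void *ptr = buffer.memory().map(0); // hypothetical memory() accessor
//     if (ptr) {
//         memcpy(ptr, src, size);
//         buffer.memory().unmap();
//     }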

void Image::init(const Device &dev, const VkImageCreateInfo &info)
{
    init_no_mem(dev, info);
    alloc_memory();
}

void Image::init(const Device &dev, const VkImageCreateInfo &info, VkMemoryPropertyFlags &reqs)
{
    init_no_mem(dev, info);
    alloc_memory(reqs);
}

void Image::init_no_mem(const Device &dev, const VkImageCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateImage, dev, VK_OBJECT_TYPE_IMAGE, &info);
    init_info(dev, info);
}

void Image::init_info(const Device &dev, const VkImageCreateInfo &info)
{
    create_info_ = info;

    // Record the format features for this image's format/tiling combination
    for (std::vector<Device::Format>::const_iterator it = dev.formats().begin(); it != dev.formats().end(); it++) {
        if (memcmp(&it->format, &create_info_.format, sizeof(it->format)) == 0 && it->tiling == create_info_.tiling) {
            format_features_ = it->features;
            break;
        }
    }
}

void Image::bind_memory(const Device &dev, const VkSparseImageMemoryBindInfo &info,
                        const DeviceMemory &mem, VkDeviceSize mem_offset)
{
    // NOTE: mem and mem_offset are currently unused; the binding is described by info
    VkQueue queue = dev.graphics_queues()[0]->handle();
    EXPECT(vkQueueBindSparseImageMemory(queue, obj(), 1, &info) == VK_SUCCESS);
}

VkSubresourceLayout Image::subresource_layout(const VkImageSubresource &subres) const
{
    VkSubresourceLayout data;
    if (!EXPECT(vkGetImageSubresourceLayout(dev_->handle(), obj(), &subres, &data) == VK_SUCCESS))
        memset(&data, 0, sizeof(data));

    return data;
}

bool Image::transparent() const
{
    return (create_info_.tiling == VK_IMAGE_TILING_LINEAR &&
            create_info_.samples == 1 &&
            !(create_info_.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                    VK_IMAGE_USAGE_DEPTH_STENCIL_BIT)));
}

void ImageView::init(const Device &dev, const VkImageViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateImageView, dev, VK_OBJECT_TYPE_IMAGE_VIEW, &info);
    alloc_memory();
}

void AttachmentView::init(const Device &dev, const VkAttachmentViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateAttachmentView, dev, VK_OBJECT_TYPE_ATTACHMENT_VIEW, &info);
    alloc_memory();
}

void ShaderModule::init(const Device &dev, const VkShaderModuleCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateShaderModule, dev, VK_OBJECT_TYPE_SHADER_MODULE, &info);
}

VkResult ShaderModule::init_try(const Device &dev, const VkShaderModuleCreateInfo &info)
{
    /*
     * Note: cannot use DERIVED_OBJECT_TYPE_INIT as we need the
     * return code.
     */
    VkShaderModule sh;
    dev_ = &dev;
    VkResult err = vkCreateShaderModule(dev.handle(), &info, &sh);
    if (err == VK_SUCCESS)
        Object::init(sh, VK_OBJECT_TYPE_SHADER_MODULE);

    return err;
}

void Shader::init(const Device &dev, const VkShaderCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateShader, dev, VK_OBJECT_TYPE_SHADER, &info);
}

VkResult Shader::init_try(const Device &dev, const VkShaderCreateInfo &info)
{
    /*
     * Note: cannot use DERIVED_OBJECT_TYPE_INIT as we need the
     * return code.
     */
    VkShader sh;
    dev_ = &dev;
    VkResult err = vkCreateShader(dev.handle(), &info, &sh);
    if (err == VK_SUCCESS)
        Object::init(sh, VK_OBJECT_TYPE_SHADER);

    return err;
}
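
// The *_try variants above hand the raw VkResult back to the caller instead
// of routing failures through EXPECT(); tests that deliberately pass invalid
// create info can use them to assert on the specific error code, e.g.
// (illustrative):
//
//     vk_testing::ShaderModule module;
//     VkResult err = module.init_try(dev, bad_info);
//     // err may legitimately be an error here; no expectation failure fires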

void Pipeline::init(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
{
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    memset((void *) &ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, &cache);
    if (err == VK_SUCCESS) {
        DERIVED_OBJECT_TYPE_INIT(vkCreateGraphicsPipelines, dev, VK_OBJECT_TYPE_PIPELINE, cache, 1, &info);
        alloc_memory();
        vkDestroyPipelineCache(dev.handle(), cache);
    }
}

VkResult Pipeline::init_try(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
{
    VkPipeline pipe;
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    dev_ = &dev;
    memset((void *) &ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, &cache);
    EXPECT(err == VK_SUCCESS);
    if (err == VK_SUCCESS) {
        err = vkCreateGraphicsPipelines(dev.handle(), cache, 1, &info, &pipe);
        if (err == VK_SUCCESS) {
            Object::init(pipe, VK_OBJECT_TYPE_PIPELINE);
            alloc_memory();
        }
        // destroy the cache whether or not pipeline creation succeeded
        vkDestroyPipelineCache(dev.handle(), cache);
    }

    return err;
}

void Pipeline::init(const Device &dev, const VkComputePipelineCreateInfo &info)
{
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    memset((void *) &ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, &cache);
    if (err == VK_SUCCESS) {
        DERIVED_OBJECT_TYPE_INIT(vkCreateComputePipelines, dev, VK_OBJECT_TYPE_PIPELINE, cache, 1, &info);
        alloc_memory();
        vkDestroyPipelineCache(dev.handle(), cache);
    }
}
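
// vkCreate*Pipelines requires a VkPipelineCache, so the init() paths above
// create a throwaway cache, build the single pipeline through it, and destroy
// the cache immediately; these bindings never persist cache contents.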

void Sampler::init(const Device &dev, const VkSamplerCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateSampler, dev, VK_OBJECT_TYPE_SAMPLER, &info);
    alloc_memory();
}

void DescriptorSetLayout::init(const Device &dev, const VkDescriptorSetLayoutCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDescriptorSetLayout, dev, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT, &info);
    alloc_memory();
}

void PipelineLayout::init(const Device &dev, VkPipelineLayoutCreateInfo &info, const std::vector<const DescriptorSetLayout *> &layouts)
{
    const std::vector<VkDescriptorSetLayout> layout_objs = make_objects<VkDescriptorSetLayout>(layouts);
    info.pSetLayouts = &layout_objs[0];

    DERIVED_OBJECT_TYPE_INIT(vkCreatePipelineLayout, dev, VK_OBJECT_TYPE_PIPELINE_LAYOUT, &info);
    alloc_memory();
}

void DescriptorPool::init(const Device &dev, VkDescriptorPoolUsage usage,
                          uint32_t max_sets, const VkDescriptorPoolCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDescriptorPool, dev, VK_OBJECT_TYPE_DESCRIPTOR_POOL, usage, max_sets, &info);
    alloc_memory();
}

void DescriptorPool::reset()
{
    EXPECT(vkResetDescriptorPool(dev_->handle(), obj()) == VK_SUCCESS);
}

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const std::vector<const DescriptorSetLayout *> &layouts)
{
    const std::vector<VkDescriptorSetLayout> layout_objs = make_objects<VkDescriptorSetLayout>(layouts);

    std::vector<VkDescriptorSet> set_objs;
    set_objs.resize(layout_objs.size());

    uint32_t set_count;
    VkResult err = vkAllocDescriptorSets(dev_->handle(), obj(), usage, layout_objs.size(), &layout_objs[0], &set_objs[0], &set_count);
    if (err == VK_SUCCESS)
        EXPECT(set_count == set_objs.size());
    else
        set_count = 0; // failed allocation: return no sets rather than using an indeterminate count
    set_objs.resize(set_count);

    std::vector<DescriptorSet *> sets;
    sets.reserve(set_count);
    for (std::vector<VkDescriptorSet>::const_iterator it = set_objs.begin(); it != set_objs.end(); it++) {
        // do descriptor sets need memories bound?
        DescriptorSet *descriptorSet = new DescriptorSet(dev, *it);
        sets.push_back(descriptorSet);
    }
    return sets;
}

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout, uint32_t count)
{
    return alloc_sets(dev, usage, std::vector<const DescriptorSetLayout *>(count, &layout));
}

DescriptorSet *DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout)
{
    std::vector<DescriptorSet *> set = alloc_sets(dev, usage, layout, 1);
    return (set.empty()) ? NULL : set[0];
}

void DynamicVpStateObject::init(const Device &dev, const VkDynamicVpStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicViewportState, dev, VK_OBJECT_TYPE_DYNAMIC_VP_STATE, &info);
    alloc_memory();
}

void DynamicRsStateObject::init(const Device &dev, const VkDynamicRsStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicRasterState, dev, VK_OBJECT_TYPE_DYNAMIC_RS_STATE, &info);
    alloc_memory();
}

void DynamicCbStateObject::init(const Device &dev, const VkDynamicCbStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicColorBlendState, dev, VK_OBJECT_TYPE_DYNAMIC_CB_STATE, &info);
    alloc_memory();
}

void DynamicDsStateObject::init(const Device &dev, const VkDynamicDsStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicDepthStencilState, dev, VK_OBJECT_TYPE_DYNAMIC_DS_STATE, &info);
    alloc_memory();
}

void CmdBuffer::init(const Device &dev, const VkCmdBufferCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateCommandBuffer, dev, VK_OBJECT_TYPE_COMMAND_BUFFER, &info);
}

void CmdBuffer::begin(const VkCmdBufferBeginInfo *info)
{
    EXPECT(vkBeginCommandBuffer(obj(), info) == VK_SUCCESS);
}

void CmdBuffer::begin()
{
    VkCmdBufferBeginInfo info = {};
    info.flags = VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT |
        VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT;
    info.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO;

    begin(&info);
}

void CmdBuffer::end()
{
    EXPECT(vkEndCommandBuffer(obj()) == VK_SUCCESS);
}

void CmdBuffer::reset()
{
    EXPECT(vkResetCommandBuffer(obj()) == VK_SUCCESS);
}

} // namespace vk_testing