vktestbinding.cpp revision 6abe35d79296718595d738f8b78a6487110fa861
// VK tests
//
// Copyright (C) 2014 LunarG, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

#include <iostream>
#include <string.h> // memset(), memcmp()
#include <assert.h>
#include "vktestbinding.h"

namespace {

#define NON_DISPATCHABLE_HANDLE_INIT(create_func, dev, ...)                        \
    do {                                                                           \
        handle_type handle;                                                        \
        if (EXPECT(create_func(dev.handle(), __VA_ARGS__, &handle) == VK_SUCCESS)) \
            NonDispHandle::init(dev.handle(), handle);                             \
    } while (0)

#define NON_DISPATCHABLE_HANDLE_DTOR(cls, destroy_func, ...)                     \
    cls::~cls()                                                                  \
    {                                                                            \
        if (initialized())                                                       \
            EXPECT(destroy_func(device(), __VA_ARGS__, handle()) == VK_SUCCESS); \
    }

#define DERIVED_OBJECT_TYPE_INIT(create_func, dev, vk_object_type, ...)         \
    do {                                                                        \
        obj_type obj;                                                           \
        dev_ = &dev;                                                            \
        if (EXPECT(create_func(dev.handle(), __VA_ARGS__, &obj) == VK_SUCCESS)) \
            base_type::init(obj, vk_object_type);                               \
    } while (0)

#define STRINGIFY(x) #x
#define EXPECT(expr) ((expr) ? true : expect_failure(STRINGIFY(expr), __FILE__, __LINE__, __FUNCTION__))
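// Note: EXPECT() reports through expect_failure() and evaluates to false rather
// than aborting, so callers can skip dependent work and keep the test running.
// Illustrative caller (hypothetical):
//
//     if (!EXPECT(vkEndCommandBuffer(cmd) == VK_SUCCESS))
//         return;  // failure already logged via expect_failure()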

vk_testing::ErrorCallback error_callback;

bool expect_failure(const char *expr, const char *file, unsigned int line, const char *function)
{
    if (error_callback) {
        error_callback(expr, file, line, function);
    } else {
        std::cerr << file << ":" << line << ": " << function <<
            ": Expectation `" << expr << "' failed.\n";
    }

    return false;
}

template<class T, class S>
std::vector<T> make_handles(const std::vector<S> &v)
{
    std::vector<T> handles;
    handles.reserve(v.size());
    for (typename std::vector<S>::const_iterator it = v.begin(); it != v.end(); it++)
        handles.push_back((*it)->handle());
    return handles;
}

template<class T, class S>
std::vector<T> make_objects(const std::vector<S> &v)
{
    std::vector<T> objs;
    objs.reserve(v.size());
    for (typename std::vector<S>::const_iterator it = v.begin(); it != v.end(); it++)
        objs.push_back((*it)->obj());
    return objs;
}

template<typename T>
std::vector<T> get_memory_reqs(VkDevice device, VkObjectType obj_type, VkObject obj, size_t min_elems)
{
    std::vector<T> info;

    info.resize((min_elems > 0) ? min_elems : 1);
    if (!EXPECT(vkGetObjectMemoryRequirements(device, obj_type, obj, &info[0]) == VK_SUCCESS))
        info.clear();

    if (info.size() < min_elems)
        info.resize(min_elems);

    return info;
}
} // namespace

namespace vk_testing {

void set_error_callback(ErrorCallback callback)
{
    error_callback = callback;
}

VkPhysicalDeviceProperties PhysicalDevice::properties() const
{
    VkPhysicalDeviceProperties info;

    EXPECT(vkGetPhysicalDeviceProperties(handle(), &info) == VK_SUCCESS);

    return info;
}

VkPhysicalDevicePerformance PhysicalDevice::performance() const
{
    VkPhysicalDevicePerformance info;

    EXPECT(vkGetPhysicalDevicePerformance(handle(), &info) == VK_SUCCESS);

    return info;
}

std::vector<VkPhysicalDeviceQueueProperties> PhysicalDevice::queue_properties() const
{
    std::vector<VkPhysicalDeviceQueueProperties> info;
    uint32_t count;

    if (EXPECT(vkGetPhysicalDeviceQueueCount(handle(), &count) == VK_SUCCESS)) {
        info.resize(count);
        if (!EXPECT(vkGetPhysicalDeviceQueueProperties(handle(), count, &info[0]) == VK_SUCCESS))
            info.clear();
    }

    return info;
}

VkPhysicalDeviceMemoryProperties PhysicalDevice::memory_properties() const
{
    VkPhysicalDeviceMemoryProperties info;

    EXPECT(vkGetPhysicalDeviceMemoryProperties(handle(), &info) == VK_SUCCESS);

    return info;
}

/*
 * Return list of Global layers available
 */
std::vector<VkLayerProperties> GetGlobalLayers()
{
    VkResult err;
    std::vector<VkLayerProperties> layers;
    uint32_t layer_count;

    do {
        layer_count = 0;
        err = vkGetGlobalLayerProperties(&layer_count, NULL);

        if (err == VK_SUCCESS) {
            // resize() (not reserve()) so &layers[0] is valid, writable storage
            layers.resize(layer_count);
            err = vkGetGlobalLayerProperties(&layer_count, &layers[0]);
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return layers;
}
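// Standard two-call enumeration pattern: query the count, size the storage,
// then fetch the data; loop again if the implementation returns VK_INCOMPLETE
// because the count changed between the two calls. The same shape is used for
// extensions and per-device layers below.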

/*
 * Return list of Global extensions provided by the ICD / Loader
 */
std::vector<VkExtensionProperties> GetGlobalExtensions()
{
    return GetGlobalExtensions(NULL);
}

/*
 * Return list of Global extensions provided by the specified layer
 * If pLayerName is NULL, will return extensions implemented by the loader / ICDs
 */
std::vector<VkExtensionProperties> GetGlobalExtensions(const char *pLayerName)
{
    std::vector<VkExtensionProperties> exts;
    uint32_t ext_count;
    VkResult err;

    do {
        ext_count = 0;
        err = vkGetGlobalExtensionProperties(pLayerName, &ext_count, NULL);

        if (err == VK_SUCCESS) {
            exts.resize(ext_count);
            err = vkGetGlobalExtensionProperties(pLayerName, &ext_count, &exts[0]);
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return exts;
}

/*
 * Return list of PhysicalDevice extensions provided by the ICD / Loader
 */
std::vector<VkExtensionProperties> PhysicalDevice::extensions() const
{
    return extensions(NULL);
}

/*
 * Return list of PhysicalDevice extensions provided by the specified layer
 * If pLayerName is NULL, will return extensions for ICD / loader.
 */
std::vector<VkExtensionProperties> PhysicalDevice::extensions(const char *pLayerName) const
{
    std::vector<VkExtensionProperties> exts;
    VkResult err;

    do {
        uint32_t extCount = 0;
        err = vkGetPhysicalDeviceExtensionProperties(handle(), pLayerName, &extCount, NULL);

        if (err == VK_SUCCESS) {
            // resize() (not reserve()) so &exts[0] is valid, writable storage
            exts.resize(extCount);
            err = vkGetPhysicalDeviceExtensionProperties(handle(), pLayerName, &extCount, &exts[0]);
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return exts;
}

VkResult PhysicalDevice::set_memory_type(const uint32_t type_bits, VkMemoryAllocInfo *info, const VkFlags properties) const
{
    uint32_t type_mask = type_bits;
    // Search memtypes to find first index with those properties
    for (uint32_t i = 0; i < 32; i++) {
        if ((type_mask & 1) == 1) {
            // Type is available, does it match user properties?
            if ((memory_properties_.memoryTypes[i].propertyFlags & properties) == properties) {
                info->memoryTypeIndex = i;
                return VK_SUCCESS;
            }
        }
        type_mask >>= 1;
    }
    // No memory types matched, return failure
    return VK_UNSUPPORTED;
}
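// Worked example (illustrative values): type_bits == 0x6 means memory types 1
// and 2 are allowed. The scan skips type 0 (bit clear), then returns the first
// of types 1 or 2 whose propertyFlags contain every requested property bit,
// writing that index into info->memoryTypeIndex; if none match, VK_UNSUPPORTED.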

/*
 * Return list of PhysicalDevice layers
 */
std::vector<VkLayerProperties> PhysicalDevice::layers() const
{
    std::vector<VkLayerProperties> layer_props;
    VkResult err;

    do {
        uint32_t layer_count = 0;
        err = vkGetPhysicalDeviceLayerProperties(handle(), &layer_count, NULL);

        if (err == VK_SUCCESS) {
            // resize() (not reserve()) so &layer_props[0] is valid, writable storage
            layer_props.resize(layer_count);
            err = vkGetPhysicalDeviceLayerProperties(handle(), &layer_count, &layer_props[0]);
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return layer_props;
}

void BaseObject::init(VkObject obj, VkObjectType type, bool own)
{
    EXPECT(!initialized());
    reinit(obj, type, own);
}

void BaseObject::reinit(VkObject obj, VkObjectType type, bool own)
{
    obj_ = obj;
    object_type_ = type;
    own_obj_ = own;
}

uint32_t Object::memory_allocation_count() const
{
    return 1;
}

std::vector<VkMemoryRequirements> Object::memory_requirements() const
{
    uint32_t num_allocations = 1;
    std::vector<VkMemoryRequirements> info =
        get_memory_reqs<VkMemoryRequirements>(dev_->handle(), type(), obj(), 0);
    EXPECT(info.size() == num_allocations);
    if (info.size() == 1 && !info[0].size)
        info.clear();

    return info;
}

void Object::init(VkObject obj, VkObjectType object_type, bool own)
{
    BaseObject::init(obj, object_type, own);
    mem_alloc_count_ = memory_allocation_count();
}

void Object::reinit(VkObject obj, VkObjectType object_type, bool own)
{
    cleanup();
    BaseObject::reinit(obj, object_type, own);
    mem_alloc_count_ = memory_allocation_count();
}

void Object::cleanup()
{
    if (!initialized())
        return;

    if (own())
        EXPECT(vkDestroyObject(dev_->handle(), type(), obj()) == VK_SUCCESS);

    if (internal_mems_) {
        delete[] internal_mems_;
        internal_mems_ = NULL;
        primary_mem_ = NULL;
    }

    mem_alloc_count_ = 0;
}

void Object::bind_memory(const DeviceMemory &mem, VkDeviceSize mem_offset)
{
    bound = true;
    EXPECT(vkBindObjectMemory(dev_->handle(), type(), obj(), mem.handle(), mem_offset) == VK_SUCCESS);
}

void Object::alloc_memory()
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new DeviceMemory[mem_alloc_count_];

    const std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    VkMemoryAllocInfo info;

    for (size_t i = 0; i < mem_reqs.size(); i++) {
        info = DeviceMemory::alloc_info(mem_reqs[i].size, 0);
        dev_->phy().set_memory_type(mem_reqs[i].memoryTypeBits, &info, 0);
        primary_mem_ = &internal_mems_[i];
        internal_mems_[i].init(*dev_, info);
        bind_memory(internal_mems_[i], 0);
    }
}
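// alloc_memory() above and the VkMemoryPropertyFlags overload below implement
// the usual sequence: query memory requirements, pick a compatible memory
// type, allocate one DeviceMemory per reported allocation, and bind it;
// primary_mem_ ends up pointing at the last allocation bound.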

void Object::alloc_memory(VkMemoryPropertyFlags &reqs)
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new DeviceMemory[mem_alloc_count_];

    std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    VkMemoryAllocInfo info;

    for (size_t i = 0; i < mem_reqs.size(); i++) {
        info = DeviceMemory::alloc_info(mem_reqs[i].size, 0);
        dev_->phy().set_memory_type(mem_reqs[i].memoryTypeBits, &info, reqs);
        primary_mem_ = &internal_mems_[i];
        internal_mems_[i].init(*dev_, info);
        bind_memory(internal_mems_[i], 0);
    }
}

std::vector<VkDeviceMemory> Object::memories() const
{
    std::vector<VkDeviceMemory> mems;
    if (internal_mems_) {
        mems.reserve(mem_alloc_count_);
        for (uint32_t i = 0; i < mem_alloc_count_; i++)
            mems.push_back(internal_mems_[i].handle());
    }

    return mems;
}

Device::~Device()
{
    if (!initialized())
        return;

    for (int i = 0; i < QUEUE_COUNT; i++) {
        for (std::vector<Queue *>::iterator it = queues_[i].begin(); it != queues_[i].end(); it++)
            delete *it;
        queues_[i].clear();
    }

    EXPECT(vkDestroyDevice(handle()) == VK_SUCCESS);
}

void Device::init(std::vector<const char *> &layers, std::vector<const char *> &extensions)
{
    // request all queues
    const std::vector<VkPhysicalDeviceQueueProperties> queue_props = phy_.queue_properties();
    std::vector<VkDeviceQueueCreateInfo> queue_info;
    queue_info.reserve(queue_props.size());
    for (uint32_t i = 0; i < queue_props.size(); i++) {
        VkDeviceQueueCreateInfo qi = {};
        qi.queueNodeIndex = i;
        qi.queueCount = queue_props[i].queueCount;
        if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphics_queue_node_index_ = i;
        }
        queue_info.push_back(qi);
    }

    VkDeviceCreateInfo dev_info = {};
    dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    dev_info.pNext = NULL;
    dev_info.queueRecordCount = queue_info.size();
    dev_info.pRequestedQueues = &queue_info[0];
    dev_info.layerCount = layers.size();
    dev_info.ppEnabledLayerNames = layers.empty() ? NULL : &layers[0];
    dev_info.extensionCount = extensions.size();
    dev_info.ppEnabledExtensionNames = extensions.empty() ? NULL : &extensions[0];
    dev_info.flags = 0;

    init(dev_info);
}

void Device::init(const VkDeviceCreateInfo &info)
{
    VkDevice dev;

    if (EXPECT(vkCreateDevice(phy_.handle(), &info, &dev) == VK_SUCCESS)) {
        Handle::init(dev);
        // only probe queues and formats on a successfully created device
        init_queues();
        init_formats();
    }
}
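// Illustrative use from a test (hypothetical names; assumes `device` is a
// vk_testing::Device already bound to a physical device):
//
//     std::vector<const char *> enable_layers;      // names of layers to enable
//     std::vector<const char *> enable_extensions;  // names of extensions to enable
//     device.init(enable_layers, enable_extensions);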

void Device::init_queues()
{
    VkResult err;
    uint32_t queue_node_count;

    err = vkGetPhysicalDeviceQueueCount(phy_.handle(), &queue_node_count);
    EXPECT(err == VK_SUCCESS);
    EXPECT(queue_node_count >= 1);

    VkPhysicalDeviceQueueProperties *queue_props = new VkPhysicalDeviceQueueProperties[queue_node_count];

    err = vkGetPhysicalDeviceQueueProperties(phy_.handle(), queue_node_count, queue_props);
    EXPECT(err == VK_SUCCESS);

    for (uint32_t i = 0; i < queue_node_count; i++) {
        VkQueue queue;

        for (uint32_t j = 0; j < queue_props[i].queueCount; j++) {
            // TODO: Need to add support for separate MEMMGR and work queues, including synchronization
            err = vkGetDeviceQueue(handle(), i, j, &queue);
            EXPECT(err == VK_SUCCESS);

            if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
                queues_[GRAPHICS].push_back(new Queue(queue));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
                queues_[COMPUTE].push_back(new Queue(queue));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_DMA_BIT) {
                queues_[DMA].push_back(new Queue(queue));
            }
        }
    }

    delete[] queue_props;

    EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
}

void Device::init_formats()
{
    for (int f = VK_FORMAT_BEGIN_RANGE; f <= VK_FORMAT_END_RANGE; f++) {
        const VkFormat fmt = static_cast<VkFormat>(f);
        const VkFormatProperties props = format_properties(fmt);

        if (props.linearTilingFeatures) {
            const Format tmp = { fmt, VK_IMAGE_TILING_LINEAR, props.linearTilingFeatures };
            formats_.push_back(tmp);
        }

        if (props.optimalTilingFeatures) {
            const Format tmp = { fmt, VK_IMAGE_TILING_OPTIMAL, props.optimalTilingFeatures };
            formats_.push_back(tmp);
        }
    }

    EXPECT(!formats_.empty());
}

VkFormatProperties Device::format_properties(VkFormat format)
{
    VkFormatProperties data;
    if (!EXPECT(vkGetPhysicalDeviceFormatInfo(phy().handle(), format, &data) == VK_SUCCESS))
        memset(&data, 0, sizeof(data));

    return data;
}
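// init_formats() leaves formats_ holding one (format, tiling, features) entry
// per supported combination; Image::init_info() later scans this table to
// cache the feature flags for a given format/tiling pair.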

void Device::wait()
{
    EXPECT(vkDeviceWaitIdle(handle()) == VK_SUCCESS);
}

VkResult Device::wait(const std::vector<const Fence *> &fences, bool wait_all, uint64_t timeout)
{
    const std::vector<VkFence> fence_handles = make_handles<VkFence>(fences);
    VkResult err = vkWaitForFences(handle(), fence_handles.size(), &fence_handles[0], wait_all, timeout);
    EXPECT(err == VK_SUCCESS || err == VK_TIMEOUT);

    return err;
}

VkResult Device::update_descriptor_sets(const std::vector<VkWriteDescriptorSet> &writes, const std::vector<VkCopyDescriptorSet> &copies)
{
    return vkUpdateDescriptorSets(handle(), writes.size(), &writes[0], copies.size(), &copies[0]);
}

void Queue::submit(const std::vector<const CmdBuffer *> &cmds, Fence &fence)
{
    const std::vector<VkCmdBuffer> cmd_objs = make_objects<VkCmdBuffer>(cmds);
    EXPECT(vkQueueSubmit(handle(), cmd_objs.size(), &cmd_objs[0], fence.handle()) == VK_SUCCESS);
}

void Queue::submit(const CmdBuffer &cmd, Fence &fence)
{
    submit(std::vector<const CmdBuffer *>(1, &cmd), fence);
}

void Queue::submit(const CmdBuffer &cmd)
{
    Fence fence;
    submit(cmd, fence);
}

void Queue::wait()
{
    EXPECT(vkQueueWaitIdle(handle()) == VK_SUCCESS);
}

void Queue::signal_semaphore(Semaphore &sem)
{
    EXPECT(vkQueueSignalSemaphore(handle(), sem.handle()) == VK_SUCCESS);
}

void Queue::wait_semaphore(Semaphore &sem)
{
    EXPECT(vkQueueWaitSemaphore(handle(), sem.handle()) == VK_SUCCESS);
}

DeviceMemory::~DeviceMemory()
{
    if (initialized())
        EXPECT(vkFreeMemory(device(), handle()) == VK_SUCCESS);
}

void DeviceMemory::init(const Device &dev, const VkMemoryAllocInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkAllocMemory, dev, &info);
}

const void *DeviceMemory::map(VkFlags flags) const
{
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0, 0, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void *DeviceMemory::map(VkFlags flags)
{
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0, 0, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void DeviceMemory::unmap() const
{
    EXPECT(vkUnmapMemory(device(), handle()) == VK_SUCCESS);
}
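// Typical map/write/unmap round trip (illustrative; assumes `mem` was
// allocated from a host-visible memory type and `src`/`size` are test-provided):
//
//     void *ptr = mem.map(0);
//     if (ptr) {
//         memcpy(ptr, src, size);
//         mem.unmap();
//     }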

NON_DISPATCHABLE_HANDLE_DTOR(Fence, vkDestroyObject, VK_OBJECT_TYPE_FENCE)

void Fence::init(const Device &dev, const VkFenceCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateFence, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Semaphore, vkDestroyObject, VK_OBJECT_TYPE_SEMAPHORE)

void Semaphore::init(const Device &dev, const VkSemaphoreCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateSemaphore, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Event, vkDestroyObject, VK_OBJECT_TYPE_EVENT)

void Event::init(const Device &dev, const VkEventCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateEvent, dev, &info);
}

void Event::set()
{
    EXPECT(vkSetEvent(device(), handle()) == VK_SUCCESS);
}

void Event::reset()
{
    EXPECT(vkResetEvent(device(), handle()) == VK_SUCCESS);
}

NON_DISPATCHABLE_HANDLE_DTOR(QueryPool, vkDestroyObject, VK_OBJECT_TYPE_QUERY_POOL)

void QueryPool::init(const Device &dev, const VkQueryPoolCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateQueryPool, dev, &info);
}

VkResult QueryPool::results(uint32_t start, uint32_t count, size_t size, void *data)
{
    size_t tmp = size;
    VkResult err = vkGetQueryPoolResults(device(), handle(), start, count, &tmp, data, 0);
    if (err == VK_SUCCESS) {
        if (!EXPECT(tmp == size))
            memset(data, 0, size);
    } else {
        EXPECT(err == VK_NOT_READY);
    }

    return err;
}

void Buffer::init(const Device &dev, const VkBufferCreateInfo &info)
{
    init_no_mem(dev, info);
    alloc_memory();
}

void Buffer::init(const Device &dev, const VkBufferCreateInfo &info, VkMemoryPropertyFlags &reqs)
{
    init_no_mem(dev, info);
    alloc_memory(reqs);
}

void Buffer::init_no_mem(const Device &dev, const VkBufferCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateBuffer, dev, VK_OBJECT_TYPE_BUFFER, &info);
    create_info_ = info;
}

void Buffer::bind_memory(VkDeviceSize offset, VkDeviceSize size,
                         const DeviceMemory &mem, VkDeviceSize mem_offset)
{
    VkQueue queue = dev_->graphics_queues()[0]->handle();
    VkSparseMemoryBindInfo bindInfo;
    memset(&bindInfo, 0, sizeof(VkSparseMemoryBindInfo));
    bindInfo.offset = offset;
    bindInfo.memOffset = mem_offset;
    bindInfo.mem = mem.handle();
    EXPECT(vkQueueBindSparseBufferMemory(queue, obj(), 1, &bindInfo) == VK_SUCCESS);
}

NON_DISPATCHABLE_HANDLE_DTOR(BufferView, vkDestroyObject, VK_OBJECT_TYPE_BUFFER_VIEW)

void BufferView::init(const Device &dev, const VkBufferViewCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateBufferView, dev, &info);
}

void Image::init(const Device &dev, const VkImageCreateInfo &info)
{
    init_no_mem(dev, info);
    alloc_memory();
}

void Image::init(const Device &dev, const VkImageCreateInfo &info, VkMemoryPropertyFlags &reqs)
{
    init_no_mem(dev, info);
    alloc_memory(reqs);
}

void Image::init_no_mem(const Device &dev, const VkImageCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateImage, dev, VK_OBJECT_TYPE_IMAGE, &info);
    init_info(dev, info);
}

void Image::init_info(const Device &dev, const VkImageCreateInfo &info)
{
    create_info_ = info;

    for (std::vector<Device::Format>::const_iterator it = dev.formats().begin(); it != dev.formats().end(); it++) {
        if (memcmp(&it->format, &create_info_.format, sizeof(it->format)) == 0 && it->tiling == create_info_.tiling) {
            format_features_ = it->features;
            break;
        }
    }
}

void Image::bind_memory(const Device &dev, const VkSparseImageMemoryBindInfo &info,
                        const DeviceMemory &mem, VkDeviceSize mem_offset)
{
    VkQueue queue = dev.graphics_queues()[0]->handle();
    EXPECT(vkQueueBindSparseImageMemory(queue, obj(), 1, &info) == VK_SUCCESS);
}

VkSubresourceLayout Image::subresource_layout(const VkImageSubresource &subres) const
{
    VkSubresourceLayout data;
    if (!EXPECT(vkGetImageSubresourceLayout(dev_->handle(), obj(), &subres, &data) == VK_SUCCESS))
        memset(&data, 0, sizeof(data));

    return data;
}

bool Image::transparent() const
{
    return (create_info_.tiling == VK_IMAGE_TILING_LINEAR &&
            create_info_.samples == 1 &&
            !(create_info_.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                    VK_IMAGE_USAGE_DEPTH_STENCIL_BIT)));
}
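// transparent() above identifies images whose contents a test can touch
// directly through mapped memory: linearly tiled, single-sampled, and not
// used as a color or depth/stencil attachment.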

NON_DISPATCHABLE_HANDLE_DTOR(ImageView, vkDestroyObject, VK_OBJECT_TYPE_IMAGE_VIEW)

void ImageView::init(const Device &dev, const VkImageViewCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateImageView, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(AttachmentView, vkDestroyObject, VK_OBJECT_TYPE_ATTACHMENT_VIEW)

void AttachmentView::init(const Device &dev, const VkAttachmentViewCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateAttachmentView, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(ShaderModule, vkDestroyObject, VK_OBJECT_TYPE_SHADER_MODULE)

void ShaderModule::init(const Device &dev, const VkShaderModuleCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateShaderModule, dev, &info);
}

VkResult ShaderModule::init_try(const Device &dev, const VkShaderModuleCreateInfo &info)
{
    VkShaderModule mod;

    VkResult err = vkCreateShaderModule(dev.handle(), &info, &mod);
    if (err == VK_SUCCESS)
        NonDispHandle::init(dev.handle(), mod);

    return err;
}

NON_DISPATCHABLE_HANDLE_DTOR(Shader, vkDestroyObject, VK_OBJECT_TYPE_SHADER)

void Shader::init(const Device &dev, const VkShaderCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateShader, dev, &info);
}

VkResult Shader::init_try(const Device &dev, const VkShaderCreateInfo &info)
{
    VkShader sh;

    VkResult err = vkCreateShader(dev.handle(), &info, &sh);
    if (err == VK_SUCCESS)
        NonDispHandle::init(dev.handle(), sh);

    return err;
}

NON_DISPATCHABLE_HANDLE_DTOR(Pipeline, vkDestroyObject, VK_OBJECT_TYPE_PIPELINE)

void Pipeline::init(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
{
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    memset((void *) &ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, &cache);
    if (err == VK_SUCCESS) {
        NON_DISPATCHABLE_HANDLE_INIT(vkCreateGraphicsPipelines, dev, cache, 1, &info);
        vkDestroyPipelineCache(dev.handle(), cache);
    }
}

VkResult Pipeline::init_try(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
{
    VkPipeline pipe;
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    memset((void *) &ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, &cache);
    EXPECT(err == VK_SUCCESS);
    if (err == VK_SUCCESS) {
        err = vkCreateGraphicsPipelines(dev.handle(), cache, 1, &info, &pipe);
        if (err == VK_SUCCESS)
            NonDispHandle::init(dev.handle(), pipe);
        // destroy the transient cache whether or not pipeline creation succeeded
        vkDestroyPipelineCache(dev.handle(), cache);
    }

    return err;
}

void Pipeline::init(const Device &dev, const VkComputePipelineCreateInfo &info)
{
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    memset((void *) &ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, &cache);
    if (err == VK_SUCCESS) {
        NON_DISPATCHABLE_HANDLE_INIT(vkCreateComputePipelines, dev, cache, 1, &info);
        vkDestroyPipelineCache(dev.handle(), cache);
    }
}

NON_DISPATCHABLE_HANDLE_DTOR(PipelineLayout, vkDestroyObject, VK_OBJECT_TYPE_PIPELINE_LAYOUT)

void PipelineLayout::init(const Device &dev, VkPipelineLayoutCreateInfo &info, const std::vector<const DescriptorSetLayout *> &layouts)
{
    const std::vector<VkDescriptorSetLayout> layout_objs = make_objects<VkDescriptorSetLayout>(layouts);
    info.pSetLayouts = &layout_objs[0];

    NON_DISPATCHABLE_HANDLE_INIT(vkCreatePipelineLayout, dev, &info);
}
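// Convention for the wrappers above: init() asserts success through EXPECT()
// and is intended for fixtures that require the object to exist, while
// init_try() returns the raw VkResult so negative tests can exercise expected
// failures without tripping an expectation.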

NON_DISPATCHABLE_HANDLE_DTOR(Sampler, vkDestroyObject, VK_OBJECT_TYPE_SAMPLER)

void Sampler::init(const Device &dev, const VkSamplerCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateSampler, dev, &info);
}

void DescriptorSetLayout::init(const Device &dev, const VkDescriptorSetLayoutCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDescriptorSetLayout, dev, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT, &info);
    alloc_memory();
}

void DescriptorPool::init(const Device &dev, VkDescriptorPoolUsage usage,
                          uint32_t max_sets, const VkDescriptorPoolCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDescriptorPool, dev, VK_OBJECT_TYPE_DESCRIPTOR_POOL, usage, max_sets, &info);
    alloc_memory();
}

void DescriptorPool::reset()
{
    EXPECT(vkResetDescriptorPool(dev_->handle(), obj()) == VK_SUCCESS);
}

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const std::vector<const DescriptorSetLayout *> &layouts)
{
    const std::vector<VkDescriptorSetLayout> layout_objs = make_objects<VkDescriptorSetLayout>(layouts);

    std::vector<VkDescriptorSet> set_objs;
    set_objs.resize(layout_objs.size());

    uint32_t set_count = 0;
    VkResult err = vkAllocDescriptorSets(dev_->handle(), obj(), usage, layout_objs.size(), &layout_objs[0], &set_objs[0], &set_count);
    if (err == VK_SUCCESS)
        EXPECT(set_count == set_objs.size());
    else
        set_count = 0; // don't trust the output count on failure
    set_objs.resize(set_count);

    std::vector<DescriptorSet *> sets;
    sets.reserve(set_count);
    for (std::vector<VkDescriptorSet>::const_iterator it = set_objs.begin(); it != set_objs.end(); it++) {
        // do descriptor sets need memories bound?
        DescriptorSet *descriptorSet = new DescriptorSet(dev, *it);
        sets.push_back(descriptorSet);
    }
    return sets;
}

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout, uint32_t count)
{
    return alloc_sets(dev, usage, std::vector<const DescriptorSetLayout *>(count, &layout));
}

DescriptorSet *DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout)
{
    std::vector<DescriptorSet *> set = alloc_sets(dev, usage, layout, 1);
    return (set.empty()) ? NULL : set[0];
}

void DynamicVpStateObject::init(const Device &dev, const VkDynamicVpStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicViewportState, dev, VK_OBJECT_TYPE_DYNAMIC_VP_STATE, &info);
    alloc_memory();
}

void DynamicRsStateObject::init(const Device &dev, const VkDynamicRsStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicRasterState, dev, VK_OBJECT_TYPE_DYNAMIC_RS_STATE, &info);
    alloc_memory();
}

void DynamicCbStateObject::init(const Device &dev, const VkDynamicCbStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicColorBlendState, dev, VK_OBJECT_TYPE_DYNAMIC_CB_STATE, &info);
    alloc_memory();
}

void DynamicDsStateObject::init(const Device &dev, const VkDynamicDsStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicDepthStencilState, dev, VK_OBJECT_TYPE_DYNAMIC_DS_STATE, &info);
    alloc_memory();
}

void CmdBuffer::init(const Device &dev, const VkCmdBufferCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateCommandBuffer, dev, VK_OBJECT_TYPE_COMMAND_BUFFER, &info);
}

void CmdBuffer::begin(const VkCmdBufferBeginInfo *info)
{
    EXPECT(vkBeginCommandBuffer(obj(), info) == VK_SUCCESS);
}

void CmdBuffer::begin()
{
    VkCmdBufferBeginInfo info = {};
    info.flags = VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT |
        VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT;
    info.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO;

    begin(&info);
}

void CmdBuffer::end()
{
    EXPECT(vkEndCommandBuffer(obj()) == VK_SUCCESS);
}

void CmdBuffer::reset()
{
    EXPECT(vkResetCommandBuffer(obj()) == VK_SUCCESS);
}

} // namespace vk_testing
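// Typical record/submit sequence built from these wrappers (illustrative;
// `cmd_buf` and `queue` are hypothetical objects owned by the test):
//
//     cmd_buf.begin();        // one-time-submit / small-batch flags
//     // ... vkCmd*() calls against cmd_buf.obj() ...
//     cmd_buf.end();
//     queue.submit(cmd_buf);  // submits with a default-constructed Fence
//     queue.wait();           // vkQueueWaitIdle()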