// vktestbinding.cpp revision 7cb8050c2ff859f7856d20531f14217df2edcd6f
//
// VK tests
//
// Copyright (C) 2014 LunarG, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

#include <iostream>
#include <string.h> // memset(), memcmp()
#include <assert.h>
#include "vktestbinding.h"

namespace {

// Create a Vulkan object with create_func and, on success, hand the new
// handle and its object type to the wrapper's base_type::init().
#define DERIVED_OBJECT_TYPE_INIT(create_func, dev, vk_object_type, ...)      \
    do {                                                                     \
        obj_type obj;                                                        \
        dev_ = &dev;                                                         \
        if (EXPECT(create_func(dev.obj(), __VA_ARGS__, &obj) == VK_SUCCESS)) \
            base_type::init(obj, vk_object_type);                            \
    } while (0)

#define STRINGIFY(x) #x
#define EXPECT(expr) ((expr) ? true : expect_failure(STRINGIFY(expr), __FILE__, __LINE__, __FUNCTION__))

vk_testing::ErrorCallback error_callback;

bool expect_failure(const char *expr, const char *file, unsigned int line, const char *function)
{
    if (error_callback) {
        error_callback(expr, file, line, function);
    } else {
        std::cerr << file << ":" << line << ": " << function <<
            ": Expectation `" << expr << "' failed.\n";
    }

    return false;
}

// Collect the raw Vulkan handles from a vector of wrapper pointers.
template<class T, class S>
std::vector<T> make_objects(const std::vector<S> &v)
{
    std::vector<T> objs;
    objs.reserve(v.size());
    for (typename std::vector<S>::const_iterator it = v.begin(); it != v.end(); it++)
        objs.push_back((*it)->obj());
    return objs;
}

// Query physical-device info with the usual two-call idiom: first ask for
// the size, then for the data.  Pads the result to at least min_elems
// (zero-filled) elements so callers can index it unconditionally.
template<typename T>
std::vector<T> get_info(VkPhysicalDevice gpu, VkPhysicalDeviceInfoType type, size_t min_elems)
{
    std::vector<T> info;
    size_t size;
    if (EXPECT(vkGetPhysicalDeviceInfo(gpu, type, &size, NULL) == VK_SUCCESS && size % sizeof(T) == 0)) {
        info.resize(size / sizeof(T));
        if (!EXPECT(vkGetPhysicalDeviceInfo(gpu, type, &size, &info[0]) == VK_SUCCESS && size == info.size() * sizeof(T)))
            info.clear();
    }

    if (info.size() < min_elems)
        info.resize(min_elems);

    return info;
}

// Same two-call idiom for per-object queries via vkGetObjectInfo().
template<typename T>
std::vector<T> get_info(VkDevice device, VkObjectType object_type, VkObject obj, VkObjectInfoType type, size_t min_elems)
{
    std::vector<T> info;
    size_t size;
    if (EXPECT(vkGetObjectInfo(device, object_type, obj, type, &size, NULL) == VK_SUCCESS && size % sizeof(T) == 0)) {
        info.resize(size / sizeof(T));
        if (!EXPECT(vkGetObjectInfo(device, object_type, obj, type, &size, &info[0]) == VK_SUCCESS && size == info.size() * sizeof(T)))
            info.clear();
    }

    if (info.size() < min_elems)
        info.resize(min_elems);

    return info;
}

} // namespace
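
// Illustration (not part of the original file): the two-call query idiom the
// get_info() helpers wrap.  A hypothetical caller querying queue properties
// directly would look roughly like this:
//
//     size_t size;
//     vkGetPhysicalDeviceInfo(gpu, VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES,
//                             &size, NULL);                  // 1st call: size only
//     std::vector<VkPhysicalDeviceQueueProperties> props(
//         size / sizeof(VkPhysicalDeviceQueueProperties));
//     vkGetPhysicalDeviceInfo(gpu, VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES,
//                             &size, &props[0]);             // 2nd call: fill data
//
// get_info() additionally checks both results with EXPECT and pads the vector
// to min_elems so callers can index element 0 even on failure.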
namespace vk_testing {

void set_error_callback(ErrorCallback callback)
{
    error_callback = callback;
}

VkPhysicalDeviceProperties PhysicalGpu::properties() const
{
    return get_info<VkPhysicalDeviceProperties>(gpu_, VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES, 1)[0];
}

VkPhysicalDevicePerformance PhysicalGpu::performance() const
{
    return get_info<VkPhysicalDevicePerformance>(gpu_, VK_PHYSICAL_DEVICE_INFO_TYPE_PERFORMANCE, 1)[0];
}

std::vector<VkPhysicalDeviceQueueProperties> PhysicalGpu::queue_properties() const
{
    return get_info<VkPhysicalDeviceQueueProperties>(gpu_, VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES, 0);
}

VkPhysicalDeviceMemoryProperties PhysicalGpu::memory_properties() const
{
    return get_info<VkPhysicalDeviceMemoryProperties>(gpu_, VK_PHYSICAL_DEVICE_INFO_TYPE_MEMORY_PROPERTIES, 1)[0];
}

// TODO: stub -- extension dependencies are not resolved yet.
void PhysicalGpu::add_extension_dependencies(
        uint32_t dependency_count,
        VkExtensionProperties *dependency_props,
        std::vector<VkExtensionProperties> &ext_list)
{
    for (uint32_t i = 0; i < dependency_count; i++) {
    }
}

std::vector<VkExtensionProperties> PhysicalGpu::extensions() const
{
    // Extensions to enable
    static const char *known_exts[] = {
        "VK_WSI_LunarG",
    };
    std::vector<VkExtensionProperties> exts;
    size_t extSize = sizeof(uint32_t);
    uint32_t extCount = 0;
    if (!EXPECT(vkGetGlobalExtensionInfo(VK_EXTENSION_INFO_TYPE_COUNT, 0, &extSize, &extCount) == VK_SUCCESS))
        return exts;

    VkExtensionProperties extProp;
    extSize = sizeof(VkExtensionProperties);
    // TODO: Need to update this if/when we have more than 1 extension to enable
    for (uint32_t i = 0; i < extCount; i++) {
        if (!EXPECT(vkGetGlobalExtensionInfo(VK_EXTENSION_INFO_TYPE_PROPERTIES, i, &extSize, &extProp) == VK_SUCCESS))
            return exts;

        if (!strcmp(known_exts[0], extProp.name))
            exts.push_back(extProp);
    }

    return exts;
}

VkPhysicalDeviceCompatibilityInfo PhysicalGpu::compatibility(const PhysicalGpu &other) const
{
    VkPhysicalDeviceCompatibilityInfo data;
    if (!EXPECT(vkGetMultiDeviceCompatibility(gpu_, other.gpu_, &data) == VK_SUCCESS))
        memset(&data, 0, sizeof(data));

    return data;
}

void BaseObject::init(VkObject obj, VkObjectType type, bool own)
{
    EXPECT(!initialized());
    reinit(obj, type, own);
}

void BaseObject::reinit(VkObject obj, VkObjectType type, bool own)
{
    obj_ = obj;
    object_type_ = type;
    own_obj_ = own;
}

uint32_t Object::memory_allocation_count() const
{
    /// LUGMAL return get_info<uint32_t>(dev_->obj(), type(), obj(), VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT, 1)[0];
    return 1;
}

std::vector<VkMemoryRequirements> Object::memory_requirements() const
{
    //// VkResult err;
    uint32_t num_allocations = 1;
    //// size_t num_alloc_size = sizeof(num_allocations);
    //// err = vkGetObjectInfo(dev_->obj(), type(), obj(), VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
    ////                       &num_alloc_size, &num_allocations);
    //// EXPECT(err == VK_SUCCESS && num_alloc_size == sizeof(num_allocations));
    std::vector<VkMemoryRequirements> info =
        get_info<VkMemoryRequirements>(dev_->obj(), type(), obj(), VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS, 0);
    EXPECT(info.size() == num_allocations);
    if (info.size() == 1 && !info[0].size)
        info.clear();

    return info;
}
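
// Illustration (not part of the original file): installing an error callback
// so that EXPECT failures fail the enclosing test instead of only printing to
// stderr.  The callback signature below is inferred from the expect_failure()
// call above; a hypothetical GTest-style hookup might be:
//
//     static void on_expect_failure(const char *expr, const char *file,
//                                   unsigned int line, const char *function)
//     {
//         ADD_FAILURE() << file << ":" << line << " (" << function << "): " << expr;
//     }
//
//     vk_testing::set_error_callback(on_expect_failure);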
void Object::init(VkObject obj, VkObjectType object_type, bool own)
{
    BaseObject::init(obj, object_type, own);
    mem_alloc_count_ = memory_allocation_count();
}

void Object::reinit(VkObject obj, VkObjectType object_type, bool own)
{
    cleanup();
    BaseObject::reinit(obj, object_type, own);
    mem_alloc_count_ = memory_allocation_count();
}

void Object::cleanup()
{
    if (!initialized())
        return;

    if (own())
        EXPECT(vkDestroyObject(dev_->obj(), type(), obj()) == VK_SUCCESS);

    if (internal_mems_) {
        delete[] internal_mems_;
        internal_mems_ = NULL;
        primary_mem_ = NULL;
    }

    mem_alloc_count_ = 0;
}

void Object::bind_memory(const GpuMemory &mem, VkDeviceSize mem_offset)
{
    bound = true;
    EXPECT(vkBindObjectMemory(dev_->obj(), type(), obj(), mem.obj(), mem_offset) == VK_SUCCESS);
}

void Object::alloc_memory()
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new GpuMemory[mem_alloc_count_];

    const std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    VkMemoryAllocInfo info, *next_info = NULL;

    for (size_t i = 0; i < mem_reqs.size(); i++) {
        info = GpuMemory::alloc_info(mem_reqs[i], next_info);
        primary_mem_ = &internal_mems_[i];
        internal_mems_[i].init(*dev_, info);
        bind_memory(internal_mems_[i], 0);
    }
}

void Object::alloc_memory(VkMemoryPropertyFlags &reqs)
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new GpuMemory[mem_alloc_count_];

    std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    VkMemoryAllocInfo info, *next_info = NULL;

    for (size_t i = 0; i < mem_reqs.size(); i++) {
        mem_reqs[i].memPropsRequired |= reqs;
        info = GpuMemory::alloc_info(mem_reqs[i], next_info);
        primary_mem_ = &internal_mems_[i];
        internal_mems_[i].init(*dev_, info);
        bind_memory(internal_mems_[i], 0);
    }
}

void Object::alloc_memory(const std::vector<VkDeviceMemory> &mems)
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new GpuMemory[mem_alloc_count_];

    const std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    if (!EXPECT(mem_reqs.size() == mems.size()))
        return;

    for (size_t i = 0; i < mem_reqs.size(); i++) {
        primary_mem_ = &internal_mems_[i];

        internal_mems_[i].init(*dev_, mems[i]);
        bind_memory(internal_mems_[i], 0);
    }
}

std::vector<VkDeviceMemory> Object::memories() const
{
    std::vector<VkDeviceMemory> mems;
    if (internal_mems_) {
        mems.reserve(mem_alloc_count_);
        for (uint32_t i = 0; i < mem_alloc_count_; i++)
            mems.push_back(internal_mems_[i].obj());
    }

    return mems;
}

Device::~Device()
{
    if (!initialized())
        return;

    for (int i = 0; i < QUEUE_COUNT; i++) {
        for (std::vector<Queue *>::iterator it = queues_[i].begin(); it != queues_[i].end(); it++)
            delete *it;
        queues_[i].clear();
    }

    EXPECT(vkDestroyDevice(obj()) == VK_SUCCESS);
}
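
// Illustration (not part of the original file): the lifecycle the Object
// helpers implement.  init() creates the Vulkan object, alloc_memory()
// queries its memory requirements, allocates one GpuMemory per requirement,
// and binds each at offset 0.  A hypothetical caller (buf_create_info is an
// assumed name) sees only:
//
//     vk_testing::Buffer buf;
//     buf.init(dev, buf_create_info);                   // init_no_mem() + alloc_memory()
//     std::vector<VkDeviceMemory> mems = buf.memories(); // internal allocation handles
//
// cleanup() (invoked from reinit() and the wrapper destructors) destroys the
// object and releases the internal allocations.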
void Device::init(std::vector<VkExtensionProperties> extensions)
{
    // request all queues
    const std::vector<VkPhysicalDeviceQueueProperties> queue_props = gpu_.queue_properties();
    std::vector<VkDeviceQueueCreateInfo> queue_info;
    queue_info.reserve(queue_props.size());
    for (uint32_t i = 0; i < queue_props.size(); i++) {
        VkDeviceQueueCreateInfo qi = {};
        qi.queueNodeIndex = i;
        qi.queueCount = queue_props[i].queueCount;
        if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphics_queue_node_index_ = i;
        }
        queue_info.push_back(qi);
    }

    VkDeviceCreateInfo dev_info = {};
    dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    dev_info.pNext = NULL;
    dev_info.queueRecordCount = queue_info.size();
    dev_info.pRequestedQueues = &queue_info[0];
    dev_info.extensionCount = extensions.size();
    dev_info.pEnabledExtensions = &extensions[0];
    dev_info.flags = 0;

    init(dev_info);
}

void Device::init(const VkDeviceCreateInfo &info)
{
    VkDevice obj;
    if (EXPECT(vkCreateDevice(gpu_.obj(), &info, &obj) == VK_SUCCESS)) {
        base_type::init(obj, VK_OBJECT_TYPE_DEVICE);
        init_queues();
        init_formats();
    }
}

void Device::init_queues()
{
    VkResult err;
    size_t data_size;
    uint32_t queue_node_count;

    err = vkGetPhysicalDeviceInfo(gpu_.obj(), VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES,
                                  &data_size, NULL);
    EXPECT(err == VK_SUCCESS);

    queue_node_count = data_size / sizeof(VkPhysicalDeviceQueueProperties);
    EXPECT(queue_node_count >= 1);

    VkPhysicalDeviceQueueProperties *queue_props = new VkPhysicalDeviceQueueProperties[queue_node_count];

    err = vkGetPhysicalDeviceInfo(gpu_.obj(), VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES,
                                  &data_size, queue_props);
    EXPECT(err == VK_SUCCESS);

    for (uint32_t i = 0; i < queue_node_count; i++) {
        VkQueue queue;

        for (uint32_t j = 0; j < queue_props[i].queueCount; j++) {
            // TODO: Need to add support for separate MEMMGR and work queues, including synchronization
            err = vkGetDeviceQueue(obj(), i, j, &queue);
            EXPECT(err == VK_SUCCESS);

            if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
                queues_[GRAPHICS].push_back(new Queue(queue));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
                queues_[COMPUTE].push_back(new Queue(queue));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_DMA_BIT) {
                queues_[DMA].push_back(new Queue(queue));
            }
        }
    }

    delete[] queue_props;

    EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
}

void Device::init_formats()
{
    for (int f = VK_FORMAT_BEGIN_RANGE; f <= VK_FORMAT_END_RANGE; f++) {
        const VkFormat fmt = static_cast<VkFormat>(f);
        const VkFormatProperties props = format_properties(fmt);

        if (props.linearTilingFeatures) {
            const Format tmp = { fmt, VK_IMAGE_TILING_LINEAR, props.linearTilingFeatures };
            formats_.push_back(tmp);
        }

        if (props.optimalTilingFeatures) {
            const Format tmp = { fmt, VK_IMAGE_TILING_OPTIMAL, props.optimalTilingFeatures };
            formats_.push_back(tmp);
        }
    }

    EXPECT(!formats_.empty());
}

VkFormatProperties Device::format_properties(VkFormat format)
{
    const VkFormatInfoType type = VK_FORMAT_INFO_TYPE_PROPERTIES;
    VkFormatProperties data;
    size_t size = sizeof(data);
    if (!EXPECT(vkGetFormatInfo(obj(), format, type, &size, &data) == VK_SUCCESS && size == sizeof(data)))
        memset(&data, 0, sizeof(data));

    return data;
}
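
// Illustration (not part of the original file): using the formats_ table
// built by init_formats().  A hypothetical caller checking whether a format
// supports a given linear-tiling feature (format name and use_features() are
// assumed, not part of this binding) might do:
//
//     const std::vector<vk_testing::Device::Format> &fmts = dev.formats();
//     for (size_t i = 0; i < fmts.size(); i++) {
//         if (fmts[i].format == VK_FORMAT_R8G8B8A8_UNORM &&
//             fmts[i].tiling == VK_IMAGE_TILING_LINEAR)
//             use_features(fmts[i].features);
//     }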
void Device::wait()
{
    EXPECT(vkDeviceWaitIdle(obj()) == VK_SUCCESS);
}

VkResult Device::wait(const std::vector<const Fence *> &fences, bool wait_all, uint64_t timeout)
{
    const std::vector<VkFence> fence_objs = make_objects<VkFence>(fences);
    VkResult err = vkWaitForFences(obj(), fence_objs.size(), &fence_objs[0], wait_all, timeout);
    EXPECT(err == VK_SUCCESS || err == VK_TIMEOUT);

    return err;
}

VkResult Device::update_descriptor_sets(const std::vector<VkWriteDescriptorSet> &writes, const std::vector<VkCopyDescriptorSet> &copies)
{
    return vkUpdateDescriptorSets(obj(), writes.size(), &writes[0], copies.size(), &copies[0]);
}

void Queue::submit(const std::vector<const CmdBuffer *> &cmds, Fence &fence)
{
    const std::vector<VkCmdBuffer> cmd_objs = make_objects<VkCmdBuffer>(cmds);
    EXPECT(vkQueueSubmit(obj(), cmd_objs.size(), &cmd_objs[0], fence.obj()) == VK_SUCCESS);
}

void Queue::submit(const CmdBuffer &cmd, Fence &fence)
{
    submit(std::vector<const CmdBuffer *>(1, &cmd), fence);
}

void Queue::submit(const CmdBuffer &cmd)
{
    Fence fence;
    submit(cmd, fence);
}

void Queue::wait()
{
    EXPECT(vkQueueWaitIdle(obj()) == VK_SUCCESS);
}

void Queue::signal_semaphore(Semaphore &sem)
{
    EXPECT(vkQueueSignalSemaphore(obj(), sem.obj()) == VK_SUCCESS);
}

void Queue::wait_semaphore(Semaphore &sem)
{
    EXPECT(vkQueueWaitSemaphore(obj(), sem.obj()) == VK_SUCCESS);
}

GpuMemory::~GpuMemory()
{
    if (initialized() && own())
        EXPECT(vkFreeMemory(dev_->obj(), obj()) == VK_SUCCESS);
}

void GpuMemory::init(const Device &dev, const VkMemoryAllocInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkAllocMemory, dev, VK_OBJECT_TYPE_DEVICE_MEMORY, &info);
}

void GpuMemory::init(const Device &dev, size_t size, const void *data)
{
    DERIVED_OBJECT_TYPE_INIT(vkPinSystemMemory, dev, VK_OBJECT_TYPE_DEVICE_MEMORY, data, size);
}

void GpuMemory::init(const Device &dev, const VkMemoryOpenInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkOpenSharedMemory, dev, VK_OBJECT_TYPE_DEVICE_MEMORY, &info);
}

void GpuMemory::init(const Device &dev, const VkPeerMemoryOpenInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkOpenPeerMemory, dev, VK_OBJECT_TYPE_DEVICE_MEMORY, &info);
}

void GpuMemory::init(const Device &dev, VkDeviceMemory mem)
{
    dev_ = &dev;
    // Wrap an externally owned allocation; do not free it on destruction.
    BaseObject::init(mem, VK_OBJECT_TYPE_DEVICE_MEMORY, false);
}

const void *GpuMemory::map(VkFlags flags) const
{
    void *data;
    if (!EXPECT(vkMapMemory(dev_->obj(), obj(), 0, 0, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void *GpuMemory::map(VkFlags flags)
{
    void *data;
    if (!EXPECT(vkMapMemory(dev_->obj(), obj(), 0, 0, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void GpuMemory::unmap() const
{
    EXPECT(vkUnmapMemory(dev_->obj(), obj()) == VK_SUCCESS);
}
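
// Illustration (not part of the original file): filling an allocation through
// the map()/unmap() helpers.  A hypothetical caller uploading data
// (alloc_info and vertices are assumed names):
//
//     vk_testing::GpuMemory mem;
//     mem.init(dev, alloc_info);
//     void *ptr = mem.map(0);
//     if (ptr) {
//         memcpy(ptr, vertices, sizeof(vertices));
//         mem.unmap();
//     }
//
// map() returns NULL when vkMapMemory() fails, so callers must check before
// writing.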
void Fence::init(const Device &dev, const VkFenceCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateFence, dev, VK_OBJECT_TYPE_FENCE, &info);
    alloc_memory();
}

void Semaphore::init(const Device &dev, const VkSemaphoreCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateSemaphore, dev, VK_OBJECT_TYPE_SEMAPHORE, &info);
    alloc_memory();
}

void Semaphore::init(const Device &dev, const VkSemaphoreOpenInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkOpenSharedSemaphore, dev, VK_OBJECT_TYPE_SEMAPHORE, &info);
}

void Event::init(const Device &dev, const VkEventCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateEvent, dev, VK_OBJECT_TYPE_EVENT, &info);
    alloc_memory();
}

void Event::set()
{
    EXPECT(vkSetEvent(dev_->obj(), obj()) == VK_SUCCESS);
}

void Event::reset()
{
    EXPECT(vkResetEvent(dev_->obj(), obj()) == VK_SUCCESS);
}

void QueryPool::init(const Device &dev, const VkQueryPoolCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateQueryPool, dev, VK_OBJECT_TYPE_QUERY_POOL, &info);
    alloc_memory();
}

VkResult QueryPool::results(uint32_t start, uint32_t count, size_t size, void *data)
{
    size_t tmp = size;
    VkResult err = vkGetQueryPoolResults(dev_->obj(), obj(), start, count, &tmp, data, 0);
    if (err == VK_SUCCESS) {
        if (!EXPECT(tmp == size))
            memset(data, 0, size);
    } else {
        EXPECT(err == VK_NOT_READY);
    }

    return err;
}
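
// Illustration (not part of the original file): results() forwards
// VK_NOT_READY to the caller, so a hypothetical test can poll until the
// queries retire (the buffer layout here assumes 64-bit counters):
//
//     uint64_t counts[2];
//     while (query_pool.results(0, 2, sizeof(counts), counts) == VK_NOT_READY)
//         ; // spin; a real test would bound this or wait on a fence instead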
void Buffer::init(const Device &dev, const VkBufferCreateInfo &info)
{
    init_no_mem(dev, info);
    alloc_memory();
}

void Buffer::init(const Device &dev, const VkBufferCreateInfo &info, VkMemoryPropertyFlags &reqs)
{
    init_no_mem(dev, info);
    alloc_memory(reqs);
}

void Buffer::init_no_mem(const Device &dev, const VkBufferCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateBuffer, dev, VK_OBJECT_TYPE_BUFFER, &info);
    create_info_ = info;
}

void Buffer::bind_memory(VkDeviceSize offset, VkDeviceSize size,
                         const GpuMemory &mem, VkDeviceSize mem_offset)
{
    VkQueue queue = dev_->graphics_queues()[0]->obj();
    EXPECT(vkQueueBindSparseBufferMemory(queue, obj(), offset, size, mem.obj(), mem_offset) == VK_SUCCESS);
}

void BufferView::init(const Device &dev, const VkBufferViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateBufferView, dev, VK_OBJECT_TYPE_BUFFER_VIEW, &info);
    alloc_memory();
}

void Image::init(const Device &dev, const VkImageCreateInfo &info)
{
    init_no_mem(dev, info);
    alloc_memory();
}

void Image::init(const Device &dev, const VkImageCreateInfo &info, VkMemoryPropertyFlags &reqs)
{
    init_no_mem(dev, info);
    alloc_memory(reqs);
}

void Image::init_no_mem(const Device &dev, const VkImageCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateImage, dev, VK_OBJECT_TYPE_IMAGE, &info);
    init_info(dev, info);
}

void Image::init(const Device &dev, const VkPeerImageOpenInfo &info, const VkImageCreateInfo &original_info)
{
    VkImage img;
    VkDeviceMemory mem;
    dev_ = &dev;
    EXPECT(vkOpenPeerImage(dev.obj(), &info, &img, &mem) == VK_SUCCESS);
    Object::init(img, VK_OBJECT_TYPE_IMAGE);

    init_info(dev, original_info);
    alloc_memory(std::vector<VkDeviceMemory>(1, mem));
}

void Image::init_info(const Device &dev, const VkImageCreateInfo &info)
{
    create_info_ = info;

    // Cache the format features for this format/tiling combination.
    for (std::vector<Device::Format>::const_iterator it = dev.formats().begin(); it != dev.formats().end(); it++) {
        if (memcmp(&it->format, &create_info_.format, sizeof(it->format)) == 0 && it->tiling == create_info_.tiling) {
            format_features_ = it->features;
            break;
        }
    }
}

void Image::bind_memory(const Device &dev, const VkImageMemoryBindInfo &info,
                        const GpuMemory &mem, VkDeviceSize mem_offset)
{
    VkQueue queue = dev.graphics_queues()[0]->obj();
    EXPECT(vkQueueBindSparseImageMemory(queue, obj(), &info, mem.obj(), mem_offset) == VK_SUCCESS);
}

VkSubresourceLayout Image::subresource_layout(const VkImageSubresource &subres) const
{
    const VkSubresourceInfoType type = VK_SUBRESOURCE_INFO_TYPE_LAYOUT;
    VkSubresourceLayout data;
    size_t size = sizeof(data);
    if (!EXPECT(vkGetImageSubresourceInfo(dev_->obj(), obj(), &subres, type, &size, &data) == VK_SUCCESS && size == sizeof(data)))
        memset(&data, 0, sizeof(data));

    return data;
}

bool Image::transparent() const
{
    // "Transparent" images are host-accessible: linear tiling, single sample,
    // and no color or depth/stencil attachment usage.
    return (create_info_.tiling == VK_IMAGE_TILING_LINEAR &&
            create_info_.samples == 1 &&
            !(create_info_.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                    VK_IMAGE_USAGE_DEPTH_STENCIL_BIT)));
}

void ImageView::init(const Device &dev, const VkImageViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateImageView, dev, VK_OBJECT_TYPE_IMAGE_VIEW, &info);
    alloc_memory();
}

void ColorAttachmentView::init(const Device &dev, const VkColorAttachmentViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateColorAttachmentView, dev, VK_OBJECT_TYPE_COLOR_ATTACHMENT_VIEW, &info);
    alloc_memory();
}

void DepthStencilView::init(const Device &dev, const VkDepthStencilViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDepthStencilView, dev, VK_OBJECT_TYPE_DEPTH_STENCIL_VIEW, &info);
    alloc_memory();
}

void Shader::init(const Device &dev, const VkShaderCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateShader, dev, VK_OBJECT_TYPE_SHADER, &info);
}

VkResult Shader::init_try(const Device &dev, const VkShaderCreateInfo &info)
{
    /*
     * Note: Cannot use DERIVED_OBJECT_TYPE_INIT as we need the
     * return code.
     */
    VkShader sh;
    dev_ = &dev;
    VkResult err = vkCreateShader(dev.obj(), &info, &sh);
    if (err == VK_SUCCESS)
        Object::init(sh, VK_OBJECT_TYPE_SHADER);

    return err;
}

void Pipeline::init(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateGraphicsPipeline, dev, VK_OBJECT_TYPE_PIPELINE, &info);
    alloc_memory();
}

VkResult Pipeline::init_try(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
{
    VkPipeline pipe;
    dev_ = &dev;
    VkResult err = vkCreateGraphicsPipeline(dev.obj(), &info, &pipe);
    if (err == VK_SUCCESS) {
        Object::init(pipe, VK_OBJECT_TYPE_PIPELINE);
        alloc_memory();
    }

    return err;
}

void Pipeline::init(
        const Device &dev,
        const VkGraphicsPipelineCreateInfo &info,
        const VkPipeline basePipeline)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateGraphicsPipelineDerivative, dev, VK_OBJECT_TYPE_PIPELINE, &info, basePipeline);
    alloc_memory();
}

void Pipeline::init(const Device &dev, const VkComputePipelineCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateComputePipeline, dev, VK_OBJECT_TYPE_PIPELINE, &info);
    alloc_memory();
}

void Pipeline::init(const Device &dev, size_t size, const void *data)
{
    DERIVED_OBJECT_TYPE_INIT(vkLoadPipeline, dev, VK_OBJECT_TYPE_PIPELINE, size, data);
    alloc_memory();
}

void Pipeline::init(
        const Device &dev,
        size_t size,
        const void *data,
        const VkPipeline basePipeline)
{
    DERIVED_OBJECT_TYPE_INIT(vkLoadPipelineDerivative, dev, VK_OBJECT_TYPE_PIPELINE, size, data, basePipeline);
    alloc_memory();
}

size_t Pipeline::store(size_t size, void *data)
{
    if (!EXPECT(vkStorePipeline(dev_->obj(), obj(), &size, data) == VK_SUCCESS))
        size = 0;

    return size;
}

void Sampler::init(const Device &dev, const VkSamplerCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateSampler, dev, VK_OBJECT_TYPE_SAMPLER, &info);
    alloc_memory();
}
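
// Illustration (not part of the original file): a store()/load round trip
// using the entry points above.  A hypothetical test caching a pipeline blob
// (blob is an assumed caller-owned buffer):
//
//     size_t blob_size = pipe.store(sizeof(blob), blob);  // returns 0 on failure
//     if (blob_size) {
//         vk_testing::Pipeline loaded;
//         loaded.init(dev, blob_size, blob);              // vkLoadPipeline path
//     }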
void DescriptorSetLayout::init(const Device &dev, const VkDescriptorSetLayoutCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDescriptorSetLayout, dev, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT, &info);
    alloc_memory();
}

void PipelineLayout::init(const Device &dev, VkPipelineLayoutCreateInfo &info, const std::vector<const DescriptorSetLayout *> &layouts)
{
    const std::vector<VkDescriptorSetLayout> layout_objs = make_objects<VkDescriptorSetLayout>(layouts);
    info.pSetLayouts = &layout_objs[0];

    DERIVED_OBJECT_TYPE_INIT(vkCreatePipelineLayout, dev, VK_OBJECT_TYPE_PIPELINE_LAYOUT, &info);
    alloc_memory();
}

void DescriptorPool::init(const Device &dev, VkDescriptorPoolUsage usage,
                          uint32_t max_sets, const VkDescriptorPoolCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDescriptorPool, dev, VK_OBJECT_TYPE_DESCRIPTOR_POOL, usage, max_sets, &info);
    alloc_memory();
}

void DescriptorPool::reset()
{
    EXPECT(vkResetDescriptorPool(dev_->obj(), obj()) == VK_SUCCESS);
}

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const std::vector<const DescriptorSetLayout *> &layouts)
{
    const std::vector<VkDescriptorSetLayout> layout_objs = make_objects<VkDescriptorSetLayout>(layouts);

    std::vector<VkDescriptorSet> set_objs;
    set_objs.resize(layout_objs.size());

    uint32_t set_count = 0;
    VkResult err = vkAllocDescriptorSets(dev_->obj(), obj(), usage, layout_objs.size(), &layout_objs[0], &set_objs[0], &set_count);
    if (err == VK_SUCCESS)
        EXPECT(set_count == set_objs.size());
    set_objs.resize(set_count);

    std::vector<DescriptorSet *> sets;
    sets.reserve(set_count);
    for (std::vector<VkDescriptorSet>::const_iterator it = set_objs.begin(); it != set_objs.end(); it++) {
        // do descriptor sets need memories bound?
        DescriptorSet *descriptorSet = new DescriptorSet(dev, *it);
        sets.push_back(descriptorSet);
    }
    return sets;
}

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout, uint32_t count)
{
    return alloc_sets(dev, usage, std::vector<const DescriptorSetLayout *>(count, &layout));
}

DescriptorSet *DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout)
{
    std::vector<DescriptorSet *> set = alloc_sets(dev, usage, layout, 1);
    return (set.empty()) ? NULL : set[0];
}

void DescriptorPool::clear_sets(const std::vector<DescriptorSet *> &sets)
{
    const std::vector<VkDescriptorSet> set_objs = make_objects<VkDescriptorSet>(sets);
    vkClearDescriptorSets(dev_->obj(), obj(), set_objs.size(), &set_objs[0]);
}
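
// Illustration (not part of the original file): typical pool usage built from
// the helpers above.  A hypothetical test allocating two sets of one layout
// (pool_create_info and the usage enum values are assumptions based on the
// headers of this era):
//
//     vk_testing::DescriptorPool pool;
//     pool.init(dev, VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT, 2, pool_create_info);
//     std::vector<vk_testing::DescriptorSet *> sets =
//         pool.alloc_sets(dev, VK_DESCRIPTOR_SET_USAGE_STATIC, layout, 2);
//     // write/copy descriptors via Device::update_descriptor_sets(), draw, then:
//     pool.clear_sets(sets);   // or pool.reset() to reclaim everything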
void DynamicVpStateObject::init(const Device &dev, const VkDynamicVpStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicViewportState, dev, VK_OBJECT_TYPE_DYNAMIC_VP_STATE, &info);
    alloc_memory();
}

void DynamicRsStateObject::init(const Device &dev, const VkDynamicRsStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicRasterState, dev, VK_OBJECT_TYPE_DYNAMIC_RS_STATE, &info);
    alloc_memory();
}

void DynamicCbStateObject::init(const Device &dev, const VkDynamicCbStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicColorBlendState, dev, VK_OBJECT_TYPE_DYNAMIC_CB_STATE, &info);
    alloc_memory();
}

void DynamicDsStateObject::init(const Device &dev, const VkDynamicDsStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicDepthStencilState, dev, VK_OBJECT_TYPE_DYNAMIC_DS_STATE, &info);
    alloc_memory();
}

void CmdBuffer::init(const Device &dev, const VkCmdBufferCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateCommandBuffer, dev, VK_OBJECT_TYPE_COMMAND_BUFFER, &info);
}

void CmdBuffer::begin(const VkCmdBufferBeginInfo *info)
{
    EXPECT(vkBeginCommandBuffer(obj(), info) == VK_SUCCESS);
}

void CmdBuffer::begin(VkRenderPass renderpass_obj, VkFramebuffer framebuffer_obj)
{
    VkCmdBufferBeginInfo info = {};
    VkCmdBufferGraphicsBeginInfo graphics_cmd_buf_info = {};
    graphics_cmd_buf_info.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_GRAPHICS_BEGIN_INFO;
    graphics_cmd_buf_info.pNext = NULL;
    graphics_cmd_buf_info.renderPassContinue.renderPass = renderpass_obj;
    graphics_cmd_buf_info.renderPassContinue.framebuffer = framebuffer_obj;

    info.flags = VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT |
                 VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT;
    info.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO;
    info.pNext = &graphics_cmd_buf_info;

    begin(&info);
}

void CmdBuffer::begin()
{
    VkCmdBufferBeginInfo info = {};
    info.flags = VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT |
                 VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT;
    info.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO;

    begin(&info);
}

void CmdBuffer::end()
{
    EXPECT(vkEndCommandBuffer(obj()) == VK_SUCCESS);
}

void CmdBuffer::reset()
{
    EXPECT(vkResetCommandBuffer(obj()) == VK_SUCCESS);
}

} // namespace vk_testing
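
// Illustration (not part of the original file): the record/submit cycle these
// wrappers support.  A hypothetical test body (cmd_create_info is an assumed
// name):
//
//     vk_testing::CmdBuffer cmd;
//     cmd.init(dev, cmd_create_info);
//     cmd.begin();                  // one-time-submit + small-batch flags
//     // ... record vkCmd*() calls against cmd.obj() ...
//     cmd.end();
//     dev.graphics_queues()[0]->submit(cmd);
//     dev.graphics_queues()[0]->wait();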