vktestbinding.cpp revision b774fb40e4cea42a483ae272956fedcf3f0f3d82
// VK tests
//
// Copyright (C) 2014 LunarG, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

#include <iostream>
#include <string.h> // memset(), memcmp()
#include <assert.h>
#include "vktestbinding.h"

namespace {

#define DERIVED_OBJECT_TYPE_INIT(create_func, dev, vk_object_type, ...)        \
    do {                                                                        \
        obj_type obj;                                                           \
        dev_ = &dev;                                                            \
        if (EXPECT(create_func(dev.obj(), __VA_ARGS__, &obj) == VK_SUCCESS))    \
            base_type::init(obj, vk_object_type);                               \
    } while (0)
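
// For illustration: this macro assumes the wrapper class (declared in
// vktestbinding.h) provides obj_type and base_type typedefs and a dev_
// member. Assuming Fence typedefs obj_type as VkFence and base_type as
// Object, a call such as
//
//     DERIVED_OBJECT_TYPE_INIT(vkCreateFence, dev, VK_OBJECT_TYPE_FENCE, &info);
//
// expands (modulo the do/while wrapper) to roughly
//
//     VkFence obj;
//     dev_ = &dev;
//     if (EXPECT(vkCreateFence(dev.obj(), &info, &obj) == VK_SUCCESS))
//         Object::init(obj, VK_OBJECT_TYPE_FENCE);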

#define STRINGIFY(x) #x
#define EXPECT(expr) ((expr) ? true : expect_failure(STRINGIFY(expr), __FILE__, __LINE__, __FUNCTION__))
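
// For illustration: EXPECT(err == VK_SUCCESS) evaluates to true when the
// expectation holds; on failure it reports
// "<file>:<line>: <function>: Expectation `err == VK_SUCCESS' failed."
// (or invokes the installed ErrorCallback) and yields false, so callers can
// branch on it: if (!EXPECT(...)) return;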
vk_testing::ErrorCallback error_callback;

bool expect_failure(const char *expr, const char *file, unsigned int line, const char *function)
{
    if (error_callback) {
        error_callback(expr, file, line, function);
    } else {
        std::cerr << file << ":" << line << ": " << function <<
            ": Expectation `" << expr << "' failed.\n";
    }

    return false;
}

template<class T, class S>
std::vector<T> make_objects(const std::vector<S> &v)
{
    std::vector<T> objs;
    objs.reserve(v.size());
    for (typename std::vector<S>::const_iterator it = v.begin(); it != v.end(); ++it)
        objs.push_back((*it)->obj());
    return objs;
}
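
// For illustration: the wrappers expose their raw Vulkan handles via obj(),
// so a caller can flatten wrapper pointers into a handle array, e.g.
//
//     std::vector<const Fence *> fences = /* ... */;
//     std::vector<VkFence> handles = make_objects<VkFence>(fences);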

template<typename T>
std::vector<T> get_memory_reqs(VkDevice device, VkObjectType obj_type, VkObject obj, size_t min_elems)
{
    std::vector<T> info;

    info.resize((min_elems > 0) ? min_elems : 1);
    if (!EXPECT(vkGetObjectMemoryRequirements(device, obj_type, obj, &info[0]) == VK_SUCCESS))
        info.clear();

    if (info.size() < min_elems)
        info.resize(min_elems);

    return info;
}
} // namespace

namespace vk_testing {

void set_error_callback(ErrorCallback callback)
{
    error_callback = callback;
}

VkPhysicalDeviceProperties PhysicalGpu::properties() const
{
    VkPhysicalDeviceProperties info;

    EXPECT(vkGetPhysicalDeviceProperties(gpu_, &info) == VK_SUCCESS);

    return info;
}

VkPhysicalDevicePerformance PhysicalGpu::performance() const
{
    VkPhysicalDevicePerformance info;

    EXPECT(vkGetPhysicalDevicePerformance(gpu_, &info) == VK_SUCCESS);

    return info;
}

std::vector<VkPhysicalDeviceQueueProperties> PhysicalGpu::queue_properties() const
{
    std::vector<VkPhysicalDeviceQueueProperties> info;
    uint32_t count;

    if (EXPECT(vkGetPhysicalDeviceQueueCount(gpu_, &count) == VK_SUCCESS)) {
        info.resize(count);
        if (!EXPECT(vkGetPhysicalDeviceQueueProperties(gpu_, count, &info[0]) == VK_SUCCESS))
            info.clear();
    }

    return info;
}

VkPhysicalDeviceMemoryProperties PhysicalGpu::memory_properties() const
{
    VkPhysicalDeviceMemoryProperties info;

    EXPECT(vkGetPhysicalDeviceMemoryProperties(gpu_, &info) == VK_SUCCESS);

    return info;
}

/*
 * Return the list of global layers available
 */
std::vector<VkLayerProperties> GetGlobalLayers()
{
    VkResult err;
    std::vector<VkLayerProperties> layers;
    uint32_t layer_count;

    do {
        layer_count = 0;
        err = vkGetGlobalLayerProperties(&layer_count, NULL);

        if (err == VK_SUCCESS) {
            // resize() (not reserve()) so that &layers[0] is valid, writable storage
            layers.resize(layer_count);
            err = vkGetGlobalLayerProperties(&layer_count, &layers[0]);
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return layers;
}
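
// For illustration: the enumeration helpers in this file all follow the same
// two-call Vulkan pattern: query the count with a NULL array, size the
// vector, then fetch the data, retrying while the driver reports
// VK_INCOMPLETE (e.g. if the count changed between the two calls):
//
//     uint32_t count = 0;
//     vkGetGlobalExtensionProperties(NULL, &count, NULL);       // get count
//     std::vector<VkExtensionProperties> props(count);
//     vkGetGlobalExtensionProperties(NULL, &count, &props[0]);  // fill array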

/*
 * Return the list of global extensions provided by the ICD / loader
 */
std::vector<VkExtensionProperties> GetGlobalExtensions()
{
    return GetGlobalExtensions(NULL);
}

/*
 * Return the list of global extensions provided by the specified layer.
 * If pLayerName is NULL, return the extensions implemented by the loader / ICDs.
 */
std::vector<VkExtensionProperties> GetGlobalExtensions(const char *pLayerName)
{
    std::vector<VkExtensionProperties> exts;
    uint32_t ext_count;
    VkResult err;

    do {
        ext_count = 0;
        err = vkGetGlobalExtensionProperties(pLayerName, &ext_count, NULL);

        if (err == VK_SUCCESS) {
            exts.resize(ext_count);
            err = vkGetGlobalExtensionProperties(pLayerName, &ext_count, &exts[0]);
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return exts;
}

/*
 * Return the list of PhysicalDevice extensions provided by the ICD / loader
 */
std::vector<VkExtensionProperties> PhysicalGpu::extensions() const
{
    return extensions(NULL);
}

/*
 * Return the list of PhysicalDevice extensions provided by the specified layer.
 * If pLayerName is NULL, return the extensions implemented by the ICD / loader.
 */
std::vector<VkExtensionProperties> PhysicalGpu::extensions(const char *pLayerName) const
{
    std::vector<VkExtensionProperties> exts;
    VkResult err;

    do {
        uint32_t extCount = 0;
        err = vkGetPhysicalDeviceExtensionProperties(obj(), pLayerName, &extCount, NULL);

        if (err == VK_SUCCESS) {
            // resize() (not reserve()) so that &exts[0] is valid, writable storage
            exts.resize(extCount);
            err = vkGetPhysicalDeviceExtensionProperties(obj(), pLayerName, &extCount, &exts[0]);
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return exts;
}

VkResult PhysicalGpu::set_memory_type(const uint32_t type_bits, VkMemoryAllocInfo *info, const VkFlags properties) const
{
    uint32_t type_mask = type_bits;
    // Search memtypes to find first index with those properties
    for (uint32_t i = 0; i < 32; i++) {
        if ((type_mask & 1) == 1) {
            // Type is available, does it match user properties?
            if ((memory_properties_.memoryTypes[i].propertyFlags & properties) == properties) {
                info->memoryTypeIndex = i;
                return VK_SUCCESS;
            }
        }
        type_mask >>= 1;
    }
    // No memory types matched, return failure
    return VK_UNSUPPORTED;
}
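
// For illustration: with type_bits = 0x6 (memory types 1 and 2 allowed) and
// a host-visible property flag requested, the loop skips type 0 (its bit is
// clear) and returns the lowest allowed index whose propertyFlags contain
// all of the requested bits, leaving info->memoryTypeIndex set accordingly.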

/*
 * Return the list of PhysicalDevice layers
 */
std::vector<VkLayerProperties> PhysicalGpu::layers() const
{
    std::vector<VkLayerProperties> layer_props;
    VkResult err;

    do {
        uint32_t layer_count = 0;
        err = vkGetPhysicalDeviceLayerProperties(obj(), &layer_count, NULL);

        if (err == VK_SUCCESS) {
            // resize() (not reserve()) so that &layer_props[0] is valid, writable storage
            layer_props.resize(layer_count);
            err = vkGetPhysicalDeviceLayerProperties(obj(), &layer_count, &layer_props[0]);
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return layer_props;
}

void BaseObject::init(VkObject obj, VkObjectType type, bool own)
{
    EXPECT(!initialized());
    reinit(obj, type, own);
}

void BaseObject::reinit(VkObject obj, VkObjectType type, bool own)
{
    obj_ = obj;
    object_type_ = type;
    own_obj_ = own;
}

uint32_t Object::memory_allocation_count() const
{
    return 1;
}

std::vector<VkMemoryRequirements> Object::memory_requirements() const
{
    uint32_t num_allocations = 1;
    std::vector<VkMemoryRequirements> info =
        get_memory_reqs<VkMemoryRequirements>(dev_->obj(), type(), obj(), 0);
    EXPECT(info.size() == num_allocations);
    if (info.size() == 1 && !info[0].size)
        info.clear();

    return info;
}

void Object::init(VkObject obj, VkObjectType object_type, bool own)
{
    BaseObject::init(obj, object_type, own);
    mem_alloc_count_ = memory_allocation_count();
}

void Object::reinit(VkObject obj, VkObjectType object_type, bool own)
{
    cleanup();
    BaseObject::reinit(obj, object_type, own);
    mem_alloc_count_ = memory_allocation_count();
}

void Object::cleanup()
{
    if (!initialized())
        return;

    if (own())
        EXPECT(vkDestroyObject(dev_->obj(), type(), obj()) == VK_SUCCESS);

    if (internal_mems_) {
        delete[] internal_mems_;
        internal_mems_ = NULL;
        primary_mem_ = NULL;
    }

    mem_alloc_count_ = 0;
}

void Object::bind_memory(const GpuMemory &mem, VkDeviceSize mem_offset)
{
    bound = true;
    EXPECT(vkBindObjectMemory(dev_->obj(), type(), obj(), mem.obj(), mem_offset) == VK_SUCCESS);
}

void Object::alloc_memory()
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new GpuMemory[mem_alloc_count_];

    const std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    VkMemoryAllocInfo info, *next_info = NULL;

    for (size_t i = 0; i < mem_reqs.size(); i++) {
        info = GpuMemory::alloc_info(mem_reqs[i], next_info);
        dev_->gpu().set_memory_type(mem_reqs[i].memoryTypeBits, &info, 0);
        primary_mem_ = &internal_mems_[i];
        internal_mems_[i].init(*dev_, info);
        bind_memory(internal_mems_[i], 0);
    }
}

void Object::alloc_memory(VkMemoryPropertyFlags &reqs)
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new GpuMemory[mem_alloc_count_];

    const std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    VkMemoryAllocInfo info, *next_info = NULL;

    for (size_t i = 0; i < mem_reqs.size(); i++) {
        info = GpuMemory::alloc_info(mem_reqs[i], next_info);
        dev_->gpu().set_memory_type(mem_reqs[i].memoryTypeBits, &info, reqs);
        primary_mem_ = &internal_mems_[i];
        internal_mems_[i].init(*dev_, info);
        bind_memory(internal_mems_[i], 0);
    }
}

void Object::alloc_memory(const std::vector<VkDeviceMemory> &mems)
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new GpuMemory[mem_alloc_count_];

    const std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    if (!EXPECT(mem_reqs.size() == mems.size()))
        return;

    for (size_t i = 0; i < mem_reqs.size(); i++) {
        primary_mem_ = &internal_mems_[i];

        internal_mems_[i].init(*dev_, mems[i]);
        bind_memory(internal_mems_[i], 0);
    }
}

std::vector<VkDeviceMemory> Object::memories() const
{
    std::vector<VkDeviceMemory> mems;
    if (internal_mems_) {
        mems.reserve(mem_alloc_count_);
        for (uint32_t i = 0; i < mem_alloc_count_; i++)
            mems.push_back(internal_mems_[i].obj());
    }

    return mems;
}

Device::~Device()
{
    if (!initialized())
        return;

    for (int i = 0; i < QUEUE_COUNT; i++) {
        for (std::vector<Queue *>::iterator it = queues_[i].begin(); it != queues_[i].end(); ++it)
            delete *it;
        queues_[i].clear();
    }

    EXPECT(vkDestroyDevice(obj()) == VK_SUCCESS);
}

void Device::init(std::vector<const char *> &extensions)
{
    // request all queues
    const std::vector<VkPhysicalDeviceQueueProperties> queue_props = gpu_.queue_properties();
    std::vector<VkDeviceQueueCreateInfo> queue_info;
    queue_info.reserve(queue_props.size());
    for (uint32_t i = 0; i < queue_props.size(); i++) {
        VkDeviceQueueCreateInfo qi = {};
        qi.queueNodeIndex = i;
        qi.queueCount = queue_props[i].queueCount;
        if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphics_queue_node_index_ = i;
        }
        queue_info.push_back(qi);
    }

    VkDeviceCreateInfo dev_info = {};
    dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    dev_info.pNext = NULL;
    dev_info.queueRecordCount = queue_info.size();
    dev_info.pRequestedQueues = &queue_info[0];
    dev_info.extensionCount = extensions.size();
    dev_info.ppEnabledExtensionNames = &extensions[0];
    dev_info.flags = 0;

    init(dev_info);
}

void Device::init(const VkDeviceCreateInfo &info)
{
    VkDevice obj;
    if (EXPECT(vkCreateDevice(gpu_.obj(), &info, &obj) == VK_SUCCESS)) {
        base_type::init(obj, VK_OBJECT_TYPE_DEVICE);
        // Only query queues and formats once the device actually exists
        init_queues();
        init_formats();
    }
}

void Device::init_queues()
{
    VkResult err;
    uint32_t queue_node_count;

    err = vkGetPhysicalDeviceQueueCount(gpu_.obj(), &queue_node_count);
    EXPECT(err == VK_SUCCESS);
    EXPECT(queue_node_count >= 1);

    VkPhysicalDeviceQueueProperties *queue_props = new VkPhysicalDeviceQueueProperties[queue_node_count];

    err = vkGetPhysicalDeviceQueueProperties(gpu_.obj(), queue_node_count, queue_props);
    EXPECT(err == VK_SUCCESS);

    for (uint32_t i = 0; i < queue_node_count; i++) {
        VkQueue queue;

        for (uint32_t j = 0; j < queue_props[i].queueCount; j++) {
            // TODO: Need to add support for separate MEMMGR and work queues, including synchronization
            err = vkGetDeviceQueue(obj(), i, j, &queue);
            EXPECT(err == VK_SUCCESS);

            if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
                queues_[GRAPHICS].push_back(new Queue(queue));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
                queues_[COMPUTE].push_back(new Queue(queue));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_DMA_BIT) {
                queues_[DMA].push_back(new Queue(queue));
            }
        }
    }

    delete[] queue_props;

    EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
}

void Device::init_formats()
{
    for (int f = VK_FORMAT_BEGIN_RANGE; f <= VK_FORMAT_END_RANGE; f++) {
        const VkFormat fmt = static_cast<VkFormat>(f);
        const VkFormatProperties props = format_properties(fmt);

        if (props.linearTilingFeatures) {
            const Format tmp = { fmt, VK_IMAGE_TILING_LINEAR, props.linearTilingFeatures };
            formats_.push_back(tmp);
        }

        if (props.optimalTilingFeatures) {
            const Format tmp = { fmt, VK_IMAGE_TILING_OPTIMAL, props.optimalTilingFeatures };
            formats_.push_back(tmp);
        }
    }

    EXPECT(!formats_.empty());
}

VkFormatProperties Device::format_properties(VkFormat format)
{
    VkFormatProperties data;
    if (!EXPECT(vkGetPhysicalDeviceFormatInfo(gpu().obj(), format, &data) == VK_SUCCESS))
        memset(&data, 0, sizeof(data));

    return data;
}

void Device::wait()
{
    EXPECT(vkDeviceWaitIdle(obj()) == VK_SUCCESS);
}

VkResult Device::wait(const std::vector<const Fence *> &fences, bool wait_all, uint64_t timeout)
{
    const std::vector<VkFence> fence_objs = make_objects<VkFence>(fences);
    VkResult err = vkWaitForFences(obj(), fence_objs.size(), &fence_objs[0], wait_all, timeout);
    EXPECT(err == VK_SUCCESS || err == VK_TIMEOUT);

    return err;
}

VkResult Device::update_descriptor_sets(const std::vector<VkWriteDescriptorSet> &writes, const std::vector<VkCopyDescriptorSet> &copies)
{
    return vkUpdateDescriptorSets(obj(), writes.size(), &writes[0], copies.size(), &copies[0]);
}

void Queue::submit(const std::vector<const CmdBuffer *> &cmds, Fence &fence)
{
    const std::vector<VkCmdBuffer> cmd_objs = make_objects<VkCmdBuffer>(cmds);
    EXPECT(vkQueueSubmit(obj(), cmd_objs.size(), &cmd_objs[0], fence.obj()) == VK_SUCCESS);
}

void Queue::submit(const CmdBuffer &cmd, Fence &fence)
{
    submit(std::vector<const CmdBuffer *>(1, &cmd), fence);
}

void Queue::submit(const CmdBuffer &cmd)
{
    Fence fence;
    submit(cmd, fence);
}
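
// For illustration: a typical test records into a command buffer, submits it,
// and then blocks until the queue drains:
//
//     queue.submit(cmd);   // uses a default-constructed Fence
//     queue.wait();        // vkQueueWaitIdle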

void Queue::wait()
{
    EXPECT(vkQueueWaitIdle(obj()) == VK_SUCCESS);
}

void Queue::signal_semaphore(Semaphore &sem)
{
    EXPECT(vkQueueSignalSemaphore(obj(), sem.obj()) == VK_SUCCESS);
}

void Queue::wait_semaphore(Semaphore &sem)
{
    EXPECT(vkQueueWaitSemaphore(obj(), sem.obj()) == VK_SUCCESS);
}

GpuMemory::~GpuMemory()
{
    if (initialized() && own())
        EXPECT(vkFreeMemory(dev_->obj(), obj()) == VK_SUCCESS);
}

void GpuMemory::init(const Device &dev, const VkMemoryAllocInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkAllocMemory, dev, VK_OBJECT_TYPE_DEVICE_MEMORY, &info);
}

void GpuMemory::init(const Device &dev, VkDeviceMemory mem)
{
    dev_ = &dev;
    BaseObject::init(mem, VK_OBJECT_TYPE_DEVICE_MEMORY, false);
}

const void *GpuMemory::map(VkFlags flags) const
{
    void *data;
    if (!EXPECT(vkMapMemory(dev_->obj(), obj(), 0, 0, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void *GpuMemory::map(VkFlags flags)
{
    void *data;
    if (!EXPECT(vkMapMemory(dev_->obj(), obj(), 0, 0, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void GpuMemory::unmap() const
{
    EXPECT(vkUnmapMemory(dev_->obj(), obj()) == VK_SUCCESS);
}
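
// For illustration: a hypothetical upload through a mapped allocation,
// assuming the memory is host-visible and map flags of 0 are acceptable:
//
//     void *ptr = mem.map(0);
//     if (ptr) {
//         memcpy(ptr, src, size);
//         mem.unmap();
//     }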

void Fence::init(const Device &dev, const VkFenceCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateFence, dev, VK_OBJECT_TYPE_FENCE, &info);
    alloc_memory();
}

void Semaphore::init(const Device &dev, const VkSemaphoreCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateSemaphore, dev, VK_OBJECT_TYPE_SEMAPHORE, &info);
    alloc_memory();
}

void Event::init(const Device &dev, const VkEventCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateEvent, dev, VK_OBJECT_TYPE_EVENT, &info);
    alloc_memory();
}

void Event::set()
{
    EXPECT(vkSetEvent(dev_->obj(), obj()) == VK_SUCCESS);
}

void Event::reset()
{
    EXPECT(vkResetEvent(dev_->obj(), obj()) == VK_SUCCESS);
}

void QueryPool::init(const Device &dev, const VkQueryPoolCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateQueryPool, dev, VK_OBJECT_TYPE_QUERY_POOL, &info);
    alloc_memory();
}

VkResult QueryPool::results(uint32_t start, uint32_t count, size_t size, void *data)
{
    size_t tmp = size;
    VkResult err = vkGetQueryPoolResults(dev_->obj(), obj(), start, count, &tmp, data, 0);
    if (err == VK_SUCCESS) {
        if (!EXPECT(tmp == size))
            memset(data, 0, size);
    } else {
        EXPECT(err == VK_NOT_READY);
    }

    return err;
}

void Buffer::init(const Device &dev, const VkBufferCreateInfo &info)
{
    init_no_mem(dev, info);
    alloc_memory();
}

void Buffer::init(const Device &dev, const VkBufferCreateInfo &info, VkMemoryPropertyFlags &reqs)
{
    init_no_mem(dev, info);
    alloc_memory(reqs);
}

void Buffer::init_no_mem(const Device &dev, const VkBufferCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateBuffer, dev, VK_OBJECT_TYPE_BUFFER, &info);
    create_info_ = info;
}

void Buffer::bind_memory(VkDeviceSize offset, VkDeviceSize size,
                         const GpuMemory &mem, VkDeviceSize mem_offset)
{
    VkQueue queue = dev_->graphics_queues()[0]->obj();
    EXPECT(vkQueueBindSparseBufferMemory(queue, obj(), offset, size, mem.obj(), mem_offset) == VK_SUCCESS);
}

void BufferView::init(const Device &dev, const VkBufferViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateBufferView, dev, VK_OBJECT_TYPE_BUFFER_VIEW, &info);
    alloc_memory();
}

void Image::init(const Device &dev, const VkImageCreateInfo &info)
{
    init_no_mem(dev, info);
    alloc_memory();
}

void Image::init(const Device &dev, const VkImageCreateInfo &info, VkMemoryPropertyFlags &reqs)
{
    init_no_mem(dev, info);
    alloc_memory(reqs);
}

void Image::init_no_mem(const Device &dev, const VkImageCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateImage, dev, VK_OBJECT_TYPE_IMAGE, &info);
    init_info(dev, info);
}

void Image::init_info(const Device &dev, const VkImageCreateInfo &info)
{
    create_info_ = info;

    // Cache the format features for this image's format/tiling combination
    for (std::vector<Device::Format>::const_iterator it = dev.formats().begin(); it != dev.formats().end(); ++it) {
        if (it->format == create_info_.format && it->tiling == create_info_.tiling) {
            format_features_ = it->features;
            break;
        }
    }
}

void Image::bind_memory(const Device &dev, const VkImageMemoryBindInfo &info,
                        const GpuMemory &mem, VkDeviceSize mem_offset)
{
    VkQueue queue = dev.graphics_queues()[0]->obj();
    EXPECT(vkQueueBindSparseImageMemory(queue, obj(), &info, mem.obj(), mem_offset) == VK_SUCCESS);
}

VkSubresourceLayout Image::subresource_layout(const VkImageSubresource &subres) const
{
    VkSubresourceLayout data;
    if (!EXPECT(vkGetImageSubresourceLayout(dev_->obj(), obj(), &subres, &data) == VK_SUCCESS))
        memset(&data, 0, sizeof(data));

    return data;
}

bool Image::transparent() const
{
    return (create_info_.tiling == VK_IMAGE_TILING_LINEAR &&
            create_info_.samples == 1 &&
            !(create_info_.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                    VK_IMAGE_USAGE_DEPTH_STENCIL_BIT)));
}

void ImageView::init(const Device &dev, const VkImageViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateImageView, dev, VK_OBJECT_TYPE_IMAGE_VIEW, &info);
    alloc_memory();
}

void ColorAttachmentView::init(const Device &dev, const VkColorAttachmentViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateColorAttachmentView, dev, VK_OBJECT_TYPE_COLOR_ATTACHMENT_VIEW, &info);
    alloc_memory();
}

void DepthStencilView::init(const Device &dev, const VkDepthStencilViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDepthStencilView, dev, VK_OBJECT_TYPE_DEPTH_STENCIL_VIEW, &info);
    alloc_memory();
}

void ShaderModule::init(const Device &dev, const VkShaderModuleCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateShaderModule, dev, VK_OBJECT_TYPE_SHADER_MODULE, &info);
}

VkResult ShaderModule::init_try(const Device &dev, const VkShaderModuleCreateInfo &info)
{
    /*
     * Note: Cannot use DERIVED_OBJECT_TYPE_INIT as we need the
     * return code.
     */
    VkShaderModule sh;
    dev_ = &dev;
    VkResult err = vkCreateShaderModule(dev.obj(), &info, &sh);
    if (err == VK_SUCCESS)
        Object::init(sh, VK_OBJECT_TYPE_SHADER_MODULE);

    return err;
}

void Shader::init(const Device &dev, const VkShaderCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateShader, dev, VK_OBJECT_TYPE_SHADER, &info);
}

VkResult Shader::init_try(const Device &dev, const VkShaderCreateInfo &info)
{
    /*
     * Note: Cannot use DERIVED_OBJECT_TYPE_INIT as we need the
     * return code.
     */
    VkShader sh;
    dev_ = &dev;
    VkResult err = vkCreateShader(dev.obj(), &info, &sh);
    if (err == VK_SUCCESS)
        Object::init(sh, VK_OBJECT_TYPE_SHADER);

    return err;
}

void Pipeline::init(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateGraphicsPipeline, dev, VK_OBJECT_TYPE_PIPELINE, &info);
    alloc_memory();
}

VkResult Pipeline::init_try(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
{
    VkPipeline pipe;
    dev_ = &dev;
    VkResult err = vkCreateGraphicsPipeline(dev.obj(), &info, &pipe);
    if (err == VK_SUCCESS) {
        Object::init(pipe, VK_OBJECT_TYPE_PIPELINE);
        alloc_memory();
    }

    return err;
}

void Pipeline::init(
        const Device &dev,
        const VkGraphicsPipelineCreateInfo &info,
        const VkPipeline basePipeline)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateGraphicsPipelineDerivative, dev, VK_OBJECT_TYPE_PIPELINE, &info, basePipeline);
    alloc_memory();
}

void Pipeline::init(const Device &dev, const VkComputePipelineCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateComputePipeline, dev, VK_OBJECT_TYPE_PIPELINE, &info);
    alloc_memory();
}

void Pipeline::init(const Device &dev, size_t size, const void *data)
{
    DERIVED_OBJECT_TYPE_INIT(vkLoadPipeline, dev, VK_OBJECT_TYPE_PIPELINE, size, data);
    alloc_memory();
}

void Pipeline::init(
        const Device &dev,
        size_t size,
        const void *data,
        const VkPipeline basePipeline)
{
    DERIVED_OBJECT_TYPE_INIT(vkLoadPipelineDerivative, dev, VK_OBJECT_TYPE_PIPELINE, size, data, basePipeline);
    alloc_memory();
}

size_t Pipeline::store(size_t size, void *data)
{
    if (!EXPECT(vkStorePipeline(dev_->obj(), obj(), &size, data) == VK_SUCCESS))
        size = 0;

    return size;
}

void Sampler::init(const Device &dev, const VkSamplerCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateSampler, dev, VK_OBJECT_TYPE_SAMPLER, &info);
    alloc_memory();
}

void DescriptorSetLayout::init(const Device &dev, const VkDescriptorSetLayoutCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDescriptorSetLayout, dev, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT, &info);
    alloc_memory();
}

void PipelineLayout::init(const Device &dev, VkPipelineLayoutCreateInfo &info, const std::vector<const DescriptorSetLayout *> &layouts)
{
    const std::vector<VkDescriptorSetLayout> layout_objs = make_objects<VkDescriptorSetLayout>(layouts);
    info.pSetLayouts = &layout_objs[0];

    DERIVED_OBJECT_TYPE_INIT(vkCreatePipelineLayout, dev, VK_OBJECT_TYPE_PIPELINE_LAYOUT, &info);
    alloc_memory();
}

void DescriptorPool::init(const Device &dev, VkDescriptorPoolUsage usage,
                          uint32_t max_sets, const VkDescriptorPoolCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDescriptorPool, dev, VK_OBJECT_TYPE_DESCRIPTOR_POOL, usage, max_sets, &info);
    alloc_memory();
}

void DescriptorPool::reset()
{
    EXPECT(vkResetDescriptorPool(dev_->obj(), obj()) == VK_SUCCESS);
}

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const std::vector<const DescriptorSetLayout *> &layouts)
{
    const std::vector<VkDescriptorSetLayout> layout_objs = make_objects<VkDescriptorSetLayout>(layouts);

    std::vector<VkDescriptorSet> set_objs;
    set_objs.resize(layout_objs.size());

    // Initialize set_count so a failed allocation yields an empty result
    // instead of reading an indeterminate value below
    uint32_t set_count = 0;
    VkResult err = vkAllocDescriptorSets(dev_->obj(), obj(), usage, layout_objs.size(), &layout_objs[0], &set_objs[0], &set_count);
    if (err == VK_SUCCESS)
        EXPECT(set_count == set_objs.size());
    set_objs.resize(set_count);

    std::vector<DescriptorSet *> sets;
    sets.reserve(set_count);
    for (std::vector<VkDescriptorSet>::const_iterator it = set_objs.begin(); it != set_objs.end(); ++it) {
        // do descriptor sets need memories bound?
        DescriptorSet *descriptorSet = new DescriptorSet(dev, *it);
        sets.push_back(descriptorSet);
    }
    return sets;
}

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout, uint32_t count)
{
    return alloc_sets(dev, usage, std::vector<const DescriptorSetLayout *>(count, &layout));
}

DescriptorSet *DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout)
{
    std::vector<DescriptorSet *> set = alloc_sets(dev, usage, layout, 1);
    return set.empty() ? NULL : set[0];
}
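
// For illustration: callers own the returned DescriptorSet pointers, e.g.
//
//     DescriptorSet *set = pool.alloc_sets(dev, usage, layout);  // usage: a VkDescriptorSetUsage value
//     if (set) { /* write descriptors, bind, ... */ delete set; }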

void DynamicVpStateObject::init(const Device &dev, const VkDynamicVpStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicViewportState, dev, VK_OBJECT_TYPE_DYNAMIC_VP_STATE, &info);
    alloc_memory();
}

void DynamicRsStateObject::init(const Device &dev, const VkDynamicRsStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicRasterState, dev, VK_OBJECT_TYPE_DYNAMIC_RS_STATE, &info);
    alloc_memory();
}

void DynamicCbStateObject::init(const Device &dev, const VkDynamicCbStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicColorBlendState, dev, VK_OBJECT_TYPE_DYNAMIC_CB_STATE, &info);
    alloc_memory();
}

void DynamicDsStateObject::init(const Device &dev, const VkDynamicDsStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicDepthStencilState, dev, VK_OBJECT_TYPE_DYNAMIC_DS_STATE, &info);
    alloc_memory();
}

void CmdBuffer::init(const Device &dev, const VkCmdBufferCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateCommandBuffer, dev, VK_OBJECT_TYPE_COMMAND_BUFFER, &info);
}

void CmdBuffer::begin(const VkCmdBufferBeginInfo *info)
{
    EXPECT(vkBeginCommandBuffer(obj(), info) == VK_SUCCESS);
}

void CmdBuffer::begin()
{
    VkCmdBufferBeginInfo info = {};
    info.flags = VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT |
                 VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT;
    info.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO;

    begin(&info);
}

void CmdBuffer::end()
{
    EXPECT(vkEndCommandBuffer(obj()) == VK_SUCCESS);
}
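
// For illustration: recording and submitting with these wrappers, assuming
// cmd and queue are already initialized:
//
//     cmd.begin();
//     /* ... record vkCmd* calls against cmd.obj() ... */
//     cmd.end();
//     queue.submit(cmd);
//     queue.wait();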

void CmdBuffer::reset()
{
    EXPECT(vkResetCommandBuffer(obj()) == VK_SUCCESS);
}

} // namespace vk_testing