vktestbinding.cpp revision 18061cdee54b19cd628178d2924a7a914c62a10b
// VK tests
//
// Copyright (C) 2014 LunarG, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

#include <iostream>
#include <string.h> // memset(), memcmp()
#include <assert.h>
#include "vktestbinding.h"

namespace {

#define DERIVED_OBJECT_TYPE_INIT(create_func, dev, vk_object_type, ...)         \
    do {                                                                        \
        obj_type obj;                                                           \
        dev_ = &dev;                                                            \
        if (EXPECT(create_func(dev.obj(), __VA_ARGS__, &obj) == VK_SUCCESS))    \
            base_type::init(obj, vk_object_type);                               \
    } while (0)
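
/*
 * Expansion sketch: a derived type's init() such as
 *
 *     DERIVED_OBJECT_TYPE_INIT(vkCreateFence, dev, VK_OBJECT_TYPE_FENCE, &info);
 *
 * records the owning device in dev_ and, only when the create call
 * succeeds, hands the new handle to base_type::init():
 *
 *     do {
 *         obj_type obj;
 *         dev_ = &dev;
 *         if (EXPECT(vkCreateFence(dev.obj(), &info, &obj) == VK_SUCCESS))
 *             base_type::init(obj, VK_OBJECT_TYPE_FENCE);
 *     } while (0);
 */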

#define STRINGIFY(x) #x
#define EXPECT(expr) ((expr) ? true : expect_failure(STRINGIFY(expr), __FILE__, __LINE__, __FUNCTION__))

vk_testing::ErrorCallback error_callback;

bool expect_failure(const char *expr, const char *file, unsigned int line, const char *function)
{
    if (error_callback) {
        error_callback(expr, file, line, function);
    } else {
        std::cerr << file << ":" << line << ": " << function <<
            ": Expectation `" << expr << "' failed.\n";
    }

    return false;
}

template<class T, class S>
std::vector<T> make_objects(const std::vector<S> &v)
{
    std::vector<T> objs;
    objs.reserve(v.size());
    for (typename std::vector<S>::const_iterator it = v.begin(); it != v.end(); ++it)
        objs.push_back((*it)->obj());
    return objs;
}

template<typename T>
std::vector<T> get_memory_reqs(VkDevice device, VkObjectType obj_type, VkObject obj, size_t min_elems)
{
    std::vector<T> info;

    info.resize((min_elems > 0) ? min_elems : 1);
    if (!EXPECT(vkGetObjectMemoryRequirements(device, obj_type, obj, &info[0]) == VK_SUCCESS))
        info.clear();

    if (info.size() < min_elems)
        info.resize(min_elems);

    return info;
}
} // namespace

namespace vk_testing {

void set_error_callback(ErrorCallback callback)
{
    error_callback = callback;
}
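
/*
 * Usage sketch (hypothetical caller): route expectation failures into a
 * test framework instead of std::cerr.  Only the ErrorCallback signature
 * is taken from this binding; the reporting body is illustrative.
 *
 *     static void fail_callback(const char *expr, const char *file,
 *                               unsigned int line, const char *function)
 *     {
 *         // e.g. record a test failure with expr/file/line/function
 *     }
 *
 *     vk_testing::set_error_callback(fail_callback);
 */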

VkPhysicalDeviceProperties PhysicalGpu::properties() const
{
    VkPhysicalDeviceProperties info;

    EXPECT(vkGetPhysicalDeviceProperties(gpu_, &info) == VK_SUCCESS);

    return info;
}

VkPhysicalDevicePerformance PhysicalGpu::performance() const
{
    VkPhysicalDevicePerformance info;

    EXPECT(vkGetPhysicalDevicePerformance(gpu_, &info) == VK_SUCCESS);

    return info;
}

std::vector<VkPhysicalDeviceQueueProperties> PhysicalGpu::queue_properties() const
{
    std::vector<VkPhysicalDeviceQueueProperties> info;
    uint32_t count;

    if (EXPECT(vkGetPhysicalDeviceQueueCount(gpu_, &count) == VK_SUCCESS)) {
        info.resize(count);
        if (!EXPECT(vkGetPhysicalDeviceQueueProperties(gpu_, count, &info[0]) == VK_SUCCESS))
            info.clear();
    }

    return info;
}

VkPhysicalDeviceMemoryProperties PhysicalGpu::memory_properties() const
{
    VkPhysicalDeviceMemoryProperties info;

    EXPECT(vkGetPhysicalDeviceMemoryProperties(gpu_, &info) == VK_SUCCESS);

    return info;
}

/*
 * Return list of global layers available
 */
std::vector<VkLayerProperties> GetGlobalLayers()
{
    VkResult err;

    std::vector<VkLayerProperties> layers;
    uint32_t layerCount = 0;
    err = vkGetGlobalLayerProperties(&layerCount, NULL);
    while (err == VK_INCOMPLETE) {
        layerCount = 0;
        err = vkGetGlobalLayerProperties(&layerCount, NULL);
    }
    assert(err == VK_SUCCESS);
    if (err != VK_SUCCESS) {
        return layers;
    }

    // resize(), not reserve(): &layers[0] must refer to layerCount valid elements
    layers.resize(layerCount);
    err = vkGetGlobalLayerProperties(&layerCount, &layers[0]);
    assert(err == VK_SUCCESS);

    return layers;
}
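
/*
 * Usage sketch (hypothetical caller): enumerate and print the global
 * layers.  Assumes VkLayerProperties carries a layerName field, as the
 * headers of this revision define.
 *
 *     const std::vector<VkLayerProperties> layers = vk_testing::GetGlobalLayers();
 *     for (size_t i = 0; i < layers.size(); i++)
 *         std::cout << layers[i].layerName << "\n";
 */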

/*
 * Return list of global extensions provided by the ICD / loader
 */
std::vector<VkExtensionProperties> GetGlobalExtensions()
{
    return GetGlobalExtensions(NULL);
}

/*
 * Return list of global extensions provided by the specified layer.
 * If pLayerName is NULL, returns extensions implemented by the loader / ICDs.
 */
std::vector<VkExtensionProperties> GetGlobalExtensions(const char *pLayerName)
{
    VkResult err;

    std::vector<VkExtensionProperties> exts;
    uint32_t extCount = 0;
    err = vkGetGlobalExtensionProperties(pLayerName, &extCount, NULL);
    while (err == VK_INCOMPLETE) {
        extCount = 0;
        err = vkGetGlobalExtensionProperties(pLayerName, &extCount, NULL);
    }
    assert(err == VK_SUCCESS);
    if (err != VK_SUCCESS) {
        return exts;
    }

    // resize(), not reserve(): &exts[0] must refer to extCount valid elements
    exts.resize(extCount);
    err = vkGetGlobalExtensionProperties(pLayerName, &extCount, &exts[0]);
    assert(err == VK_SUCCESS);

    return exts;
}

/*
 * Return list of PhysicalDevice extensions provided by the ICD / loader
 */
std::vector<VkExtensionProperties> PhysicalGpu::extensions() const
{
    return extensions(NULL);
}

/*
 * Return list of PhysicalDevice extensions provided by the specified layer.
 * If pLayerName is NULL, returns extensions for the ICD / loader.
 */
std::vector<VkExtensionProperties> PhysicalGpu::extensions(const char *pLayerName) const
{
    VkResult err;

    std::vector<VkExtensionProperties> exts;
    uint32_t extCount = 0;
    err = vkGetPhysicalDeviceExtensionProperties(obj(), pLayerName, &extCount, NULL);
    while (err == VK_INCOMPLETE) {
        extCount = 0;
        err = vkGetPhysicalDeviceExtensionProperties(obj(), pLayerName, &extCount, NULL);
    }
    assert(err == VK_SUCCESS);
    if (err != VK_SUCCESS) {
        return exts;
    }

    // resize(), not reserve(): &exts[0] must refer to extCount valid elements
    exts.resize(extCount);
    err = vkGetPhysicalDeviceExtensionProperties(obj(), pLayerName, &extCount, &exts[0]);
    assert(err == VK_SUCCESS);

    return exts;
}

VkResult PhysicalGpu::set_memory_type(const uint32_t type_bits, VkMemoryAllocInfo *info, const VkFlags properties) const
{
    uint32_t type_mask = type_bits;
    // Search memtypes to find the first index with the requested properties
    for (uint32_t i = 0; i < 32; i++) {
        if ((type_mask & 1) == 1) {
            // Type is available; does it match the requested properties?
            if ((memory_properties_.memoryTypes[i].propertyFlags & properties) == properties) {
                info->memoryTypeIndex = i;
                return VK_SUCCESS;
            }
        }
        type_mask >>= 1;
    }
    // No memory types matched; return failure
    return VK_UNSUPPORTED;
}
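
/*
 * Usage sketch (hypothetical caller): pick the first host-visible memory
 * type for a pending allocation.  The property flag name is an assumption
 * for this API revision; "reqs" would come from memory_requirements().
 *
 *     VkMemoryAllocInfo alloc_info = GpuMemory::alloc_info(reqs, NULL);
 *     if (gpu.set_memory_type(reqs.memoryTypeBits, &alloc_info,
 *                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != VK_SUCCESS)
 *         ; // no matching memory type on this GPU
 */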

void BaseObject::init(VkObject obj, VkObjectType type, bool own)
{
    EXPECT(!initialized());
    reinit(obj, type, own);
}

void BaseObject::reinit(VkObject obj, VkObjectType type, bool own)
{
    obj_ = obj;
    object_type_ = type;
    own_obj_ = own;
}

uint32_t Object::memory_allocation_count() const
{
    return 1;
}

std::vector<VkMemoryRequirements> Object::memory_requirements() const
{
    const uint32_t num_allocations = 1;
    std::vector<VkMemoryRequirements> info =
        get_memory_reqs<VkMemoryRequirements>(dev_->obj(), type(), obj(), 0);
    EXPECT(info.size() == num_allocations);
    if (info.size() == 1 && !info[0].size)
        info.clear();

    return info;
}

void Object::init(VkObject obj, VkObjectType object_type, bool own)
{
    BaseObject::init(obj, object_type, own);
    mem_alloc_count_ = memory_allocation_count();
}

void Object::reinit(VkObject obj, VkObjectType object_type, bool own)
{
    cleanup();
    BaseObject::reinit(obj, object_type, own);
    mem_alloc_count_ = memory_allocation_count();
}

void Object::cleanup()
{
    if (!initialized())
        return;

    if (own())
        EXPECT(vkDestroyObject(dev_->obj(), type(), obj()) == VK_SUCCESS);

    if (internal_mems_) {
        delete[] internal_mems_;
        internal_mems_ = NULL;
        primary_mem_ = NULL;
    }

    mem_alloc_count_ = 0;
}

void Object::bind_memory(const GpuMemory &mem, VkDeviceSize mem_offset)
{
    bound = true;
    EXPECT(vkBindObjectMemory(dev_->obj(), type(), obj(), mem.obj(), mem_offset) == VK_SUCCESS);
}

void Object::alloc_memory()
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new GpuMemory[mem_alloc_count_];

    const std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    VkMemoryAllocInfo info, *next_info = NULL;

    for (size_t i = 0; i < mem_reqs.size(); i++) {
        info = GpuMemory::alloc_info(mem_reqs[i], next_info);
        dev_->gpu().set_memory_type(mem_reqs[i].memoryTypeBits, &info, 0);
        primary_mem_ = &internal_mems_[i];
        internal_mems_[i].init(*dev_, info);
        bind_memory(internal_mems_[i], 0);
    }
}

void Object::alloc_memory(VkMemoryPropertyFlags &reqs)
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new GpuMemory[mem_alloc_count_];

    const std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    VkMemoryAllocInfo info, *next_info = NULL;

    for (size_t i = 0; i < mem_reqs.size(); i++) {
        info = GpuMemory::alloc_info(mem_reqs[i], next_info);
        dev_->gpu().set_memory_type(mem_reqs[i].memoryTypeBits, &info, reqs);
        primary_mem_ = &internal_mems_[i];
        internal_mems_[i].init(*dev_, info);
        bind_memory(internal_mems_[i], 0);
    }
}

void Object::alloc_memory(const std::vector<VkDeviceMemory> &mems)
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new GpuMemory[mem_alloc_count_];

    const std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    if (!EXPECT(mem_reqs.size() == mems.size()))
        return;

    for (size_t i = 0; i < mem_reqs.size(); i++) {
        primary_mem_ = &internal_mems_[i];

        internal_mems_[i].init(*dev_, mems[i]);
        bind_memory(internal_mems_[i], 0);
    }
}

std::vector<VkDeviceMemory> Object::memories() const
{
    std::vector<VkDeviceMemory> mems;
    if (internal_mems_) {
        mems.reserve(mem_alloc_count_);
        for (uint32_t i = 0; i < mem_alloc_count_; i++)
            mems.push_back(internal_mems_[i].obj());
    }

    return mems;
}

Device::~Device()
{
    if (!initialized())
        return;

    for (int i = 0; i < QUEUE_COUNT; i++) {
        for (std::vector<Queue *>::iterator it = queues_[i].begin(); it != queues_[i].end(); ++it)
            delete *it;
        queues_[i].clear();
    }

    EXPECT(vkDestroyDevice(obj()) == VK_SUCCESS);
}

void Device::init(std::vector<const char *> &extensions)
{
    // request all queues
    const std::vector<VkPhysicalDeviceQueueProperties> queue_props = gpu_.queue_properties();
    std::vector<VkDeviceQueueCreateInfo> queue_info;
    queue_info.reserve(queue_props.size());
    for (uint32_t i = 0; i < queue_props.size(); i++) {
        VkDeviceQueueCreateInfo qi = {};
        qi.queueNodeIndex = i;
        qi.queueCount = queue_props[i].queueCount;
        if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphics_queue_node_index_ = i;
        }
        queue_info.push_back(qi);
    }

    VkDeviceCreateInfo dev_info = {};
    dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    dev_info.pNext = NULL;
    dev_info.queueRecordCount = queue_info.size();
    dev_info.pRequestedQueues = &queue_info[0];
    dev_info.extensionCount = extensions.size();
    dev_info.ppEnabledExtensionNames = &extensions[0];
    dev_info.flags = 0;

    init(dev_info);
}
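
/*
 * Usage sketch (hypothetical caller; the Device constructor form is
 * assumed from the header): create a device on an enumerated GPU with
 * no extensions enabled.
 *
 *     vk_testing::Device dev(gpu_handle);   // gpu_handle from instance enumeration
 *     std::vector<const char *> extensions; // none requested
 *     dev.init(extensions);
 */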

void Device::init(const VkDeviceCreateInfo &info)
{
    VkDevice obj;
    if (EXPECT(vkCreateDevice(gpu_.obj(), &info, &obj) == VK_SUCCESS)) {
        base_type::init(obj, VK_OBJECT_TYPE_DEVICE);
    }

    init_queues();
    init_formats();
}

void Device::init_queues()
{
    VkResult err;
    uint32_t queue_node_count;

    err = vkGetPhysicalDeviceQueueCount(gpu_.obj(), &queue_node_count);
    EXPECT(err == VK_SUCCESS);
    EXPECT(queue_node_count >= 1);

    VkPhysicalDeviceQueueProperties *queue_props = new VkPhysicalDeviceQueueProperties[queue_node_count];

    err = vkGetPhysicalDeviceQueueProperties(gpu_.obj(), queue_node_count, queue_props);
    EXPECT(err == VK_SUCCESS);

    for (uint32_t i = 0; i < queue_node_count; i++) {
        VkQueue queue;

        for (uint32_t j = 0; j < queue_props[i].queueCount; j++) {
            // TODO: Need to add support for separate MEMMGR and work queues, including synchronization
            err = vkGetDeviceQueue(obj(), i, j, &queue);
            EXPECT(err == VK_SUCCESS);

            if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
                queues_[GRAPHICS].push_back(new Queue(queue));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
                queues_[COMPUTE].push_back(new Queue(queue));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_DMA_BIT) {
                queues_[DMA].push_back(new Queue(queue));
            }
        }
    }

    delete[] queue_props;

    EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
}

void Device::init_formats()
{
    for (int f = VK_FORMAT_BEGIN_RANGE; f <= VK_FORMAT_END_RANGE; f++) {
        const VkFormat fmt = static_cast<VkFormat>(f);
        const VkFormatProperties props = format_properties(fmt);

        if (props.linearTilingFeatures) {
            const Format tmp = { fmt, VK_IMAGE_TILING_LINEAR, props.linearTilingFeatures };
            formats_.push_back(tmp);
        }

        if (props.optimalTilingFeatures) {
            const Format tmp = { fmt, VK_IMAGE_TILING_OPTIMAL, props.optimalTilingFeatures };
            formats_.push_back(tmp);
        }
    }

    EXPECT(!formats_.empty());
}

VkFormatProperties Device::format_properties(VkFormat format)
{
    VkFormatProperties data;
    if (!EXPECT(vkGetPhysicalDeviceFormatInfo(gpu().obj(), format, &data) == VK_SUCCESS))
        memset(&data, 0, sizeof(data));

    return data;
}

void Device::wait()
{
    EXPECT(vkDeviceWaitIdle(obj()) == VK_SUCCESS);
}

VkResult Device::wait(const std::vector<const Fence *> &fences, bool wait_all, uint64_t timeout)
{
    const std::vector<VkFence> fence_objs = make_objects<VkFence>(fences);
    VkResult err = vkWaitForFences(obj(), fence_objs.size(), &fence_objs[0], wait_all, timeout);
    EXPECT(err == VK_SUCCESS || err == VK_TIMEOUT);

    return err;
}
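
/*
 * Usage sketch (hypothetical caller): wait for two fences, accepting
 * either completion or timeout.  Timeout units are as vkWaitForFences
 * defines them for this revision.
 *
 *     std::vector<const vk_testing::Fence *> fences;
 *     fences.push_back(&fence_a);
 *     fences.push_back(&fence_b);
 *     VkResult err = dev.wait(fences, true, timeout);
 *     if (err == VK_TIMEOUT)
 *         ; // still pending
 */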

VkResult Device::update_descriptor_sets(const std::vector<VkWriteDescriptorSet> &writes, const std::vector<VkCopyDescriptorSet> &copies)
{
    return vkUpdateDescriptorSets(obj(), writes.size(), &writes[0], copies.size(), &copies[0]);
}

void Queue::submit(const std::vector<const CmdBuffer *> &cmds, Fence &fence)
{
    const std::vector<VkCmdBuffer> cmd_objs = make_objects<VkCmdBuffer>(cmds);
    EXPECT(vkQueueSubmit(obj(), cmd_objs.size(), &cmd_objs[0], fence.obj()) == VK_SUCCESS);
}

void Queue::submit(const CmdBuffer &cmd, Fence &fence)
{
    submit(std::vector<const CmdBuffer *>(1, &cmd), fence);
}

void Queue::submit(const CmdBuffer &cmd)
{
    Fence fence;
    submit(cmd, fence);
}
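
/*
 * Usage sketch: submit a recorded command buffer with the no-fence
 * overload, then block until the queue drains.
 *
 *     queue.submit(cmd_buffer);   // wraps vkQueueSubmit
 *     queue.wait();               // wraps vkQueueWaitIdle
 */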

void Queue::wait()
{
    EXPECT(vkQueueWaitIdle(obj()) == VK_SUCCESS);
}

void Queue::signal_semaphore(Semaphore &sem)
{
    EXPECT(vkQueueSignalSemaphore(obj(), sem.obj()) == VK_SUCCESS);
}

void Queue::wait_semaphore(Semaphore &sem)
{
    EXPECT(vkQueueWaitSemaphore(obj(), sem.obj()) == VK_SUCCESS);
}

GpuMemory::~GpuMemory()
{
    if (initialized() && own())
        EXPECT(vkFreeMemory(dev_->obj(), obj()) == VK_SUCCESS);
}

void GpuMemory::init(const Device &dev, const VkMemoryAllocInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkAllocMemory, dev, VK_OBJECT_TYPE_DEVICE_MEMORY, &info);
}

void GpuMemory::init(const Device &dev, VkDeviceMemory mem)
{
    dev_ = &dev;
    BaseObject::init(mem, VK_OBJECT_TYPE_DEVICE_MEMORY, false);
}

const void *GpuMemory::map(VkFlags flags) const
{
    void *data;
    if (!EXPECT(vkMapMemory(dev_->obj(), obj(), 0, 0, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void *GpuMemory::map(VkFlags flags)
{
    void *data;
    if (!EXPECT(vkMapMemory(dev_->obj(), obj(), 0, 0, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void GpuMemory::unmap() const
{
    EXPECT(vkUnmapMemory(dev_->obj(), obj()) == VK_SUCCESS);
}
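
/*
 * Usage sketch (hypothetical caller): fill a mapped allocation with a
 * test pattern.  Assumes the memory came from a host-visible memory
 * type; map() returns NULL on failure.  "alloc_size" is illustrative.
 *
 *     void *ptr = mem.map(0);
 *     if (ptr) {
 *         memset(ptr, 0xaa, (size_t) alloc_size);
 *         mem.unmap();
 *     }
 */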

void Fence::init(const Device &dev, const VkFenceCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateFence, dev, VK_OBJECT_TYPE_FENCE, &info);
    alloc_memory();
}

void Semaphore::init(const Device &dev, const VkSemaphoreCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateSemaphore, dev, VK_OBJECT_TYPE_SEMAPHORE, &info);
    alloc_memory();
}

void Event::init(const Device &dev, const VkEventCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateEvent, dev, VK_OBJECT_TYPE_EVENT, &info);
    alloc_memory();
}

void Event::set()
{
    EXPECT(vkSetEvent(dev_->obj(), obj()) == VK_SUCCESS);
}

void Event::reset()
{
    EXPECT(vkResetEvent(dev_->obj(), obj()) == VK_SUCCESS);
}

void QueryPool::init(const Device &dev, const VkQueryPoolCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateQueryPool, dev, VK_OBJECT_TYPE_QUERY_POOL, &info);
    alloc_memory();
}

VkResult QueryPool::results(uint32_t start, uint32_t count, size_t size, void *data)
{
    size_t tmp = size;
    VkResult err = vkGetQueryPoolResults(dev_->obj(), obj(), start, count, &tmp, data, 0);
    if (err == VK_SUCCESS) {
        if (!EXPECT(tmp == size))
            memset(data, 0, size);
    } else {
        EXPECT(err == VK_NOT_READY);
    }

    return err;
}
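
/*
 * Usage sketch (hypothetical caller): poll for two query results,
 * retrying while the GPU has not finished.  The uint64_t result layout
 * is an assumption; the pool's create info determines the real one.
 *
 *     uint64_t results[2];
 *     VkResult err;
 *     do {
 *         err = query_pool.results(0, 2, sizeof(results), results);
 *     } while (err == VK_NOT_READY);
 */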

void Buffer::init(const Device &dev, const VkBufferCreateInfo &info)
{
    init_no_mem(dev, info);
    alloc_memory();
}

void Buffer::init(const Device &dev, const VkBufferCreateInfo &info, VkMemoryPropertyFlags &reqs)
{
    init_no_mem(dev, info);
    alloc_memory(reqs);
}

void Buffer::init_no_mem(const Device &dev, const VkBufferCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateBuffer, dev, VK_OBJECT_TYPE_BUFFER, &info);
    create_info_ = info;
}
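
/*
 * Usage sketch (hypothetical caller): create a buffer whose backing
 * memory must be host-visible.  The create-info fields and the property
 * flag name are assumptions for this API revision.
 *
 *     VkBufferCreateInfo info = {};
 *     info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
 *     info.size = 4096;
 *     VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
 *     vk_testing::Buffer buffer;
 *     buffer.init(dev, info, reqs);
 */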

void Buffer::bind_memory(VkDeviceSize offset, VkDeviceSize size,
                         const GpuMemory &mem, VkDeviceSize mem_offset)
{
    VkQueue queue = dev_->graphics_queues()[0]->obj();
    EXPECT(vkQueueBindSparseBufferMemory(queue, obj(), offset, size, mem.obj(), mem_offset) == VK_SUCCESS);
}

void BufferView::init(const Device &dev, const VkBufferViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateBufferView, dev, VK_OBJECT_TYPE_BUFFER_VIEW, &info);
    alloc_memory();
}

void Image::init(const Device &dev, const VkImageCreateInfo &info)
{
    init_no_mem(dev, info);
    alloc_memory();
}

void Image::init(const Device &dev, const VkImageCreateInfo &info, VkMemoryPropertyFlags &reqs)
{
    init_no_mem(dev, info);
    alloc_memory(reqs);
}

void Image::init_no_mem(const Device &dev, const VkImageCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateImage, dev, VK_OBJECT_TYPE_IMAGE, &info);
    init_info(dev, info);
}

void Image::init_info(const Device &dev, const VkImageCreateInfo &info)
{
    create_info_ = info;

    for (std::vector<Device::Format>::const_iterator it = dev.formats().begin(); it != dev.formats().end(); ++it) {
        if (it->format == create_info_.format && it->tiling == create_info_.tiling) {
            format_features_ = it->features;
            break;
        }
    }
}

void Image::bind_memory(const Device &dev, const VkImageMemoryBindInfo &info,
                        const GpuMemory &mem, VkDeviceSize mem_offset)
{
    VkQueue queue = dev.graphics_queues()[0]->obj();
    EXPECT(vkQueueBindSparseImageMemory(queue, obj(), &info, mem.obj(), mem_offset) == VK_SUCCESS);
}

VkSubresourceLayout Image::subresource_layout(const VkImageSubresource &subres) const
{
    VkSubresourceLayout data;
    if (!EXPECT(vkGetImageSubresourceLayout(dev_->obj(), obj(), &subres, &data) == VK_SUCCESS))
        memset(&data, 0, sizeof(data));

    return data;
}

bool Image::transparent() const
{
    return (create_info_.tiling == VK_IMAGE_TILING_LINEAR &&
            create_info_.samples == 1 &&
            !(create_info_.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                    VK_IMAGE_USAGE_DEPTH_STENCIL_BIT)));
}

void ImageView::init(const Device &dev, const VkImageViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateImageView, dev, VK_OBJECT_TYPE_IMAGE_VIEW, &info);
    alloc_memory();
}

void ColorAttachmentView::init(const Device &dev, const VkColorAttachmentViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateColorAttachmentView, dev, VK_OBJECT_TYPE_COLOR_ATTACHMENT_VIEW, &info);
    alloc_memory();
}

void DepthStencilView::init(const Device &dev, const VkDepthStencilViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDepthStencilView, dev, VK_OBJECT_TYPE_DEPTH_STENCIL_VIEW, &info);
    alloc_memory();
}

void ShaderModule::init(const Device &dev, const VkShaderModuleCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateShaderModule, dev, VK_OBJECT_TYPE_SHADER_MODULE, &info);
}

VkResult ShaderModule::init_try(const Device &dev, const VkShaderModuleCreateInfo &info)
{
    /*
     * Note: cannot use DERIVED_OBJECT_TYPE_INIT here because the caller
     * needs the return code.
     */
    VkShaderModule sh;
    dev_ = &dev;
    VkResult err = vkCreateShaderModule(dev.obj(), &info, &sh);
    if (err == VK_SUCCESS)
        Object::init(sh, VK_OBJECT_TYPE_SHADER_MODULE);

    return err;
}

void Shader::init(const Device &dev, const VkShaderCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateShader, dev, VK_OBJECT_TYPE_SHADER, &info);
}

VkResult Shader::init_try(const Device &dev, const VkShaderCreateInfo &info)
{
    /*
     * Note: cannot use DERIVED_OBJECT_TYPE_INIT here because the caller
     * needs the return code.
     */
    VkShader sh;
    dev_ = &dev;
    VkResult err = vkCreateShader(dev.obj(), &info, &sh);
    if (err == VK_SUCCESS)
        Object::init(sh, VK_OBJECT_TYPE_SHADER);

    return err;
}

void Pipeline::init(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateGraphicsPipeline, dev, VK_OBJECT_TYPE_PIPELINE, &info);
    alloc_memory();
}

VkResult Pipeline::init_try(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
{
    VkPipeline pipe;
    dev_ = &dev;
    VkResult err = vkCreateGraphicsPipeline(dev.obj(), &info, &pipe);
    if (err == VK_SUCCESS) {
        Object::init(pipe, VK_OBJECT_TYPE_PIPELINE);
        alloc_memory();
    }

    return err;
}

void Pipeline::init(
        const Device &dev,
        const VkGraphicsPipelineCreateInfo &info,
        const VkPipeline basePipeline)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateGraphicsPipelineDerivative, dev, VK_OBJECT_TYPE_PIPELINE, &info, basePipeline);
    alloc_memory();
}

void Pipeline::init(const Device &dev, const VkComputePipelineCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateComputePipeline, dev, VK_OBJECT_TYPE_PIPELINE, &info);
    alloc_memory();
}

void Pipeline::init(const Device &dev, size_t size, const void *data)
{
    DERIVED_OBJECT_TYPE_INIT(vkLoadPipeline, dev, VK_OBJECT_TYPE_PIPELINE, size, data);
    alloc_memory();
}

void Pipeline::init(
        const Device &dev,
        size_t size,
        const void *data,
        const VkPipeline basePipeline)
{
    DERIVED_OBJECT_TYPE_INIT(vkLoadPipelineDerivative, dev, VK_OBJECT_TYPE_PIPELINE, size, data, basePipeline);
    alloc_memory();
}

size_t Pipeline::store(size_t size, void *data)
{
    if (!EXPECT(vkStorePipeline(dev_->obj(), obj(), &size, data) == VK_SUCCESS))
        size = 0;

    return size;
}

void Sampler::init(const Device &dev, const VkSamplerCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateSampler, dev, VK_OBJECT_TYPE_SAMPLER, &info);
    alloc_memory();
}

void DescriptorSetLayout::init(const Device &dev, const VkDescriptorSetLayoutCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDescriptorSetLayout, dev, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT, &info);
    alloc_memory();
}

void PipelineLayout::init(const Device &dev, VkPipelineLayoutCreateInfo &info, const std::vector<const DescriptorSetLayout *> &layouts)
{
    const std::vector<VkDescriptorSetLayout> layout_objs = make_objects<VkDescriptorSetLayout>(layouts);
    info.pSetLayouts = &layout_objs[0];

    DERIVED_OBJECT_TYPE_INIT(vkCreatePipelineLayout, dev, VK_OBJECT_TYPE_PIPELINE_LAYOUT, &info);
    alloc_memory();
}

void DescriptorPool::init(const Device &dev, VkDescriptorPoolUsage usage,
                          uint32_t max_sets, const VkDescriptorPoolCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDescriptorPool, dev, VK_OBJECT_TYPE_DESCRIPTOR_POOL, usage, max_sets, &info);
    alloc_memory();
}

void DescriptorPool::reset()
{
    EXPECT(vkResetDescriptorPool(dev_->obj(), obj()) == VK_SUCCESS);
}

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const std::vector<const DescriptorSetLayout *> &layouts)
{
    const std::vector<VkDescriptorSetLayout> layout_objs = make_objects<VkDescriptorSetLayout>(layouts);

    std::vector<VkDescriptorSet> set_objs;
    set_objs.resize(layout_objs.size());

    uint32_t set_count = 0; // initialize so a failed allocation yields no sets
    VkResult err = vkAllocDescriptorSets(dev_->obj(), obj(), usage, layout_objs.size(), &layout_objs[0], &set_objs[0], &set_count);
    if (err == VK_SUCCESS)
        EXPECT(set_count == set_objs.size());
    set_objs.resize(set_count);

    std::vector<DescriptorSet *> sets;
    sets.reserve(set_count);
    for (std::vector<VkDescriptorSet>::const_iterator it = set_objs.begin(); it != set_objs.end(); ++it) {
        // do descriptor sets need memories bound?
        DescriptorSet *descriptorSet = new DescriptorSet(dev, *it);
        sets.push_back(descriptorSet);
    }
    return sets;
}

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout, uint32_t count)
{
    return alloc_sets(dev, usage, std::vector<const DescriptorSetLayout *>(count, &layout));
}

DescriptorSet *DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout)
{
    std::vector<DescriptorSet *> set = alloc_sets(dev, usage, layout, 1);
    return (set.empty()) ? NULL : set[0];
}
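
/*
 * Usage sketch (hypothetical caller): allocate one set from a pool using
 * an existing layout.  The usage enum value is an assumption for this
 * API revision; ownership of the returned set passes to the caller.
 *
 *     vk_testing::DescriptorSet *set =
 *         pool.alloc_sets(dev, VK_DESCRIPTOR_SET_USAGE_STATIC, layout);
 *     if (set) {
 *         // write/copy descriptors via Device::update_descriptor_sets()
 *         delete set;
 *     }
 */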

void DynamicVpStateObject::init(const Device &dev, const VkDynamicVpStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicViewportState, dev, VK_OBJECT_TYPE_DYNAMIC_VP_STATE, &info);
    alloc_memory();
}

void DynamicRsStateObject::init(const Device &dev, const VkDynamicRsStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicRasterState, dev, VK_OBJECT_TYPE_DYNAMIC_RS_STATE, &info);
    alloc_memory();
}

void DynamicCbStateObject::init(const Device &dev, const VkDynamicCbStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicColorBlendState, dev, VK_OBJECT_TYPE_DYNAMIC_CB_STATE, &info);
    alloc_memory();
}

void DynamicDsStateObject::init(const Device &dev, const VkDynamicDsStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicDepthStencilState, dev, VK_OBJECT_TYPE_DYNAMIC_DS_STATE, &info);
    alloc_memory();
}

void CmdBuffer::init(const Device &dev, const VkCmdBufferCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateCommandBuffer, dev, VK_OBJECT_TYPE_COMMAND_BUFFER, &info);
}

void CmdBuffer::begin(const VkCmdBufferBeginInfo *info)
{
    EXPECT(vkBeginCommandBuffer(obj(), info) == VK_SUCCESS);
}

void CmdBuffer::begin()
{
    VkCmdBufferBeginInfo info = {};
    info.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO;
    info.flags = VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT |
                 VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT;

    begin(&info);
}

void CmdBuffer::end()
{
    EXPECT(vkEndCommandBuffer(obj()) == VK_SUCCESS);
}

void CmdBuffer::reset()
{
    EXPECT(vkResetCommandBuffer(obj()) == VK_SUCCESS);
}
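
/*
 * Usage sketch: the typical record/submit cycle for a command buffer
 * created through this binding.
 *
 *     cmd.begin();                // one-time-submit + small-batch flags
 *     // ... record vkCmd*() calls against cmd.obj() ...
 *     cmd.end();
 *     queue.submit(cmd);
 *     queue.wait();
 *     cmd.reset();                // ready for re-recording
 */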

} // namespace vk_testing