vktestbinding.cpp revision 9c6d94f102991d3b8bdca5008f4ab191098d835e
// VK tests
//
// Copyright (C) 2014 LunarG, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

#include <iostream>
#include <string.h> // memset(), memcmp()
#include <assert.h>
#include "vktestbinding.h"

namespace {
#define DERIVED_OBJECT_TYPE_INIT(create_func, dev, vk_object_type, ...)         \
    do {                                                                        \
        obj_type obj;                                                           \
        dev_ = &dev;                                                            \
        if (EXPECT(create_func(dev.obj(), __VA_ARGS__, &obj) == VK_SUCCESS))    \
            base_type::init(obj, vk_object_type);                               \
    } while (0)

#define STRINGIFY(x) #x
#define EXPECT(expr) ((expr) ? true : expect_failure(STRINGIFY(expr), __FILE__, __LINE__, __FUNCTION__))
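
// EXPECT() evaluates expr and, on failure, reports it through the registered
// error callback (or stderr via expect_failure() below).  It yields the
// boolean result, so callers can also use it to gate control flow, e.g.
//
//     if (!EXPECT(err == VK_SUCCESS))
//         return;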
vk_testing::ErrorCallback error_callback;

bool expect_failure(const char *expr, const char *file, unsigned int line, const char *function)
{
    if (error_callback) {
        error_callback(expr, file, line, function);
    } else {
        std::cerr << file << ":" << line << ": " << function <<
            ": Expectation `" << expr << "' failed.\n";
    }

    return false;
}

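// Collect the raw Vulkan handles from a vector of wrapper-object pointers.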
template<class T, class S>
std::vector<T> make_objects(const std::vector<S> &v)
{
    std::vector<T> objs;
    objs.reserve(v.size());
    for (typename std::vector<S>::const_iterator it = v.begin(); it != v.end(); it++)
        objs.push_back((*it)->obj());
    return objs;
}

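// The get_info() overloads wrap the usual two-call Vulkan query idiom: ask
// for the size first, then fetch the data.  The result is padded to at least
// min_elems entries so callers can safely index element [0].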
template<typename T>
std::vector<T> get_info(VkPhysicalDevice gpu, VkPhysicalDeviceInfoType type, size_t min_elems)
{
    std::vector<T> info;
    size_t size;
    if (EXPECT(vkGetPhysicalDeviceInfo(gpu, type, &size, NULL) == VK_SUCCESS && size % sizeof(T) == 0) && size > 0) { // skip fetch when empty: &info[0] is invalid on an empty vector
        info.resize(size / sizeof(T));
        if (!EXPECT(vkGetPhysicalDeviceInfo(gpu, type, &size, &info[0]) == VK_SUCCESS && size == info.size() * sizeof(T)))
            info.clear();
    }

    if (info.size() < min_elems)
        info.resize(min_elems);

    return info;
}

template<typename T>
std::vector<T> get_info(VkDevice device, VkObjectType object_type, VkObject obj, VkObjectInfoType type, size_t min_elems)
{
    std::vector<T> info;
    size_t size;
    if (EXPECT(vkGetObjectInfo(device, object_type, obj, type, &size, NULL) == VK_SUCCESS && size % sizeof(T) == 0) && size > 0) { // skip fetch when empty: &info[0] is invalid on an empty vector
        info.resize(size / sizeof(T));
        if (!EXPECT(vkGetObjectInfo(device, object_type, obj, type, &size, &info[0]) == VK_SUCCESS && size == info.size() * sizeof(T)))
            info.clear();
    }

    if (info.size() < min_elems)
        info.resize(min_elems);

    return info;
}
99
100} // namespace
101
102namespace vk_testing {
103
104void set_error_callback(ErrorCallback callback)
105{
106    error_callback = callback;
107}
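
// Example (sketch): a test harness can route expectation failures into its
// own reporting instead of stderr.  `record_failure' below is a hypothetical
// helper, not part of this binding:
//
//     static void on_error(const char *expr, const char *file,
//                          unsigned int line, const char *function)
//     {
//         record_failure(file, line, function, expr);  // hypothetical
//     }
//
//     vk_testing::set_error_callback(on_error);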

VkPhysicalDeviceProperties PhysicalGpu::properties() const
{
    return get_info<VkPhysicalDeviceProperties>(gpu_, VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES, 1)[0];
}

VkPhysicalDevicePerformance PhysicalGpu::performance() const
{
    return get_info<VkPhysicalDevicePerformance>(gpu_, VK_PHYSICAL_DEVICE_INFO_TYPE_PERFORMANCE, 1)[0];
}

std::vector<VkPhysicalDeviceQueueProperties> PhysicalGpu::queue_properties() const
{
    return get_info<VkPhysicalDeviceQueueProperties>(gpu_, VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES, 0);
}

VkPhysicalDeviceMemoryProperties PhysicalGpu::memory_properties() const
{
    return get_info<VkPhysicalDeviceMemoryProperties>(gpu_, VK_PHYSICAL_DEVICE_INFO_TYPE_MEMORY_PROPERTIES, 1)[0];
}

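// vkEnumerateLayers() writes layer names into caller-provided storage, so
// carve the scratch buffer into max_layer_count fixed-size string slots and
// hand out pointers into it.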
std::vector<const char *> PhysicalGpu::layers(std::vector<char> &buf) const
{
    const size_t max_layer_count = 16;
    const size_t max_string_size = 256;

    buf.resize(max_layer_count * max_string_size);

    std::vector<const char *> layers;
    layers.reserve(max_layer_count);
    for (size_t i = 0; i < max_layer_count; i++)
        layers.push_back(&buf[0] + max_string_size * i);

    char * const *out = const_cast<char * const *>(&layers[0]);
    size_t count = max_layer_count; /* allow up to 16 layer names to be returned */
    if (!EXPECT(vkEnumerateLayers(gpu_, max_string_size, &count, out, NULL) == VK_SUCCESS))
        count = 0;
    layers.resize(count);

    return layers;
}

std::vector<const char *> PhysicalGpu::extensions() const
{
    // Extensions to enable
    static const char *known_exts[] = {
        "VK_WSI_LunarG",
    };
    std::vector<const char *> exts;
    size_t extSize = sizeof(uint32_t);
    uint32_t extCount = 0;
    if (!EXPECT(vkGetGlobalExtensionInfo(VK_EXTENSION_INFO_TYPE_COUNT, 0, &extSize, &extCount) == VK_SUCCESS))
        return exts;

    VkExtensionProperties extProp;
    extSize = sizeof(VkExtensionProperties);
    // TODO : Need to update this if/when we have more than 1 extension to enable
    for (uint32_t i = 0; i < extCount; i++) {
        if (!EXPECT(vkGetGlobalExtensionInfo(VK_EXTENSION_INFO_TYPE_PROPERTIES, i, &extSize, &extProp) == VK_SUCCESS))
            return exts;

        if (!strcmp(known_exts[0], extProp.extName))
            exts.push_back(known_exts[0]); // i indexes the reported extensions, not known_exts
    }

    return exts;
}

VkPhysicalDeviceCompatibilityInfo PhysicalGpu::compatibility(const PhysicalGpu &other) const
{
    VkPhysicalDeviceCompatibilityInfo data;
    if (!EXPECT(vkGetMultiDeviceCompatibility(gpu_, other.gpu_, &data) == VK_SUCCESS))
        memset(&data, 0, sizeof(data));

    return data;
}

void BaseObject::init(VkObject obj, VkObjectType type, bool own)
{
    EXPECT(!initialized());
    reinit(obj, type, own);
}

void BaseObject::reinit(VkObject obj, VkObjectType type, bool own)
{
    obj_ = obj;
    object_type_ = type;
    own_obj_ = own;
}

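// Objects report how many separate memory allocations they need; the
// requirements query returns one entry per allocation.  A single zero-sized
// requirement is treated here as "no backing memory needed".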
uint32_t Object::memory_allocation_count() const
{
    return get_info<uint32_t>(dev_->obj(), type(), obj(), VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT, 1)[0];
}

std::vector<VkMemoryRequirements> Object::memory_requirements() const
{
    VkResult err;
    uint32_t num_allocations = 0;
    size_t num_alloc_size = sizeof(num_allocations);
    err = vkGetObjectInfo(dev_->obj(), type(), obj(), VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
                           &num_alloc_size, &num_allocations);
    EXPECT(err == VK_SUCCESS && num_alloc_size == sizeof(num_allocations));
    std::vector<VkMemoryRequirements> info =
        get_info<VkMemoryRequirements>(dev_->obj(), type(), obj(), VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS, 0);
    EXPECT(info.size() == num_allocations);
    if (info.size() == 1 && !info[0].size) // a single zero-sized entry means no memory is required
        info.clear();

    return info;
}

void Object::init(VkObject obj, VkObjectType object_type, bool own)
{
    BaseObject::init(obj, object_type, own);
    mem_alloc_count_ = memory_allocation_count();
}

void Object::reinit(VkObject obj, VkObjectType object_type, bool own)
{
    cleanup();
    BaseObject::reinit(obj, object_type, own);
    mem_alloc_count_ = memory_allocation_count();
}

void Object::cleanup()
{
    if (!initialized())
        return;

    if (bound) {
        unbind_memory();
    }

    if (internal_mems_) {
        delete[] internal_mems_;
        internal_mems_ = NULL;
        primary_mem_ = NULL;
    }

    mem_alloc_count_ = 0;

    if (own())
        EXPECT(vkDestroyObject(dev_->obj(), type(), obj()) == VK_SUCCESS);
}

//void Object::bind_memory(const Device &dev, uint32_t alloc_idx, const GpuMemory &mem, VkDeviceSize mem_offset)
void Object::bind_memory(uint32_t alloc_idx, const GpuMemory &mem, VkDeviceSize mem_offset)
{
    bound = true;
    VkQueue queue = dev_->graphics_queues()[0]->obj();
    EXPECT(vkQueueBindObjectMemory(queue, type(), obj(), alloc_idx, mem.obj(), mem_offset) == VK_SUCCESS);
}

//void Object::bind_memory(const Device &dev, uint32_t alloc_idx, VkDeviceSize offset, VkDeviceSize size,
//                         const GpuMemory &mem, VkDeviceSize mem_offset)
void Object::bind_memory(uint32_t alloc_idx, VkDeviceSize offset, VkDeviceSize size,
                         const GpuMemory &mem, VkDeviceSize mem_offset)
{
    bound = true;
    VkQueue queue = dev_->graphics_queues()[0]->obj();
    EXPECT(!alloc_idx && vkQueueBindObjectMemoryRange(queue, type(), obj(), 0, offset, size, mem.obj(), mem_offset) == VK_SUCCESS);
}

//void Object::unbind_memory(const Device &dev, uint32_t alloc_idx)
void Object::unbind_memory(uint32_t alloc_idx)
{
    VkQueue queue = dev_->graphics_queues()[0]->obj();
    EXPECT(vkQueueBindObjectMemory(queue, type(), obj(), alloc_idx, VK_NULL_HANDLE, 0) == VK_SUCCESS);
}

//void Object::unbind_memory(const Device &dev)
void Object::unbind_memory()
{
    for (uint32_t i = 0; i < mem_alloc_count_; i++)
        unbind_memory(i);
}

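// Allocate (or adopt) one GpuMemory per reported requirement and bind it.
// Note that primary_mem_ ends up pointing at the last allocation.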
//void Object::alloc_memory(const Device &dev)
void Object::alloc_memory()
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new GpuMemory[mem_alloc_count_];

    const std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    VkMemoryAllocInfo info, *next_info = NULL;

    for (uint32_t i = 0; i < mem_reqs.size(); i++) {
        info = GpuMemory::alloc_info(mem_reqs[i], next_info);
        primary_mem_ = &internal_mems_[i];
        internal_mems_[i].init(*dev_, info);
        bind_memory(i, internal_mems_[i], 0);
    }
}

//void Object::alloc_memory(const Device &dev, const std::vector<VkDeviceMemory> &mems)
void Object::alloc_memory(const std::vector<VkDeviceMemory> &mems)
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new GpuMemory[mem_alloc_count_];

    const std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    if (!EXPECT(mem_reqs.size() == mems.size()))
        return;

    for (uint32_t i = 0; i < mem_reqs.size(); i++) {
        primary_mem_ = &internal_mems_[i];

        internal_mems_[i].init(*dev_, mems[i]);
        bind_memory(i, internal_mems_[i], 0);
    }
}

std::vector<VkDeviceMemory> Object::memories() const
{
    std::vector<VkDeviceMemory> mems;
    if (internal_mems_) {
        mems.reserve(mem_alloc_count_);
        for (uint32_t i = 0; i < mem_alloc_count_; i++)
            mems.push_back(internal_mems_[i].obj());
    }

    return mems;
}

Device::~Device()
{
    if (!initialized())
        return;

    for (int i = 0; i < QUEUE_COUNT; i++) {
        for (std::vector<Queue *>::iterator it = queues_[i].begin(); it != queues_[i].end(); it++)
            delete *it;
        queues_[i].clear();
    }

    EXPECT(vkDestroyDevice(obj()) == VK_SUCCESS);
}

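// Create the device with every queue the GPU exposes and, optionally, every
// layer reported by the driver; validation is requested unconditionally.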
void Device::init(bool enable_layers)
{
    // request all queues
    const std::vector<VkPhysicalDeviceQueueProperties> queue_props = gpu_.queue_properties();
    std::vector<VkDeviceQueueCreateInfo> queue_info;
    queue_info.reserve(queue_props.size());
    for (uint32_t i = 0; i < queue_props.size(); i++) {
        VkDeviceQueueCreateInfo qi = {};
        qi.queueNodeIndex = i;
        qi.queueCount = queue_props[i].queueCount;
        if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphics_queue_node_index_ = i;
        }
        queue_info.push_back(qi);
    }

    VkLayerCreateInfo layer_info = {};
    layer_info.sType = VK_STRUCTURE_TYPE_LAYER_CREATE_INFO;

    std::vector<const char *> layers;
    std::vector<char> layer_buf;
    // request all layers
    if (enable_layers) {
        layers = gpu_.layers(layer_buf);
        layer_info.layerCount = layers.size();
        layer_info.ppActiveLayerNames = &layers[0];
    }

    const std::vector<const char *> exts = gpu_.extensions();

    VkDeviceCreateInfo dev_info = {};
    dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    dev_info.pNext = (enable_layers) ? static_cast<void *>(&layer_info) : NULL;
    dev_info.queueRecordCount = queue_info.size();
    dev_info.pRequestedQueues = &queue_info[0];
    dev_info.extensionCount = exts.size();
    dev_info.ppEnabledExtensionNames = exts.empty() ? NULL : &exts[0]; // exts may legitimately be empty
    dev_info.flags = VK_DEVICE_CREATE_VALIDATION_BIT;

    init(dev_info);
}

void Device::init(const VkDeviceCreateInfo &info)
{
    VkDevice obj;
    if (EXPECT(vkCreateDevice(gpu_.obj(), &info, &obj) == VK_SUCCESS)) {
        base_type::init(obj, VK_OBJECT_TYPE_DEVICE);
    }

    init_queues();
    init_formats();
}

void Device::init_queues()
{
    VkResult err;
    size_t data_size;
    uint32_t queue_node_count;

    err = vkGetPhysicalDeviceInfo(gpu_.obj(), VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES,
                        &data_size, NULL);
    EXPECT(err == VK_SUCCESS);

    queue_node_count = data_size / sizeof(VkPhysicalDeviceQueueProperties);
    EXPECT(queue_node_count >= 1);

    // use a vector rather than a variable-length array, which is not standard C++
    std::vector<VkPhysicalDeviceQueueProperties> queue_props(queue_node_count);

    err = vkGetPhysicalDeviceInfo(gpu_.obj(), VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES,
                        &data_size, &queue_props[0]);
    EXPECT(err == VK_SUCCESS);

    for (uint32_t i = 0; i < queue_node_count; i++) {
        VkQueue queue;

        for (uint32_t j = 0; j < queue_props[i].queueCount; j++) {
            // TODO: Need to add support for separate MEMMGR and work queues, including synchronization
            err = vkGetDeviceQueue(obj(), i, j, &queue);
            EXPECT(err == VK_SUCCESS);

            if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
                queues_[GRAPHICS].push_back(new Queue(queue));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
                queues_[COMPUTE].push_back(new Queue(queue));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_DMA_BIT) {
                queues_[DMA].push_back(new Queue(queue));
            }
        }
    }

    EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
}

void Device::init_formats()
{
    for (int f = VK_FORMAT_BEGIN_RANGE; f <= VK_FORMAT_END_RANGE; f++) {
        const VkFormat fmt = static_cast<VkFormat>(f);
        const VkFormatProperties props = format_properties(fmt);

        if (props.linearTilingFeatures) {
            const Format tmp = { fmt, VK_IMAGE_TILING_LINEAR, props.linearTilingFeatures };
            formats_.push_back(tmp);
        }

        if (props.optimalTilingFeatures) {
            const Format tmp = { fmt, VK_IMAGE_TILING_OPTIMAL, props.optimalTilingFeatures };
            formats_.push_back(tmp);
        }
    }

    EXPECT(!formats_.empty());
}

VkFormatProperties Device::format_properties(VkFormat format)
{
    const VkFormatInfoType type = VK_FORMAT_INFO_TYPE_PROPERTIES;
    VkFormatProperties data;
    size_t size = sizeof(data);
    if (!EXPECT(vkGetFormatInfo(obj(), format, type, &size, &data) == VK_SUCCESS && size == sizeof(data)))
        memset(&data, 0, sizeof(data));

    return data;
}

void Device::wait()
{
    EXPECT(vkDeviceWaitIdle(obj()) == VK_SUCCESS);
}

VkResult Device::wait(const std::vector<const Fence *> &fences, bool wait_all, uint64_t timeout)
{
    const std::vector<VkFence> fence_objs = make_objects<VkFence>(fences);
    VkResult err = vkWaitForFences(obj(), fence_objs.size(), &fence_objs[0], wait_all, timeout);
    EXPECT(err == VK_SUCCESS || err == VK_TIMEOUT);

    return err;
}

void Device::begin_descriptor_pool_update(VkDescriptorUpdateMode mode)
{
    EXPECT(vkBeginDescriptorPoolUpdate(obj(), mode) == VK_SUCCESS);
}

void Device::end_descriptor_pool_update(CmdBuffer &cmd)
{
    EXPECT(vkEndDescriptorPoolUpdate(obj(), cmd.obj()) == VK_SUCCESS);
}

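// Queue submission helpers.  The fence-less overload submits through a
// default-constructed Fence wrapper rather than a caller-visible fence.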
void Queue::submit(const std::vector<const CmdBuffer *> &cmds, Fence &fence)
{
    const std::vector<VkCmdBuffer> cmd_objs = make_objects<VkCmdBuffer>(cmds);
    EXPECT(vkQueueSubmit(obj(), cmd_objs.size(), &cmd_objs[0], fence.obj()) == VK_SUCCESS);
}

void Queue::submit(const CmdBuffer &cmd, Fence &fence)
{
    submit(std::vector<const CmdBuffer*>(1, &cmd), fence);
}

void Queue::submit(const CmdBuffer &cmd)
{
    Fence fence;
    submit(cmd, fence);
}

void Queue::add_mem_references(const std::vector<VkDeviceMemory> &mem_refs)
{
    EXPECT(vkQueueAddMemReferences(obj(), mem_refs.size(), &mem_refs[0]) == VK_SUCCESS);
}

void Queue::remove_mem_references(const std::vector<VkDeviceMemory> &mem_refs)
{
    EXPECT(vkQueueRemoveMemReferences(obj(), mem_refs.size(), &mem_refs[0]) == VK_SUCCESS);
}

void Queue::wait()
{
    EXPECT(vkQueueWaitIdle(obj()) == VK_SUCCESS);
}

void Queue::signal_semaphore(Semaphore &sem)
{
    EXPECT(vkQueueSignalSemaphore(obj(), sem.obj()) == VK_SUCCESS);
}

void Queue::wait_semaphore(Semaphore &sem)
{
    EXPECT(vkQueueWaitSemaphore(obj(), sem.obj()) == VK_SUCCESS);
}

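// GpuMemory frees its allocation only when it owns the handle;
// init(dev, mem) adopts an externally allocated VkDeviceMemory without
// taking ownership.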
GpuMemory::~GpuMemory()
{
    if (initialized() && own())
        EXPECT(vkFreeMemory(dev_->obj(), obj()) == VK_SUCCESS);
}

void GpuMemory::init(const Device &dev, const VkMemoryAllocInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkAllocMemory, dev, VK_OBJECT_TYPE_DEVICE_MEMORY, &info);
}

void GpuMemory::init(const Device &dev, size_t size, const void *data)
{
    DERIVED_OBJECT_TYPE_INIT(vkPinSystemMemory, dev, VK_OBJECT_TYPE_DEVICE_MEMORY, data, size);
}

void GpuMemory::init(const Device &dev, const VkMemoryOpenInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkOpenSharedMemory, dev, VK_OBJECT_TYPE_DEVICE_MEMORY, &info);
}

void GpuMemory::init(const Device &dev, const VkPeerMemoryOpenInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkOpenPeerMemory, dev, VK_OBJECT_TYPE_DEVICE_MEMORY, &info);
}

void GpuMemory::init(const Device &dev, VkDeviceMemory mem)
{
    dev_ = &dev;
    BaseObject::init(mem, VK_OBJECT_TYPE_DEVICE_MEMORY, false);
}

void GpuMemory::set_priority(VkMemoryPriority priority)
{
    EXPECT(vkSetMemoryPriority(dev_->obj(), obj(), priority) == VK_SUCCESS);
}

const void *GpuMemory::map(VkFlags flags) const
{
    void *data;
    if (!EXPECT(vkMapMemory(dev_->obj(), obj(), 0, 0, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void *GpuMemory::map(VkFlags flags)
{
    void *data;
    if (!EXPECT(vkMapMemory(dev_->obj(), obj(), 0, 0, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void GpuMemory::unmap() const
{
    EXPECT(vkUnmapMemory(dev_->obj(), obj()) == VK_SUCCESS);
}

void Fence::init(const Device &dev, const VkFenceCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateFence, dev, VK_OBJECT_TYPE_FENCE, &info);
    alloc_memory();
}

void Semaphore::init(const Device &dev, const VkSemaphoreCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateSemaphore, dev, VK_OBJECT_TYPE_SEMAPHORE, &info);
    alloc_memory();
}

void Semaphore::init(const Device &dev, const VkSemaphoreOpenInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkOpenSharedSemaphore, dev, VK_OBJECT_TYPE_SEMAPHORE, &info);
}

void Event::init(const Device &dev, const VkEventCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateEvent, dev, VK_OBJECT_TYPE_EVENT, &info);
    alloc_memory();
}

void Event::set()
{
    EXPECT(vkSetEvent(dev_->obj(), obj()) == VK_SUCCESS);
}

void Event::reset()
{
    EXPECT(vkResetEvent(dev_->obj(), obj()) == VK_SUCCESS);
}

void QueryPool::init(const Device &dev, const VkQueryPoolCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateQueryPool, dev, VK_OBJECT_TYPE_QUERY_POOL, &info);
    alloc_memory();
}

VkResult QueryPool::results(uint32_t start, uint32_t count, size_t size, void *data)
{
    size_t tmp = size;
    VkResult err = vkGetQueryPoolResults(dev_->obj(), obj(), start, count, &tmp, data, 0);
    if (err == VK_SUCCESS) {
        if (!EXPECT(tmp == size))
            memset(data, 0, size);
    } else {
        EXPECT(err == VK_NOT_READY);
    }

    return err;
}

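// Example (sketch, assuming a pool created with two 64-bit result slots):
// VK_NOT_READY is returned, and not treated as a failure, while results
// are still pending.
//
//     uint64_t data[2];
//     VkResult err = query_pool.results(0, 2, sizeof(data), data);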
void Buffer::init(const Device &dev, const VkBufferCreateInfo &info)
{
    init_no_mem(dev, info);
    alloc_memory();
}

void Buffer::init_no_mem(const Device &dev, const VkBufferCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateBuffer, dev, VK_OBJECT_TYPE_BUFFER, &info);
    create_info_ = info;
}

void BufferView::init(const Device &dev, const VkBufferViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateBufferView, dev, VK_OBJECT_TYPE_BUFFER_VIEW, &info);
    alloc_memory();
}

void Image::init(const Device &dev, const VkImageCreateInfo &info)
{
    init_no_mem(dev, info);
    alloc_memory();
}

void Image::init_no_mem(const Device &dev, const VkImageCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateImage, dev, VK_OBJECT_TYPE_IMAGE, &info);
    init_info(dev, info);
}

void Image::init(const Device &dev, const VkPeerImageOpenInfo &info, const VkImageCreateInfo &original_info)
{
    VkImage img;
    VkDeviceMemory mem;
    dev_ = &dev;
    EXPECT(vkOpenPeerImage(dev.obj(), &info, &img, &mem) == VK_SUCCESS);
    Object::init(img, VK_OBJECT_TYPE_IMAGE);

    init_info(dev, original_info);
    alloc_memory(std::vector<VkDeviceMemory>(1, mem));
}

void Image::init_info(const Device &dev, const VkImageCreateInfo &info)
{
    create_info_ = info;

    for (std::vector<Device::Format>::const_iterator it = dev.formats().begin(); it != dev.formats().end(); it++) {
        if (it->format == create_info_.format && it->tiling == create_info_.tiling) {
            format_features_ = it->features;
            break;
        }
    }
}

void Image::bind_memory(const Device &dev, uint32_t alloc_idx, const VkImageMemoryBindInfo &info,
                        const GpuMemory &mem, VkDeviceSize mem_offset)
{
    VkQueue queue = dev.graphics_queues()[0]->obj();
    EXPECT(!alloc_idx && vkQueueBindImageMemoryRange(queue, obj(), 0, &info, mem.obj(), mem_offset) == VK_SUCCESS);
}

VkSubresourceLayout Image::subresource_layout(const VkImageSubresource &subres) const
{
    const VkSubresourceInfoType type = VK_SUBRESOURCE_INFO_TYPE_LAYOUT;
    VkSubresourceLayout data;
    size_t size = sizeof(data);
    if (!EXPECT(vkGetImageSubresourceInfo(dev_->obj(), obj(), &subres, type, &size, &data) == VK_SUCCESS && size == sizeof(data)))
        memset(&data, 0, sizeof(data));

    return data;
}

bool Image::transparent() const
{
    return (create_info_.tiling == VK_IMAGE_TILING_LINEAR &&
            create_info_.samples == 1 &&
            !(create_info_.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                    VK_IMAGE_USAGE_DEPTH_STENCIL_BIT)));
}

void ImageView::init(const Device &dev, const VkImageViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateImageView, dev, VK_OBJECT_TYPE_IMAGE_VIEW, &info);
    alloc_memory();
}

void ColorAttachmentView::init(const Device &dev, const VkColorAttachmentViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateColorAttachmentView, dev, VK_OBJECT_TYPE_COLOR_ATTACHMENT_VIEW, &info);
    alloc_memory();
}

void DepthStencilView::init(const Device &dev, const VkDepthStencilViewCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDepthStencilView, dev, VK_OBJECT_TYPE_DEPTH_STENCIL_VIEW, &info);
    alloc_memory();
}

void Shader::init(const Device &dev, const VkShaderCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateShader, dev, VK_OBJECT_TYPE_SHADER, &info);
}

VkResult Shader::init_try(const Device &dev, const VkShaderCreateInfo &info)
{
    /*
     * Note: Cannot use DERIVED_OBJECT_TYPE_INIT as we need the
     * return code.
     */
    VkShader sh;
    dev_ = &dev;
    VkResult err = vkCreateShader(dev.obj(), &info, &sh);
    if (err == VK_SUCCESS)
        Object::init(sh, VK_OBJECT_TYPE_SHADER);

    return err;
}

void Pipeline::init(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateGraphicsPipeline, dev, VK_OBJECT_TYPE_PIPELINE, &info);
    alloc_memory();
}

void Pipeline::init(
        const Device &dev,
        const VkGraphicsPipelineCreateInfo &info,
        const VkPipeline basePipeline)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateGraphicsPipelineDerivative, dev, VK_OBJECT_TYPE_PIPELINE, &info, basePipeline);
    alloc_memory();
}

void Pipeline::init(const Device &dev, const VkComputePipelineCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateComputePipeline, dev, VK_OBJECT_TYPE_PIPELINE, &info);
    alloc_memory();
}

void Pipeline::init(const Device &dev, size_t size, const void *data)
{
    DERIVED_OBJECT_TYPE_INIT(vkLoadPipeline, dev, VK_OBJECT_TYPE_PIPELINE, size, data);
    alloc_memory();
}

void Pipeline::init(
        const Device &dev,
        size_t size,
        const void *data,
        const VkPipeline basePipeline)
{
    DERIVED_OBJECT_TYPE_INIT(vkLoadPipelineDerivative, dev, VK_OBJECT_TYPE_PIPELINE, size, data, basePipeline);
    alloc_memory();
}

size_t Pipeline::store(size_t size, void *data)
{
    if (!EXPECT(vkStorePipeline(dev_->obj(), obj(), &size, data) == VK_SUCCESS))
        size = 0;

    return size;
}

void Sampler::init(const Device &dev, const VkSamplerCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateSampler, dev, VK_OBJECT_TYPE_SAMPLER, &info);
    alloc_memory();
}

void DescriptorSetLayout::init(const Device &dev, const VkDescriptorSetLayoutCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDescriptorSetLayout, dev, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT, &info);
    alloc_memory();
}

void PipelineLayout::init(const Device &dev, VkPipelineLayoutCreateInfo &info, const std::vector<const DescriptorSetLayout *> &layouts)
{
    const std::vector<VkDescriptorSetLayout> layout_objs = make_objects<VkDescriptorSetLayout>(layouts);
    info.pSetLayouts = &layout_objs[0];

    DERIVED_OBJECT_TYPE_INIT(vkCreatePipelineLayout, dev, VK_OBJECT_TYPE_PIPELINE_LAYOUT, &info);
    alloc_memory();
}

void DescriptorPool::init(const Device &dev, VkDescriptorPoolUsage usage,
                          uint32_t max_sets, const VkDescriptorPoolCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDescriptorPool, dev, VK_OBJECT_TYPE_DESCRIPTOR_POOL, usage, max_sets, &info);
    alloc_memory();
}

void DescriptorPool::reset()
{
    EXPECT(vkResetDescriptorPool(dev_->obj(), obj()) == VK_SUCCESS);
}

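// Descriptor set allocation: one set per layout.  The returned DescriptorSet
// wrappers are heap-allocated, and callers own them.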
std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const std::vector<const DescriptorSetLayout *> &layouts)
{
    const std::vector<VkDescriptorSetLayout> layout_objs = make_objects<VkDescriptorSetLayout>(layouts);

    std::vector<VkDescriptorSet> set_objs;
    set_objs.resize(layout_objs.size());

    uint32_t set_count = 0; // left at zero if the allocation call fails
    VkResult err = vkAllocDescriptorSets(dev_->obj(), obj(), usage, layout_objs.size(), &layout_objs[0], &set_objs[0], &set_count);
    if (err == VK_SUCCESS)
        EXPECT(set_count == set_objs.size());
    set_objs.resize(set_count);

    std::vector<DescriptorSet *> sets;
    sets.reserve(set_count);
    for (std::vector<VkDescriptorSet>::const_iterator it = set_objs.begin(); it != set_objs.end(); it++) {
        // do descriptor sets need memories bound?
        DescriptorSet *descriptorSet = new DescriptorSet(dev, *it);
        sets.push_back(descriptorSet);
    }
    return sets;
}

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout, uint32_t count)
{
    return alloc_sets(dev, usage, std::vector<const DescriptorSetLayout *>(count, &layout));
}

DescriptorSet *DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout)
{
    std::vector<DescriptorSet *> set = alloc_sets(dev, usage, layout, 1);
    return (set.empty()) ? NULL : set[0];
}

void DescriptorPool::clear_sets(const std::vector<DescriptorSet *> &sets)
{
    const std::vector<VkDescriptorSet> set_objs = make_objects<VkDescriptorSet>(sets);
    vkClearDescriptorSets(dev_->obj(), obj(), set_objs.size(), &set_objs[0]);
}

void DescriptorSet::update(const std::vector<const void *> &update_array)
{
    vkUpdateDescriptors(dev_->obj(), obj(), update_array.size(), const_cast<const void **>(&update_array[0]));
}

void DynamicVpStateObject::init(const Device &dev, const VkDynamicVpStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicViewportState, dev, VK_OBJECT_TYPE_DYNAMIC_VP_STATE, &info);
    alloc_memory();
}

void DynamicRsStateObject::init(const Device &dev, const VkDynamicRsStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicRasterState, dev, VK_OBJECT_TYPE_DYNAMIC_RS_STATE, &info);
    alloc_memory();
}

void DynamicCbStateObject::init(const Device &dev, const VkDynamicCbStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicColorBlendState, dev, VK_OBJECT_TYPE_DYNAMIC_CB_STATE, &info);
    alloc_memory();
}

void DynamicDsStateObject::init(const Device &dev, const VkDynamicDsStateCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateDynamicDepthStencilState, dev, VK_OBJECT_TYPE_DYNAMIC_DS_STATE, &info);
    alloc_memory();
}

void CmdBuffer::init(const Device &dev, const VkCmdBufferCreateInfo &info)
{
    DERIVED_OBJECT_TYPE_INIT(vkCreateCommandBuffer, dev, VK_OBJECT_TYPE_COMMAND_BUFFER, &info);
}

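// Typical test usage (sketch): record through the wrapper, issuing raw
// vkCmd* calls against cmd.obj() between begin() and end():
//
//     cmd.begin();            // or cmd.begin(render_pass, framebuffer)
//     // ... vkCmd*(cmd.obj(), ...) ...
//     cmd.end();
//     queue.submit(cmd);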
void CmdBuffer::begin(const VkCmdBufferBeginInfo *info)
{
    EXPECT(vkBeginCommandBuffer(obj(), info) == VK_SUCCESS);
}

void CmdBuffer::begin(VkRenderPass renderpass_obj, VkFramebuffer framebuffer_obj)
{
    VkCmdBufferBeginInfo info = {};
    VkCmdBufferGraphicsBeginInfo graphics_cmd_buf_info = {};
    graphics_cmd_buf_info.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_GRAPHICS_BEGIN_INFO;
    graphics_cmd_buf_info.pNext = NULL;
    graphics_cmd_buf_info.renderPassContinue.renderPass = renderpass_obj;
    graphics_cmd_buf_info.renderPassContinue.framebuffer = framebuffer_obj;

    info.flags = VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT |
          VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT;
    info.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO;
    info.pNext = &graphics_cmd_buf_info;

    begin(&info);
}

void CmdBuffer::begin()
{
    VkCmdBufferBeginInfo info = {};
    info.flags = VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT |
          VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT;
    info.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO;

    begin(&info);
}

void CmdBuffer::end()
{
    EXPECT(vkEndCommandBuffer(obj()) == VK_SUCCESS);
}

void CmdBuffer::reset()
{
    EXPECT(vkResetCommandBuffer(obj()) == VK_SUCCESS);
}

} // namespace vk_testing
