vktestbinding.cpp revision 382489d723fd0d3935da0dc7e1021c56c7b721d3
// VK tests
//
// Copyright (C) 2014 LunarG, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

#include <iostream>
#include <string.h> // memset(), memcmp()
#include "vktestbinding.h"

namespace {

#define DERIVED_OBJECT_INIT(create_func, ...)                       \
    do {                                                            \
        obj_type obj;                                               \
        if (EXPECT(create_func(__VA_ARGS__, &obj) == VK_SUCCESS))   \
            base_type::init(obj);                                   \
    } while (0)

#define STRINGIFY(x) #x
#define EXPECT(expr) ((expr) ? true : expect_failure(STRINGIFY(expr), __FILE__, __LINE__, __FUNCTION__))
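
// Example (illustrative only): a typical derived init method later in this
// file, e.g.
//
//     void Fence::init(const Device &dev, const VkFenceCreateInfo &info)
//     {
//         DERIVED_OBJECT_INIT(vkCreateFence, dev.obj(), &info);
//     }
//
// adopts the created handle via base_type::init() only when the create call
// returns VK_SUCCESS; otherwise EXPECT() reports the failed expression
// through the installed ErrorCallback (or stderr) and the wrapper is left
// uninitialized.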

vk_testing::ErrorCallback error_callback;

bool expect_failure(const char *expr, const char *file, unsigned int line, const char *function)
{
    if (error_callback) {
        error_callback(expr, file, line, function);
    } else {
        std::cerr << file << ":" << line << ": " << function <<
            ": Expectation `" << expr << "' failed.\n";
    }

    return false;
}

template<class T, class S>
std::vector<T> make_objects(const std::vector<S> &v)
{
    std::vector<T> objs;
    objs.reserve(v.size());
    for (typename std::vector<S>::const_iterator it = v.begin(); it != v.end(); it++)
        objs.push_back((*it)->obj());
    return objs;
}

template<typename T>
std::vector<T> get_info(VkPhysicalGpu gpu, VkPhysicalGpuInfoType type, size_t min_elems)
{
    std::vector<T> info;
    size_t size;
    if (EXPECT(vkGetGpuInfo(gpu, type, &size, NULL) == VK_SUCCESS && size % sizeof(T) == 0)) {
        info.resize(size / sizeof(T));
        if (!EXPECT(vkGetGpuInfo(gpu, type, &size, &info[0]) == VK_SUCCESS && size == info.size() * sizeof(T)))
            info.clear();
    }

    if (info.size() < min_elems)
        info.resize(min_elems);

    return info;
}

template<typename T>
std::vector<T> get_info(VkBaseObject obj, VkObjectInfoType type, size_t min_elems)
{
    std::vector<T> info;
    size_t size;
    if (EXPECT(vkGetObjectInfo(obj, type, &size, NULL) == VK_SUCCESS && size % sizeof(T) == 0)) {
        info.resize(size / sizeof(T));
        if (!EXPECT(vkGetObjectInfo(obj, type, &size, &info[0]) == VK_SUCCESS && size == info.size() * sizeof(T)))
            info.clear();
    }

    if (info.size() < min_elems)
        info.resize(min_elems);

    return info;
}
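
// Both get_info() overloads use the two-call query idiom: the first call
// retrieves only the required size, the second fills the buffer. A
// hand-rolled equivalent (sketch, not part of the original file):
//
//     size_t size;
//     vkGetObjectInfo(obj, type, &size, NULL);     // query size only
//     std::vector<uint8_t> buf(size);
//     vkGetObjectInfo(obj, type, &size, &buf[0]);  // fill the data
//
// The templates additionally check that the reported size is a whole number
// of T elements, and pad the result up to min_elems so callers can index
// element [0] without testing for an empty vector.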

} // namespace

namespace vk_testing {

void set_error_callback(ErrorCallback callback)
{
    error_callback = callback;
}
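
// Illustrative usage, assuming ErrorCallback matches the way expect_failure()
// invokes it above (expr, file, line, function):
//
//     static void log_failure(const char *expr, const char *file,
//                             unsigned int line, const char *function)
//     {
//         std::cerr << "[test] " << file << ":" << line << ": "
//                   << function << ": " << expr << " failed\n";
//     }
//
//     vk_testing::set_error_callback(log_failure);
//
// Installing a null callback falls back to the default stderr message.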

VkPhysicalGpuProperties PhysicalGpu::properties() const
{
    return get_info<VkPhysicalGpuProperties>(gpu_, VK_INFO_TYPE_PHYSICAL_GPU_PROPERTIES, 1)[0];
}

VkPhysicalGpuPerformance PhysicalGpu::performance() const
{
    return get_info<VkPhysicalGpuPerformance>(gpu_, VK_INFO_TYPE_PHYSICAL_GPU_PERFORMANCE, 1)[0];
}

std::vector<VkPhysicalGpuQueueProperties> PhysicalGpu::queue_properties() const
{
    return get_info<VkPhysicalGpuQueueProperties>(gpu_, VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES, 0);
}

VkPhysicalGpuMemoryProperties PhysicalGpu::memory_properties() const
{
    return get_info<VkPhysicalGpuMemoryProperties>(gpu_, VK_INFO_TYPE_PHYSICAL_GPU_MEMORY_PROPERTIES, 1)[0];
}

std::vector<const char *> PhysicalGpu::layers(std::vector<char> &buf) const
{
    const size_t max_layer_count = 16;
    const size_t max_string_size = 256;

    buf.resize(max_layer_count * max_string_size);

    std::vector<const char *> layers;
    layers.reserve(max_layer_count);
    for (size_t i = 0; i < max_layer_count; i++)
        layers.push_back(&buf[0] + max_string_size * i);

    char * const *out = const_cast<char * const *>(&layers[0]);
    size_t count;
    if (!EXPECT(vkEnumerateLayers(gpu_, max_layer_count, max_string_size, &count, out, NULL) == VK_SUCCESS))
        count = 0;
    layers.resize(count);

    return layers;
}
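
// Illustrative usage sketch (gpu is a hypothetical PhysicalGpu): the caller
// provides the backing storage, so buf must outlive the returned pointers.
//
//     std::vector<char> buf;
//     std::vector<const char *> names = gpu.layers(buf);
//     for (size_t i = 0; i < names.size(); i++)
//         std::cout << names[i] << "\n";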

std::vector<const char *> PhysicalGpu::extensions() const
{
    static const char *known_exts[] = {
        "VK_WSI_X11",
    };

    std::vector<const char *> exts;
    for (size_t i = 0; i < sizeof(known_exts) / sizeof(known_exts[0]); i++) {
        VkResult err = vkGetExtensionSupport(gpu_, known_exts[i]);
        if (err == VK_SUCCESS)
            exts.push_back(known_exts[i]);
    }

    return exts;
}

VkGpuCompatibilityInfo PhysicalGpu::compatibility(const PhysicalGpu &other) const
{
    VkGpuCompatibilityInfo data;
    if (!EXPECT(vkGetMultiGpuCompatibility(gpu_, other.gpu_, &data) == VK_SUCCESS))
        memset(&data, 0, sizeof(data));

    return data;
}

void BaseObject::init(VkBaseObject obj, bool own)
{
    EXPECT(!initialized());
    reinit(obj, own);
}

void BaseObject::reinit(VkBaseObject obj, bool own)
{
    obj_ = obj;
    own_obj_ = own;
}

uint32_t BaseObject::memory_allocation_count() const
{
    return get_info<uint32_t>(obj_, VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT, 1)[0];
}

std::vector<VkMemoryRequirements> BaseObject::memory_requirements() const
{
    VkResult err;
    uint32_t num_allocations = 0;
    size_t num_alloc_size = sizeof(num_allocations);
    err = vkGetObjectInfo(obj_, VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
                          &num_alloc_size, &num_allocations);
    EXPECT(err == VK_SUCCESS && num_alloc_size == sizeof(num_allocations));
    std::vector<VkMemoryRequirements> info =
        get_info<VkMemoryRequirements>(obj_, VK_INFO_TYPE_MEMORY_REQUIREMENTS, 0);
    EXPECT(info.size() == num_allocations);
    if (info.size() == 1 && !info[0].size)
        info.clear();

    return info;
}

void Object::init(VkObject obj, bool own)
{
    BaseObject::init(obj, own);
    mem_alloc_count_ = memory_allocation_count();
}

void Object::reinit(VkObject obj, bool own)
{
    cleanup();
    BaseObject::reinit(obj, own);
    mem_alloc_count_ = memory_allocation_count();
}

void Object::cleanup()
{
    if (!initialized())
        return;

    if (bound) {
        unbind_memory();
    }

    if (internal_mems_) {
        delete[] internal_mems_;
        internal_mems_ = NULL;
        primary_mem_ = NULL;
    }

    mem_alloc_count_ = 0;

    if (own())
        EXPECT(vkDestroyObject(obj()) == VK_SUCCESS);
}

void Object::bind_memory(uint32_t alloc_idx, const GpuMemory &mem, VkGpuSize mem_offset)
{
    bound = true;
    EXPECT(vkBindObjectMemory(obj(), alloc_idx, mem.obj(), mem_offset) == VK_SUCCESS);
}

void Object::bind_memory(uint32_t alloc_idx, VkGpuSize offset, VkGpuSize size,
                         const GpuMemory &mem, VkGpuSize mem_offset)
{
    bound = true;
    EXPECT(!alloc_idx && vkBindObjectMemoryRange(obj(), 0, offset, size, mem.obj(), mem_offset) == VK_SUCCESS);
}

void Object::unbind_memory(uint32_t alloc_idx)
{
    EXPECT(vkBindObjectMemory(obj(), alloc_idx, VK_NULL_HANDLE, 0) == VK_SUCCESS);
}

void Object::unbind_memory()
{
    for (uint32_t i = 0; i < mem_alloc_count_; i++)
        unbind_memory(i);
}

void Object::alloc_memory(const Device &dev, bool for_buf, bool for_img)
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new GpuMemory[mem_alloc_count_];

    const std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    std::vector<VkImageMemoryRequirements> img_reqs;
    std::vector<VkBufferMemoryRequirements> buf_reqs;
    VkMemoryAllocImageInfo img_info;
    VkMemoryAllocBufferInfo buf_info;
    VkMemoryAllocInfo info, *next_info = NULL;

    if (for_img) {
        img_reqs = get_info<VkImageMemoryRequirements>(obj(),
                        VK_INFO_TYPE_IMAGE_MEMORY_REQUIREMENTS, 0);
        EXPECT(img_reqs.size() == 1);
        next_info = (VkMemoryAllocInfo *) &img_info;
        img_info.pNext = NULL;
        img_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOC_IMAGE_INFO;
        img_info.usage = img_reqs[0].usage;
        img_info.formatClass = img_reqs[0].formatClass;
        img_info.samples = img_reqs[0].samples;
    }

    if (for_buf) {
        buf_reqs = get_info<VkBufferMemoryRequirements>(obj(),
                        VK_INFO_TYPE_BUFFER_MEMORY_REQUIREMENTS, 0);
        EXPECT(buf_reqs.size() == 1);
        if (for_img)
            img_info.pNext = &buf_info;
        else
            next_info = (VkMemoryAllocInfo *) &buf_info;
        buf_info.pNext = NULL;
        buf_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOC_BUFFER_INFO;
        buf_info.usage = buf_reqs[0].usage;
    }

    for (size_t i = 0; i < mem_reqs.size(); i++) {
        info = GpuMemory::alloc_info(mem_reqs[i], next_info);

        switch (info.memType) {
        case VK_MEMORY_TYPE_BUFFER:
            EXPECT(for_buf);
            info.memProps |= VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT;
            primary_mem_ = &internal_mems_[i];
            break;
        case VK_MEMORY_TYPE_IMAGE:
            EXPECT(for_img);
            primary_mem_ = &internal_mems_[i];
            break;
        default:
            break;
        }

        internal_mems_[i].init(dev, info);
        bind_memory(i, internal_mems_[i], 0);
    }
}

void Object::alloc_memory(const std::vector<VkGpuMemory> &mems)
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new GpuMemory[mem_alloc_count_];

    const std::vector<VkMemoryRequirements> mem_reqs = memory_requirements();
    if (!EXPECT(mem_reqs.size() == mems.size()))
        return;

    for (size_t i = 0; i < mem_reqs.size(); i++) {
        primary_mem_ = &internal_mems_[i];

        internal_mems_[i].init(mems[i]);
        bind_memory(i, internal_mems_[i], 0);
    }
}

std::vector<VkGpuMemory> Object::memories() const
{
    std::vector<VkGpuMemory> mems;
    if (internal_mems_) {
        mems.reserve(mem_alloc_count_);
        for (uint32_t i = 0; i < mem_alloc_count_; i++)
            mems.push_back(internal_mems_[i].obj());
    }

    return mems;
}

Device::~Device()
{
    if (!initialized())
        return;

    for (int i = 0; i < QUEUE_COUNT; i++) {
        for (std::vector<Queue *>::iterator it = queues_[i].begin(); it != queues_[i].end(); it++)
            delete *it;
        queues_[i].clear();
    }

    EXPECT(vkDestroyDevice(obj()) == VK_SUCCESS);
}

void Device::init(bool enable_layers)
{
    // request all queues
    const std::vector<VkPhysicalGpuQueueProperties> queue_props = gpu_.queue_properties();
    std::vector<VkDeviceQueueCreateInfo> queue_info;
    queue_info.reserve(queue_props.size());
    for (uint32_t i = 0; i < queue_props.size(); i++) {
        VkDeviceQueueCreateInfo qi = {};
        qi.queueNodeIndex = i;
        qi.queueCount = queue_props[i].queueCount;
        if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphics_queue_node_index_ = i;
        }
        queue_info.push_back(qi);
    }

    VkLayerCreateInfo layer_info = {};
    layer_info.sType = VK_STRUCTURE_TYPE_LAYER_CREATE_INFO;

    std::vector<const char *> layers;
    std::vector<char> layer_buf;
    // request all layers
    if (enable_layers) {
        layers = gpu_.layers(layer_buf);
        layer_info.layerCount = layers.size();
        layer_info.ppActiveLayerNames = &layers[0];
    }

    const std::vector<const char *> exts = gpu_.extensions();

    VkDeviceCreateInfo dev_info = {};
    dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    dev_info.pNext = (enable_layers) ? static_cast<void *>(&layer_info) : NULL;
    dev_info.queueRecordCount = queue_info.size();
    dev_info.pRequestedQueues = &queue_info[0];
    dev_info.extensionCount = exts.size();
    dev_info.ppEnabledExtensionNames = &exts[0];
    dev_info.maxValidationLevel = VK_VALIDATION_LEVEL_END_RANGE;
    dev_info.flags = VK_DEVICE_CREATE_VALIDATION_BIT;

    init(dev_info);
}
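
// Illustrative bring-up sketch, assuming a VkPhysicalGpu handle enumerated
// elsewhere in the test harness and a Device constructor that stores it in
// gpu_ (as the code above implies):
//
//     vk_testing::Device dev(gpu_handle);
//     dev.init(false);   // all queues, validation on, no layers
//     // ... run the test ...
//     dev.wait();        // vkDeviceWaitIdle before teardown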

void Device::init(const VkDeviceCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateDevice, gpu_.obj(), &info);

    init_queues();
    init_formats();
}

void Device::init_queues()
{
    VkResult err;
    size_t data_size;
    uint32_t queue_node_count;

    err = vkGetGpuInfo(gpu_.obj(), VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES,
                       &data_size, NULL);
    EXPECT(err == VK_SUCCESS);

    queue_node_count = data_size / sizeof(VkPhysicalGpuQueueProperties);
    EXPECT(queue_node_count >= 1);

    // use a vector rather than a variable-length array, which is not standard C++
    std::vector<VkPhysicalGpuQueueProperties> queue_props(queue_node_count);

    err = vkGetGpuInfo(gpu_.obj(), VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES,
                       &data_size, &queue_props[0]);
    EXPECT(err == VK_SUCCESS);

    for (uint32_t i = 0; i < queue_node_count; i++) {
        VkQueue queue;

        for (uint32_t j = 0; j < queue_props[i].queueCount; j++) {
            err = vkGetDeviceQueue(obj(), i, j, &queue);
            EXPECT(err == VK_SUCCESS);

            if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
                queues_[GRAPHICS].push_back(new Queue(queue));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
                queues_[COMPUTE].push_back(new Queue(queue));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_DMA_BIT) {
                queues_[DMA].push_back(new Queue(queue));
            }
        }
    }

    EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
}

void Device::init_formats()
{
    for (int f = VK_FMT_BEGIN_RANGE; f <= VK_FMT_END_RANGE; f++) {
        const VkFormat fmt = static_cast<VkFormat>(f);
        const VkFormatProperties props = format_properties(fmt);

        if (props.linearTilingFeatures) {
            const Format tmp = { fmt, VK_LINEAR_TILING, props.linearTilingFeatures };
            formats_.push_back(tmp);
        }

        if (props.optimalTilingFeatures) {
            const Format tmp = { fmt, VK_OPTIMAL_TILING, props.optimalTilingFeatures };
            formats_.push_back(tmp);
        }
    }

    EXPECT(!formats_.empty());
}

VkFormatProperties Device::format_properties(VkFormat format)
{
    const VkFormatInfoType type = VK_INFO_TYPE_FORMAT_PROPERTIES;
    VkFormatProperties data;
    size_t size = sizeof(data);
    if (!EXPECT(vkGetFormatInfo(obj(), format, type, &size, &data) == VK_SUCCESS && size == sizeof(data)))
        memset(&data, 0, sizeof(data));

    return data;
}

void Device::wait()
{
    EXPECT(vkDeviceWaitIdle(obj()) == VK_SUCCESS);
}

VkResult Device::wait(const std::vector<const Fence *> &fences, bool wait_all, uint64_t timeout)
{
    const std::vector<VkFence> fence_objs = make_objects<VkFence>(fences);
    VkResult err = vkWaitForFences(obj(), fence_objs.size(), &fence_objs[0], wait_all, timeout);
    EXPECT(err == VK_SUCCESS || err == VK_TIMEOUT);

    return err;
}

void Device::begin_descriptor_pool_update(VkDescriptorUpdateMode mode)
{
    EXPECT(vkBeginDescriptorPoolUpdate(obj(), mode) == VK_SUCCESS);
}

void Device::end_descriptor_pool_update(CmdBuffer &cmd)
{
    EXPECT(vkEndDescriptorPoolUpdate(obj(), cmd.obj()) == VK_SUCCESS);
}

void Queue::submit(const std::vector<const CmdBuffer *> &cmds, Fence &fence)
{
    const std::vector<VkCmdBuffer> cmd_objs = make_objects<VkCmdBuffer>(cmds);
    EXPECT(vkQueueSubmit(obj(), cmd_objs.size(), &cmd_objs[0], fence.obj()) == VK_SUCCESS);
}

void Queue::submit(const CmdBuffer &cmd, Fence &fence)
{
    submit(std::vector<const CmdBuffer *>(1, &cmd), fence);
}

void Queue::submit(const CmdBuffer &cmd)
{
    Fence fence;
    submit(cmd, fence);
}
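
// Illustrative submit-and-wait sketch, assuming cmd is a fully recorded
// CmdBuffer and fence an initialized Fence:
//
//     cmd.end();
//     queue.submit(cmd, fence);
//     std::vector<const vk_testing::Fence *> fences(1, &fence);
//     dev.wait(fences, true, ~((uint64_t) 0));   // block until the fence signals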

void Queue::add_mem_references(const std::vector<VkGpuMemory> &mem_refs)
{
    for (size_t i = 0; i < mem_refs.size(); i++) {
        EXPECT(vkQueueAddMemReference(obj(), mem_refs[i]) == VK_SUCCESS);
    }
}

void Queue::remove_mem_references(const std::vector<VkGpuMemory> &mem_refs)
{
    for (size_t i = 0; i < mem_refs.size(); i++) {
        EXPECT(vkQueueRemoveMemReference(obj(), mem_refs[i]) == VK_SUCCESS);
    }
}

void Queue::wait()
{
    EXPECT(vkQueueWaitIdle(obj()) == VK_SUCCESS);
}

void Queue::signal_semaphore(Semaphore &sem)
{
    EXPECT(vkQueueSignalSemaphore(obj(), sem.obj()) == VK_SUCCESS);
}

void Queue::wait_semaphore(Semaphore &sem)
{
    EXPECT(vkQueueWaitSemaphore(obj(), sem.obj()) == VK_SUCCESS);
}

GpuMemory::~GpuMemory()
{
    if (initialized() && own())
        EXPECT(vkFreeMemory(obj()) == VK_SUCCESS);
}

void GpuMemory::init(const Device &dev, const VkMemoryAllocInfo &info)
{
    DERIVED_OBJECT_INIT(vkAllocMemory, dev.obj(), &info);
}

void GpuMemory::init(const Device &dev, size_t size, const void *data)
{
    DERIVED_OBJECT_INIT(vkPinSystemMemory, dev.obj(), data, size);
}

void GpuMemory::init(const Device &dev, const VkMemoryOpenInfo &info)
{
    DERIVED_OBJECT_INIT(vkOpenSharedMemory, dev.obj(), &info);
}

void GpuMemory::init(const Device &dev, const VkPeerMemoryOpenInfo &info)
{
    DERIVED_OBJECT_INIT(vkOpenPeerMemory, dev.obj(), &info);
}

void GpuMemory::set_priority(VkMemoryPriority priority)
{
    EXPECT(vkSetMemoryPriority(obj(), priority) == VK_SUCCESS);
}

const void *GpuMemory::map(VkFlags flags) const
{
    void *data;
    if (!EXPECT(vkMapMemory(obj(), flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void *GpuMemory::map(VkFlags flags)
{
    void *data;
    if (!EXPECT(vkMapMemory(obj(), flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}
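
// Illustrative CPU round trip, assuming the memory was allocated with
// VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT (as alloc_memory() requests for
// buffers above); alloc_size is a hypothetical name:
//
//     void *ptr = mem.map(0);
//     if (ptr) {
//         memset(ptr, 0, (size_t) alloc_size);
//         mem.unmap();
//     }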

void GpuMemory::unmap() const
{
    EXPECT(vkUnmapMemory(obj()) == VK_SUCCESS);
}

void Fence::init(const Device &dev, const VkFenceCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateFence, dev.obj(), &info);
    alloc_memory(dev);
}

void Semaphore::init(const Device &dev, const VkSemaphoreCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateSemaphore, dev.obj(), &info);
    alloc_memory(dev);
}

void Semaphore::init(const Device &dev, const VkSemaphoreOpenInfo &info)
{
    DERIVED_OBJECT_INIT(vkOpenSharedSemaphore, dev.obj(), &info);
}

void Event::init(const Device &dev, const VkEventCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateEvent, dev.obj(), &info);
    alloc_memory(dev);
}

void Event::set()
{
    EXPECT(vkSetEvent(obj()) == VK_SUCCESS);
}

void Event::reset()
{
    EXPECT(vkResetEvent(obj()) == VK_SUCCESS);
}

void QueryPool::init(const Device &dev, const VkQueryPoolCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateQueryPool, dev.obj(), &info);
    alloc_memory(dev);
}

VkResult QueryPool::results(uint32_t start, uint32_t count, size_t size, void *data)
{
    size_t tmp = size;
    VkResult err = vkGetQueryPoolResults(obj(), start, count, &tmp, data);
    if (err == VK_SUCCESS) {
        if (!EXPECT(tmp == size))
            memset(data, 0, size);
    } else {
        EXPECT(err == VK_NOT_READY);
    }

    return err;
}
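
// Illustrative polling loop: results() tolerates VK_NOT_READY, so a test can
// spin until the GPU has written the query data (pool is hypothetical):
//
//     uint64_t value;
//     while (pool.results(0, 1, sizeof(value), &value) == VK_NOT_READY)
//         ;   // query 0 not yet available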

void Buffer::init(const Device &dev, const VkBufferCreateInfo &info)
{
    init_no_mem(dev, info);
    alloc_memory(dev, true, false);
}

void Buffer::init_no_mem(const Device &dev, const VkBufferCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateBuffer, dev.obj(), &info);
    create_info_ = info;
}

void BufferView::init(const Device &dev, const VkBufferViewCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateBufferView, dev.obj(), &info);
    alloc_memory(dev);
}

void Image::init(const Device &dev, const VkImageCreateInfo &info)
{
    init_no_mem(dev, info);
    alloc_memory(dev, info.tiling == VK_LINEAR_TILING, true);
}

void Image::init_no_mem(const Device &dev, const VkImageCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateImage, dev.obj(), &info);
    init_info(dev, info);
}

void Image::init(const Device &dev, const VkPeerImageOpenInfo &info, const VkImageCreateInfo &original_info)
{
    VkImage img;
    VkGpuMemory mem;
    EXPECT(vkOpenPeerImage(dev.obj(), &info, &img, &mem) == VK_SUCCESS);
    Object::init(img);

    init_info(dev, original_info);
    alloc_memory(std::vector<VkGpuMemory>(1, mem));
}

void Image::init_info(const Device &dev, const VkImageCreateInfo &info)
{
    create_info_ = info;

    for (std::vector<Device::Format>::const_iterator it = dev.formats().begin(); it != dev.formats().end(); it++) {
        if (memcmp(&it->format, &create_info_.format, sizeof(it->format)) == 0 && it->tiling == create_info_.tiling) {
            format_features_ = it->features;
            break;
        }
    }
}

void Image::bind_memory(uint32_t alloc_idx, const VkImageMemoryBindInfo &info,
                        const GpuMemory &mem, VkGpuSize mem_offset)
{
    EXPECT(!alloc_idx && vkBindImageMemoryRange(obj(), 0, &info, mem.obj(), mem_offset) == VK_SUCCESS);
}

VkSubresourceLayout Image::subresource_layout(const VkImageSubresource &subres) const
{
    const VkSubresourceInfoType type = VK_INFO_TYPE_SUBRESOURCE_LAYOUT;
    VkSubresourceLayout data;
    size_t size = sizeof(data);
    if (!EXPECT(vkGetImageSubresourceInfo(obj(), &subres, type, &size, &data) == VK_SUCCESS && size == sizeof(data)))
        memset(&data, 0, sizeof(data));

    return data;
}

bool Image::transparent() const
{
    return (create_info_.tiling == VK_LINEAR_TILING &&
            create_info_.samples == 1 &&
            !(create_info_.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                    VK_IMAGE_USAGE_DEPTH_STENCIL_BIT)));
}

void ImageView::init(const Device &dev, const VkImageViewCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateImageView, dev.obj(), &info);
    alloc_memory(dev);
}

void ColorAttachmentView::init(const Device &dev, const VkColorAttachmentViewCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateColorAttachmentView, dev.obj(), &info);
    alloc_memory(dev);
}

void DepthStencilView::init(const Device &dev, const VkDepthStencilViewCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateDepthStencilView, dev.obj(), &info);
    alloc_memory(dev);
}

void Shader::init(const Device &dev, const VkShaderCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateShader, dev.obj(), &info);
}

VkResult Shader::init_try(const Device &dev, const VkShaderCreateInfo &info)
{
    VkShader sh;
    VkResult err = vkCreateShader(dev.obj(), &info, &sh);
    if (err == VK_SUCCESS)
        Object::init(sh);

    return err;
}

void Pipeline::init(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateGraphicsPipeline, dev.obj(), &info);
    alloc_memory(dev);
}

void Pipeline::init(
        const Device &dev,
        const VkGraphicsPipelineCreateInfo &info,
        const VkPipeline basePipeline)
{
    DERIVED_OBJECT_INIT(vkCreateGraphicsPipelineDerivative, dev.obj(), &info, basePipeline);
    alloc_memory(dev);
}

void Pipeline::init(const Device &dev, const VkComputePipelineCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateComputePipeline, dev.obj(), &info);
    alloc_memory(dev);
}

void Pipeline::init(const Device &dev, size_t size, const void *data)
{
    DERIVED_OBJECT_INIT(vkLoadPipeline, dev.obj(), size, data);
    alloc_memory(dev);
}

void Pipeline::init(
        const Device &dev,
        size_t size,
        const void *data,
        const VkPipeline basePipeline)
{
    DERIVED_OBJECT_INIT(vkLoadPipelineDerivative, dev.obj(), size, data, basePipeline);
    alloc_memory(dev);
}

size_t Pipeline::store(size_t size, void *data)
{
    if (!EXPECT(vkStorePipeline(obj(), &size, data) == VK_SUCCESS))
        size = 0;

    return size;
}

void Sampler::init(const Device &dev, const VkSamplerCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateSampler, dev.obj(), &info);
    alloc_memory(dev);
}

void DescriptorSetLayout::init(const Device &dev, const VkDescriptorSetLayoutCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateDescriptorSetLayout, dev.obj(), &info);
    alloc_memory(dev);
}

void DescriptorSetLayoutChain::init(const Device &dev, const std::vector<const DescriptorSetLayout *> &layouts)
{
    const std::vector<VkDescriptorSetLayout> layout_objs = make_objects<VkDescriptorSetLayout>(layouts);

    DERIVED_OBJECT_INIT(vkCreateDescriptorSetLayoutChain, dev.obj(), layout_objs.size(), &layout_objs[0]);
    alloc_memory(dev);
}

void DescriptorPool::init(const Device &dev, VkDescriptorPoolUsage usage,
                          uint32_t max_sets, const VkDescriptorPoolCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateDescriptorPool, dev.obj(), usage, max_sets, &info);
    alloc_memory(dev);
}

void DescriptorPool::reset()
{
    EXPECT(vkResetDescriptorPool(obj()) == VK_SUCCESS);
}

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(VkDescriptorSetUsage usage, const std::vector<const DescriptorSetLayout *> &layouts)
{
    const std::vector<VkDescriptorSetLayout> layout_objs = make_objects<VkDescriptorSetLayout>(layouts);

    std::vector<VkDescriptorSet> set_objs;
    set_objs.resize(layout_objs.size());

    // initialize so a failed allocation cannot leave set_count undefined below
    uint32_t set_count = 0;
    VkResult err = vkAllocDescriptorSets(obj(), usage, layout_objs.size(), &layout_objs[0], &set_objs[0], &set_count);
    if (err == VK_SUCCESS)
        EXPECT(set_count == set_objs.size());
    set_objs.resize(set_count);

    std::vector<DescriptorSet *> sets;
    sets.reserve(set_count);
    for (std::vector<VkDescriptorSet>::const_iterator it = set_objs.begin(); it != set_objs.end(); it++) {
        // do descriptor sets need memories bound?
        sets.push_back(new DescriptorSet(*it));
    }

    return sets;
}

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(VkDescriptorSetUsage usage, const DescriptorSetLayout &layout, uint32_t count)
{
    return alloc_sets(usage, std::vector<const DescriptorSetLayout *>(count, &layout));
}

DescriptorSet *DescriptorPool::alloc_sets(VkDescriptorSetUsage usage, const DescriptorSetLayout &layout)
{
    std::vector<DescriptorSet *> set = alloc_sets(usage, layout, 1);
    return (set.empty()) ? NULL : set[0];
}
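
// Illustrative allocation sketch, assuming an initialized pool and layout;
// the usage enumerant name is an assumption about the vulkan.h of this
// revision, not taken from this file:
//
//     vk_testing::DescriptorSet *set =
//         pool.alloc_sets(VK_DESCRIPTOR_SET_USAGE_STATIC, layout);
//     // ... set->update(...) ...
//     delete set;
//
// The caller owns the returned wrappers: alloc_sets() new's each
// DescriptorSet, so each one must be deleted by the caller.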

void DescriptorPool::clear_sets(const std::vector<DescriptorSet *> &sets)
{
    const std::vector<VkDescriptorSet> set_objs = make_objects<VkDescriptorSet>(sets);
    vkClearDescriptorSets(obj(), set_objs.size(), &set_objs[0]);
}

void DescriptorSet::update(const std::vector<const void *> &update_array)
{
    vkUpdateDescriptors(obj(), update_array.size(), const_cast<const void **>(&update_array[0]));
}

void DynamicVpStateObject::init(const Device &dev, const VkDynamicVpStateCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateDynamicViewportState, dev.obj(), &info);
    alloc_memory(dev);
}

void DynamicRsStateObject::init(const Device &dev, const VkDynamicRsStateCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateDynamicRasterState, dev.obj(), &info);
    alloc_memory(dev);
}

void DynamicCbStateObject::init(const Device &dev, const VkDynamicCbStateCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateDynamicColorBlendState, dev.obj(), &info);
    alloc_memory(dev);
}

void DynamicDsStateObject::init(const Device &dev, const VkDynamicDsStateCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateDynamicDepthStencilState, dev.obj(), &info);
    alloc_memory(dev);
}

void CmdBuffer::init(const Device &dev, const VkCmdBufferCreateInfo &info)
{
    DERIVED_OBJECT_INIT(vkCreateCommandBuffer, dev.obj(), &info);
}

void CmdBuffer::begin(const VkCmdBufferBeginInfo *info)
{
    EXPECT(vkBeginCommandBuffer(obj(), info) == VK_SUCCESS);
}

void CmdBuffer::begin(VkRenderPass renderpass_obj, VkFramebuffer framebuffer_obj)
{
    VkCmdBufferBeginInfo info = {};
    VkCmdBufferGraphicsBeginInfo graphics_cmd_buf_info = {};
    graphics_cmd_buf_info.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_GRAPHICS_BEGIN_INFO;
    graphics_cmd_buf_info.pNext = NULL;
    graphics_cmd_buf_info.renderPassContinue.renderPass = renderpass_obj;
    graphics_cmd_buf_info.renderPassContinue.framebuffer = framebuffer_obj;

    info.flags = VK_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT |
                 VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT;
    info.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO;
    info.pNext = &graphics_cmd_buf_info;

    begin(&info);
}

void CmdBuffer::begin()
{
    VkCmdBufferBeginInfo info = {};
    info.flags = VK_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT |
                 VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT;
    info.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO;

    begin(&info);
}
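
// Illustrative recording sketch tying the pieces together (dev, info, and
// queue are assumed to be set up as above):
//
//     vk_testing::CmdBuffer cmd;
//     cmd.init(dev, info);   // vkCreateCommandBuffer
//     cmd.begin();           // one-time-submit / small-batch flags
//     // ... record vkCmd*() calls against cmd.obj() ...
//     cmd.end();
//     queue.submit(cmd);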

void CmdBuffer::end()
{
    EXPECT(vkEndCommandBuffer(obj()) == VK_SUCCESS);
}

void CmdBuffer::reset()
{
    EXPECT(vkResetCommandBuffer(obj()) == VK_SUCCESS);
}

} // namespace vk_testing