vktestbinding.cpp revision 902d08122fca004bc59fecd27efe08a653ce31ec
1// VK tests
2//
3// Copyright (C) 2014 LunarG, Inc.
4//
5// Permission is hereby granted, free of charge, to any person obtaining a
6// copy of this software and associated documentation files (the "Software"),
7// to deal in the Software without restriction, including without limitation
8// the rights to use, copy, modify, merge, publish, distribute, sublicense,
9// and/or sell copies of the Software, and to permit persons to whom the
10// Software is furnished to do so, subject to the following conditions:
11//
12// The above copyright notice and this permission notice shall be included
13// in all copies or substantial portions of the Software.
14//
15// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21// DEALINGS IN THE SOFTWARE.
22
23#include <iostream>
24#include <string.h> // memset(), memcmp()
25#include <assert.h>
26#include <stdarg.h>
27#include "vktestbinding.h"
28
29namespace {
30
// Creates a non-dispatchable Vulkan object with `create_func` and, on
// success, records the owning device and the new handle via the
// NonDispHandle base class.  `handle_type` is the wrapper's handle typedef.
#define NON_DISPATCHABLE_HANDLE_INIT(create_func, dev, ...)                         \
    do {                                                                            \
        handle_type handle;                                                         \
        if (EXPECT(create_func(dev.handle(), __VA_ARGS__, &handle) == VK_SUCCESS))  \
            NonDispHandle::init(dev.handle(), handle);                              \
    } while (0)

// Generates a destructor for wrapper class `cls` that destroys the wrapped
// Vulkan object with `destroy_func`, but only if init() ever succeeded.
#define NON_DISPATCHABLE_HANDLE_DTOR(cls, destroy_func)                        \
    cls::~cls()                                                                     \
    {                                                                               \
        if (initialized())                                                          \
            EXPECT(destroy_func(device(), handle()) == VK_SUCCESS);    \
    }

#define STRINGIFY(x) #x
// Evaluates `expr`; when it is false, reports through expect_failure() and
// yields false so callers can branch on the result.
#define EXPECT(expr) ((expr) ? true : expect_failure(STRINGIFY(expr), __FILE__, __LINE__, __FUNCTION__))


// Callback invoked on failed expectations; when unset (NULL), failures are
// written to std::cerr instead.
vk_testing::ErrorCallback error_callback;
50
51bool expect_failure(const char *expr, const char *file, unsigned int line, const char *function)
52{
53    if (error_callback) {
54        error_callback(expr, file, line, function);
55    } else {
56        std::cerr << file << ":" << line << ": " << function <<
57            ": Expectation `" << expr << "' failed.\n";
58    }
59
60    return false;
61}
62
// Collect the handle() of every wrapper pointer in `v` into a plain vector
// of Vulkan handles of type T, preserving order.
template<class T, class S>
std::vector<T> make_handles(const std::vector<S> &v)
{
    std::vector<T> handles;
    handles.reserve(v.size());
    for (size_t i = 0; i < v.size(); i++)
        handles.push_back(v[i]->handle());
    return handles;
}
72
// Build a VkMemoryAllocInfo for a buffer/image: the allocation size comes
// from the resource's memory requirements, and set_memory_type() picks a
// memory type index compatible with both reqs.memoryTypeBits and mem_props.
VkMemoryAllocInfo get_resource_alloc_info(const vk_testing::Device &dev, const VkMemoryRequirements &reqs, VkMemoryPropertyFlags mem_props)
{
    VkMemoryAllocInfo info = vk_testing::DeviceMemory::alloc_info(reqs.size, 0);
    dev.phy().set_memory_type(reqs.memoryTypeBits, &info, mem_props);

    return info;
}
80
81} // namespace
82
83namespace vk_testing {
84
// Install the callback used to report failed EXPECT() checks.  Passing a
// null callback restores the default std::cerr reporting.
void set_error_callback(ErrorCallback callback)
{
    error_callback = callback;
}

// Query the general properties of this physical device.
// NOTE(review): on failure `info` is returned uninitialized — callers are
// expected to notice the EXPECT failure.
VkPhysicalDeviceProperties PhysicalDevice::properties() const
{
    VkPhysicalDeviceProperties info;

    EXPECT(vkGetPhysicalDeviceProperties(handle(), &info) == VK_SUCCESS);

    return info;
}

// Query the performance information of this physical device (same caveat as
// properties(): uninitialized on failure).
VkPhysicalDevicePerformance PhysicalDevice::performance() const
{
    VkPhysicalDevicePerformance info;

    EXPECT(vkGetPhysicalDevicePerformance(handle(), &info) == VK_SUCCESS);

    return info;
}

// Enumerate the queue families of this physical device.  Returns an empty
// vector if either the count or the properties query fails.
std::vector<VkPhysicalDeviceQueueProperties> PhysicalDevice::queue_properties() const
{
    std::vector<VkPhysicalDeviceQueueProperties> info;
    uint32_t count;

    if (EXPECT(vkGetPhysicalDeviceQueueCount(handle(), &count) == VK_SUCCESS)) {
        info.resize(count);
        if (!EXPECT(vkGetPhysicalDeviceQueueProperties(handle(), count, &info[0]) == VK_SUCCESS))
            info.clear();
    }

    return info;
}

// Query the memory heaps/types of this physical device (uninitialized on
// failure, as with properties()).
VkPhysicalDeviceMemoryProperties PhysicalDevice::memory_properties() const
{
    VkPhysicalDeviceMemoryProperties info;

    EXPECT(vkGetPhysicalDeviceMemoryProperties(handle(), &info) == VK_SUCCESS);


    return info;
}
131
132/*
133 * Return list of Global layers available
134 */
135std::vector<VkLayerProperties> GetGlobalLayers()
136{
137    VkResult err;
138    std::vector<VkLayerProperties> layers;
139    uint32_t layer_count;
140
141    do {
142        layer_count = 0;
143        err = vkGetGlobalLayerProperties(&layer_count, NULL);
144
145        if (err == VK_SUCCESS) {
146            layers.reserve(layer_count);
147            err = vkGetGlobalLayerProperties(&layer_count, &layers[0]);
148        }
149    } while (err == VK_INCOMPLETE);
150
151    assert(err == VK_SUCCESS);
152
153    return layers;
154}
155
156/*
157 * Return list of Global extensions provided by the ICD / Loader
158 */
159std::vector<VkExtensionProperties> GetGlobalExtensions()
160{
161    return GetGlobalExtensions(NULL);
162}
163
164/*
165 * Return list of Global extensions provided by the specified layer
166 * If pLayerName is NULL, will return extensions implemented by the loader / ICDs
167 */
168std::vector<VkExtensionProperties> GetGlobalExtensions(const char *pLayerName)
169{
170    std::vector<VkExtensionProperties> exts;
171    uint32_t ext_count;
172    VkResult err;
173
174    do {
175        ext_count = 0;
176        err = vkGetGlobalExtensionProperties(pLayerName, &ext_count, NULL);
177
178        if (err == VK_SUCCESS) {
179            exts.resize(ext_count);
180            err = vkGetGlobalExtensionProperties(pLayerName, &ext_count, &exts[0]);
181        }
182    } while (err == VK_INCOMPLETE);
183
184    assert(err == VK_SUCCESS);
185
186    return exts;
187}
188
189/*
190 * Return list of PhysicalDevice extensions provided by the ICD / Loader
191 */
192std::vector<VkExtensionProperties> PhysicalDevice::extensions() const
193{
194    return extensions(NULL);
195}
196
197/*
198 * Return list of PhysicalDevice extensions provided by the specified layer
199 * If pLayerName is NULL, will return extensions for ICD / loader.
200 */
201std::vector<VkExtensionProperties> PhysicalDevice::extensions(const char *pLayerName) const
202{
203    std::vector<VkExtensionProperties> exts;
204    VkResult err;
205
206    do {
207        uint32_t extCount = 0;
208        err = vkGetPhysicalDeviceExtensionProperties(handle(), pLayerName, &extCount, NULL);
209
210        if (err == VK_SUCCESS) {
211            exts.reserve(extCount);
212            err = vkGetPhysicalDeviceExtensionProperties(handle(), pLayerName, &extCount, &exts[0]);
213        }
214    } while (err == VK_INCOMPLETE);
215
216    assert(err == VK_SUCCESS);
217
218    return exts;
219}
220
221VkResult PhysicalDevice::set_memory_type(const uint32_t type_bits, VkMemoryAllocInfo *info, const VkFlags properties) const
222{
223     uint32_t type_mask = type_bits;
224     // Search memtypes to find first index with those properties
225     for (uint32_t i = 0; i < 32; i++) {
226         if ((type_mask & 1) == 1) {
227             // Type is available, does it match user properties?
228             if ((memory_properties_.memoryTypes[i].propertyFlags & properties) == properties) {
229                 info->memoryTypeIndex = i;
230                 return VK_SUCCESS;
231             }
232         }
233         type_mask >>= 1;
234     }
235     // No memory types matched, return failure
236     return VK_UNSUPPORTED;
237}
238
239/*
240 * Return list of PhysicalDevice layers
241 */
242std::vector<VkLayerProperties> PhysicalDevice::layers() const
243{
244    std::vector<VkLayerProperties> layer_props;
245    VkResult err;
246
247    do {
248        uint32_t layer_count = 0;
249        err = vkGetPhysicalDeviceLayerProperties(handle(), &layer_count, NULL);
250
251        if (err == VK_SUCCESS) {
252            layer_props.reserve(layer_count);
253            err = vkGetPhysicalDeviceLayerProperties(handle(), &layer_count, &layer_props[0]);
254        }
255    } while (err == VK_INCOMPLETE);
256
257    assert(err == VK_SUCCESS);
258
259    return layer_props;
260}
261
// Destroy the device: delete all Queue wrappers (the VkQueue handles
// themselves are owned by the device) and then destroy the VkDevice.
Device::~Device()
{
    if (!initialized())
        return;

    for (int i = 0; i < QUEUE_COUNT; i++) {
        for (std::vector<Queue *>::iterator it = queues_[i].begin(); it != queues_[i].end(); it++)
            delete *it;
        queues_[i].clear();
    }

    EXPECT(vkDestroyDevice(handle()) == VK_SUCCESS);
}
275
276void Device::init(std::vector<const char *> &layers, std::vector<const char *> &extensions)
277{
278    // request all queues
279    const std::vector<VkPhysicalDeviceQueueProperties> queue_props = phy_.queue_properties();
280    std::vector<VkDeviceQueueCreateInfo> queue_info;
281    queue_info.reserve(queue_props.size());
282    for (int i = 0; i < queue_props.size(); i++) {
283        VkDeviceQueueCreateInfo qi = {};
284        qi.queueNodeIndex = i;
285        qi.queueCount = queue_props[i].queueCount;
286        if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
287            graphics_queue_node_index_ = i;
288        }
289        queue_info.push_back(qi);
290    }
291
292    VkDeviceCreateInfo dev_info = {};
293    dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
294    dev_info.pNext = NULL;
295    dev_info.queueRecordCount = queue_info.size();
296    dev_info.pRequestedQueues = &queue_info[0];
297    dev_info.layerCount = layers.size();
298    dev_info.ppEnabledLayerNames = &layers[0];
299    dev_info.extensionCount = extensions.size();
300    dev_info.ppEnabledExtensionNames = &extensions[0];
301    dev_info.flags = 0;
302
303    init(dev_info);
304}
305
306void Device::init(const VkDeviceCreateInfo &info)
307{
308    VkDevice dev;
309
310    if (EXPECT(vkCreateDevice(phy_.handle(), &info, &dev) == VK_SUCCESS))
311        Handle::init(dev);
312
313    init_queues();
314    init_formats();
315}
316
317void Device::init_queues()
318{
319    VkResult err;
320    uint32_t queue_node_count;
321
322    err = vkGetPhysicalDeviceQueueCount(phy_.handle(), &queue_node_count);
323    EXPECT(err == VK_SUCCESS);
324    EXPECT(queue_node_count >= 1);
325
326    VkPhysicalDeviceQueueProperties* queue_props = new VkPhysicalDeviceQueueProperties[queue_node_count];
327
328    err = vkGetPhysicalDeviceQueueProperties(phy_.handle(), queue_node_count, queue_props);
329    EXPECT(err == VK_SUCCESS);
330
331    for (uint32_t i = 0; i < queue_node_count; i++) {
332        VkQueue queue;
333
334        for (uint32_t j = 0; j < queue_props[i].queueCount; j++) {
335            // TODO: Need to add support for separate MEMMGR and work queues, including synchronization
336            err = vkGetDeviceQueue(handle(), i, j, &queue);
337            EXPECT(err == VK_SUCCESS);
338
339            if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
340                queues_[GRAPHICS].push_back(new Queue(queue));
341            }
342
343            if (queue_props[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
344                queues_[COMPUTE].push_back(new Queue(queue));
345            }
346
347            if (queue_props[i].queueFlags & VK_QUEUE_DMA_BIT) {
348                queues_[DMA].push_back(new Queue(queue));
349            }
350        }
351    }
352
353    delete[] queue_props;
354
355    EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
356}
357
// Build the formats_ table: for every VkFormat, record a (format, tiling,
// features) entry for linear and/or optimal tiling when the corresponding
// feature mask is non-zero.
void Device::init_formats()
{
    for (int f = VK_FORMAT_BEGIN_RANGE; f <= VK_FORMAT_END_RANGE; f++) {
        const VkFormat fmt = static_cast<VkFormat>(f);
        const VkFormatProperties props = format_properties(fmt);

        if (props.linearTilingFeatures) {
            const Format tmp = { fmt, VK_IMAGE_TILING_LINEAR, props.linearTilingFeatures };
            formats_.push_back(tmp);
        }

        if (props.optimalTilingFeatures) {
            const Format tmp = { fmt, VK_IMAGE_TILING_OPTIMAL, props.optimalTilingFeatures };
            formats_.push_back(tmp);
        }
    }

    EXPECT(!formats_.empty());
}

// Query the tiling features of a single format; returns an all-zero struct
// on failure.
VkFormatProperties Device::format_properties(VkFormat format)
{
    VkFormatProperties data;
    if (!EXPECT(vkGetPhysicalDeviceFormatInfo(phy().handle(), format, &data) == VK_SUCCESS))
        memset(&data, 0, sizeof(data));

    return data;
}

// Block until the device is idle.
void Device::wait()
{
    EXPECT(vkDeviceWaitIdle(handle()) == VK_SUCCESS);
}

// Wait on a set of fences.  VK_TIMEOUT is treated as an acceptable outcome;
// any other non-success result trips an EXPECT failure.
VkResult Device::wait(const std::vector<const Fence *> &fences, bool wait_all, uint64_t timeout)
{
    const std::vector<VkFence> fence_handles = make_handles<VkFence>(fences);
    VkResult err = vkWaitForFences(handle(), fence_handles.size(), &fence_handles[0], wait_all, timeout);
    EXPECT(err == VK_SUCCESS || err == VK_TIMEOUT);

    return err;
}

// Apply a batch of descriptor set writes and copies.
VkResult Device::update_descriptor_sets(const std::vector<VkWriteDescriptorSet> &writes, const std::vector<VkCopyDescriptorSet> &copies)
{
    return vkUpdateDescriptorSets(handle(), writes.size(), &writes[0], copies.size(), &copies[0]);
}
405
// Submit a batch of command buffers, signaling `fence` on completion.
void Queue::submit(const std::vector<const CmdBuffer *> &cmds, Fence &fence)
{
    const std::vector<VkCmdBuffer> cmd_handles = make_handles<VkCmdBuffer>(cmds);
    EXPECT(vkQueueSubmit(handle(), cmd_handles.size(), &cmd_handles[0], fence.handle()) == VK_SUCCESS);
}

// Submit a single command buffer, signaling `fence` on completion.
void Queue::submit(const CmdBuffer &cmd, Fence &fence)
{
    submit(std::vector<const CmdBuffer*>(1, &cmd), fence);
}

// Submit a single command buffer with no fence the caller can observe.
// NOTE(review): `fence` here is default-constructed and never init()ed, so
// an uninitialized handle is passed to vkQueueSubmit — presumably relying on
// a null-ish default; confirm against the Fence wrapper's default state.
void Queue::submit(const CmdBuffer &cmd)
{
    Fence fence;
    submit(cmd, fence);
}

// Block until the queue is idle.
void Queue::wait()
{
    EXPECT(vkQueueWaitIdle(handle()) == VK_SUCCESS);
}

// Queue a signal operation on `sem`.
void Queue::signal_semaphore(Semaphore &sem)
{
    EXPECT(vkQueueSignalSemaphore(handle(), sem.handle()) == VK_SUCCESS);
}

// Queue a wait operation on `sem`.
void Queue::wait_semaphore(Semaphore &sem)
{
    EXPECT(vkQueueWaitSemaphore(handle(), sem.handle()) == VK_SUCCESS);
}
437
// Free the allocation if init() ever succeeded.
DeviceMemory::~DeviceMemory()
{
    if (initialized())
        EXPECT(vkFreeMemory(device(), handle()) == VK_SUCCESS);
}

// Allocate device memory as described by `info`.
void DeviceMemory::init(const Device &dev, const VkMemoryAllocInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkAllocMemory, dev, &info);
}

// Map the whole allocation (offset 0, size 0 meaning "whole range") for
// read-only access.  Returns NULL on failure.
const void *DeviceMemory::map(VkFlags flags) const
{
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0 ,0, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

// Map the whole allocation for read/write access.  Returns NULL on failure.
void *DeviceMemory::map(VkFlags flags)
{
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0, 0, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

// Undo a previous map().
void DeviceMemory::unmap() const
{
    EXPECT(vkUnmapMemory(device(), handle()) == VK_SUCCESS);
}
471
// Destructor generated by macro: destroys the fence when initialized.
NON_DISPATCHABLE_HANDLE_DTOR(Fence, vkDestroyFence)

// Create a fence from explicit create info.
void Fence::init(const Device &dev, const VkFenceCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateFence, dev, &info);
}

// Destructor generated by macro: destroys the semaphore when initialized.
NON_DISPATCHABLE_HANDLE_DTOR(Semaphore, vkDestroySemaphore)

// Create a semaphore from explicit create info.
void Semaphore::init(const Device &dev, const VkSemaphoreCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateSemaphore, dev, &info);
}

// Destructor generated by macro: destroys the event when initialized.
NON_DISPATCHABLE_HANDLE_DTOR(Event, vkDestroyEvent)

// Create an event from explicit create info.
void Event::init(const Device &dev, const VkEventCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateEvent, dev, &info);
}

// Set the event to signaled state.
void Event::set()
{
    EXPECT(vkSetEvent(device(), handle()) == VK_SUCCESS);
}

// Reset the event to unsignaled state.
void Event::reset()
{
    EXPECT(vkResetEvent(device(), handle()) == VK_SUCCESS);
}

// Destructor generated by macro: destroys the query pool when initialized.
NON_DISPATCHABLE_HANDLE_DTOR(QueryPool, vkDestroyQueryPool)

// Create a query pool from explicit create info.
void QueryPool::init(const Device &dev, const VkQueryPoolCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateQueryPool, dev, &info);
}

// Fetch results for queries [start, start+count).  On a short read the
// buffer is zeroed; VK_NOT_READY is tolerated, any other failure trips an
// EXPECT.
VkResult QueryPool::results(uint32_t start, uint32_t count, size_t size, void *data)
{
    size_t tmp = size;
    VkResult err = vkGetQueryPoolResults(device(), handle(), start, count, &tmp, data, 0);
    if (err == VK_SUCCESS) {
        if (!EXPECT(tmp == size))
            memset(data, 0, size);
    } else {
        EXPECT(err == VK_NOT_READY);
    }

    return err;
}
523
// Destructor generated by macro: destroys the buffer when initialized.
// (internal_mem_, if used, frees itself via DeviceMemory's destructor.)
NON_DISPATCHABLE_HANDLE_DTOR(Buffer, vkDestroyBuffer)

// Create a buffer and back it with freshly allocated memory chosen to match
// mem_props.
void Buffer::init(const Device &dev, const VkBufferCreateInfo &info, VkMemoryPropertyFlags mem_props)
{
    init_no_mem(dev, info);

    internal_mem_.init(dev, get_resource_alloc_info(dev, memory_requirements(), mem_props));
    bind_memory(internal_mem_, 0);
}

// Create the buffer object only; the caller is responsible for binding
// memory.
void Buffer::init_no_mem(const Device &dev, const VkBufferCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateBuffer, dev, &info);
    create_info_ = info;
}

// Query the buffer's memory requirements (uninitialized on failure).
VkMemoryRequirements Buffer::memory_requirements() const
{
    VkMemoryRequirements reqs;

    EXPECT(vkGetBufferMemoryRequirements(device(), handle(), &reqs) == VK_SUCCESS);

    return reqs;
}

// Bind `mem` to the buffer at the given offset.
void Buffer::bind_memory(const DeviceMemory &mem, VkDeviceSize mem_offset)
{
    EXPECT(vkBindBufferMemory(device(), handle(), mem.handle(), mem_offset) == VK_SUCCESS);
}

// Destructor generated by macro: destroys the buffer view when initialized.
NON_DISPATCHABLE_HANDLE_DTOR(BufferView, vkDestroyBufferView)

// Create a buffer view from explicit create info.
void BufferView::init(const Device &dev, const VkBufferViewCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateBufferView, dev, &info);
}
560
// Destructor generated by macro: destroys the image when initialized.
NON_DISPATCHABLE_HANDLE_DTOR(Image, vkDestroyImage)

// Create an image and back it with freshly allocated memory chosen to match
// mem_props.
void Image::init(const Device &dev, const VkImageCreateInfo &info, VkMemoryPropertyFlags mem_props)
{
    init_no_mem(dev, info);

    internal_mem_.init(dev, get_resource_alloc_info(dev, memory_requirements(), mem_props));
    bind_memory(internal_mem_, 0);
}

// Create the image object only; the caller is responsible for binding
// memory.
void Image::init_no_mem(const Device &dev, const VkImageCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateImage, dev, &info);
    init_info(dev, info);
}

// Record the create info and look up the image's format features in the
// device's formats_ table (matching format and tiling).
void Image::init_info(const Device &dev, const VkImageCreateInfo &info)
{
    create_info_ = info;

    for (std::vector<Device::Format>::const_iterator it = dev.formats().begin(); it != dev.formats().end(); it++) {
        if (memcmp(&it->format, &create_info_.format, sizeof(it->format)) == 0 && it->tiling == create_info_.tiling) {
            format_features_ = it->features;
            break;
        }
    }
}

// Query the image's memory requirements (uninitialized on failure).
VkMemoryRequirements Image::memory_requirements() const
{
    VkMemoryRequirements reqs;

    EXPECT(vkGetImageMemoryRequirements(device(), handle(), &reqs) == VK_SUCCESS);

    return reqs;
}

// Bind `mem` to the image at the given offset.
void Image::bind_memory(const DeviceMemory &mem, VkDeviceSize mem_offset)
{
    EXPECT(vkBindImageMemory(device(), handle(), mem.handle(), mem_offset) == VK_SUCCESS);
}
602
603VkSubresourceLayout Image::subresource_layout(const VkImageSubresource &subres) const
604{
605    VkSubresourceLayout data;
606    size_t size = sizeof(data);
607    if (!EXPECT(vkGetImageSubresourceLayout(device(), handle(), &subres, &data) == VK_SUCCESS && size == sizeof(data)))
608        memset(&data, 0, sizeof(data));
609
610    return data;
611}
612
// An image is "transparent" (host-inspectable) when it is linear,
// single-sampled, and not used as a color or depth/stencil attachment.
bool Image::transparent() const
{
    return (create_info_.tiling == VK_IMAGE_TILING_LINEAR &&
            create_info_.samples == 1 &&
            !(create_info_.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                    VK_IMAGE_USAGE_DEPTH_STENCIL_BIT)));
}

// Destructor generated by macro: destroys the image view when initialized.
NON_DISPATCHABLE_HANDLE_DTOR(ImageView, vkDestroyImageView)

// Create an image view from explicit create info.
void ImageView::init(const Device &dev, const VkImageViewCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateImageView, dev, &info);
}

// Destructor generated by macro: destroys the attachment view when
// initialized.
NON_DISPATCHABLE_HANDLE_DTOR(AttachmentView, vkDestroyAttachmentView)

// Create an attachment view from explicit create info.
void AttachmentView::init(const Device &dev, const VkAttachmentViewCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateAttachmentView, dev, &info);
}

// Destructor generated by macro: destroys the shader module when
// initialized.
NON_DISPATCHABLE_HANDLE_DTOR(ShaderModule, vkDestroyShaderModule)

// Create a shader module; failure trips an EXPECT.
void ShaderModule::init(const Device &dev, const VkShaderModuleCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateShaderModule, dev, &info);
}

// Like init(), but reports the result to the caller instead of EXPECTing
// success (used by tests that intentionally feed bad SPIR-V).
VkResult ShaderModule::init_try(const Device &dev, const VkShaderModuleCreateInfo &info)
{
    VkShaderModule mod;

    VkResult err = vkCreateShaderModule(dev.handle(), &info, &mod);
    if (err == VK_SUCCESS)
        NonDispHandle::init(dev.handle(), mod);

    return err;
}

// Destructor generated by macro: destroys the shader when initialized.
NON_DISPATCHABLE_HANDLE_DTOR(Shader, vkDestroyShader)

// Create a shader; failure trips an EXPECT.
void Shader::init(const Device &dev, const VkShaderCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateShader, dev, &info);
}

// Like init(), but reports the result to the caller instead of EXPECTing
// success.
VkResult Shader::init_try(const Device &dev, const VkShaderCreateInfo &info)
{
    VkShader sh;

    VkResult err = vkCreateShader(dev.handle(), &info, &sh);
    if (err == VK_SUCCESS)
        NonDispHandle::init(dev.handle(), sh);

    return err;
}
670
// Destructor generated by macro: destroys the pipeline when initialized.
NON_DISPATCHABLE_HANDLE_DTOR(Pipeline, vkDestroyPipeline)

// Create a graphics pipeline through a throw-away pipeline cache (the cache
// is destroyed immediately after creation, whether or not the pipeline was
// created).
void Pipeline::init(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
{
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    memset((void *) &ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, &cache);
    if (err == VK_SUCCESS) {
        NON_DISPATCHABLE_HANDLE_INIT(vkCreateGraphicsPipelines, dev, cache, 1, &info);
        vkDestroyPipelineCache(dev.handle(), cache);
    }
}
685
686VkResult Pipeline::init_try(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
687{
688    VkPipeline pipe;
689    VkPipelineCache cache;
690    VkPipelineCacheCreateInfo ci;
691    memset((void *) &ci, 0, sizeof(VkPipelineCacheCreateInfo));
692    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
693    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, &cache);
694    EXPECT(err == VK_SUCCESS);
695    if (err == VK_SUCCESS) {
696        err = vkCreateGraphicsPipelines(dev.handle(), cache, 1, &info, &pipe);
697        if (err == VK_SUCCESS) {
698            NonDispHandle::init(dev.handle(), pipe);
699            vkDestroyPipelineCache(dev.handle(), cache);
700        }
701    }
702
703    return err;
704}
705
// Create a compute pipeline through a throw-away pipeline cache (destroyed
// immediately after creation).
void Pipeline::init(const Device &dev, const VkComputePipelineCreateInfo &info)
{
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    memset((void *) &ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, &cache);
    if (err == VK_SUCCESS) {
        NON_DISPATCHABLE_HANDLE_INIT(vkCreateComputePipelines, dev, cache, 1, &info);
        vkDestroyPipelineCache(dev.handle(), cache);
    }
}

// Destructor generated by macro: destroys the pipeline layout when
// initialized.
NON_DISPATCHABLE_HANDLE_DTOR(PipelineLayout, vkDestroyPipelineLayout)

// Create a pipeline layout, filling info.pSetLayouts from the wrapper list.
// NOTE(review): pSetLayouts is pointed at a function-local vector, so the
// caller's `info` holds a dangling pointer after this returns — confirm no
// caller reuses info.pSetLayouts.
void PipelineLayout::init(const Device &dev, VkPipelineLayoutCreateInfo &info, const std::vector<const DescriptorSetLayout *> &layouts)
{
    const std::vector<VkDescriptorSetLayout> layout_handles = make_handles<VkDescriptorSetLayout>(layouts);
    info.pSetLayouts = &layout_handles[0];

    NON_DISPATCHABLE_HANDLE_INIT(vkCreatePipelineLayout, dev, &info);
}

// Destructor generated by macro: destroys the sampler when initialized.
NON_DISPATCHABLE_HANDLE_DTOR(Sampler, vkDestroySampler)

// Create a sampler from explicit create info.
void Sampler::init(const Device &dev, const VkSamplerCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateSampler, dev, &info);
}

// Destructor generated by macro: destroys the descriptor set layout when
// initialized.
NON_DISPATCHABLE_HANDLE_DTOR(DescriptorSetLayout, vkDestroyDescriptorSetLayout)

// Create a descriptor set layout from explicit create info.
void DescriptorSetLayout::init(const Device &dev, const VkDescriptorSetLayoutCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDescriptorSetLayout, dev, &info);
}

// Destructor generated by macro: destroys the descriptor pool when
// initialized.
NON_DISPATCHABLE_HANDLE_DTOR(DescriptorPool, vkDestroyDescriptorPool)

// Create a descriptor pool with the given usage and capacity.
void DescriptorPool::init(const Device &dev, VkDescriptorPoolUsage usage,
                          uint32_t max_sets, const VkDescriptorPoolCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDescriptorPool, dev, usage, max_sets, &info);
}

// Return all sets allocated from this pool back to it.
void DescriptorPool::reset()
{
    EXPECT(vkResetDescriptorPool(device(), handle()) == VK_SUCCESS);
}
755
756std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const std::vector<const DescriptorSetLayout *> &layouts)
757{
758    const std::vector<VkDescriptorSetLayout> layout_handles = make_handles<VkDescriptorSetLayout>(layouts);
759
760    std::vector<VkDescriptorSet> set_handles;
761    set_handles.resize(layout_handles.size());
762
763    uint32_t set_count;
764    VkResult err = vkAllocDescriptorSets(device(), handle(), usage, layout_handles.size(), &layout_handles[0], &set_handles[0], &set_count);
765    if (err == VK_SUCCESS)
766        EXPECT(set_count == set_handles.size());
767    set_handles.resize(set_count);
768
769    std::vector<DescriptorSet *> sets;
770    sets.reserve(set_count);
771    for (std::vector<VkDescriptorSet>::const_iterator it = set_handles.begin(); it != set_handles.end(); it++) {
772        // do descriptor sets need memories bound?
773        DescriptorSet *descriptorSet = new DescriptorSet(dev, handle(), *it);
774        sets.push_back(descriptorSet);
775    }
776    return sets;
777}
778
// Allocate `count` sets all using the same layout.
std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout, uint32_t count)
{
    return alloc_sets(dev, usage, std::vector<const DescriptorSetLayout *>(count, &layout));
}

// Allocate a single set; returns NULL on failure.
DescriptorSet *DescriptorPool::alloc_sets(const Device &dev, VkDescriptorSetUsage usage, const DescriptorSetLayout &layout)
{
    std::vector<DescriptorSet *> set = alloc_sets(dev, usage, layout, 1);
    return (set.empty()) ? NULL : set[0];
}

// Return the set to the pool it was allocated from.
DescriptorSet::~DescriptorSet()
{
    if (initialized()) {
        VkDescriptorSet sets[1] = { handle() };
        EXPECT(vkFreeDescriptorSets(device(), pool_, 1, sets) == VK_SUCCESS);
    }
}
797
// Destructor generated by macro: destroys the state object when initialized.
NON_DISPATCHABLE_HANDLE_DTOR(DynamicViewportState, vkDestroyDynamicViewportState)

// Create a dynamic viewport state object from explicit create info.
void DynamicViewportState::init(const Device &dev, const VkDynamicViewportStateCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDynamicViewportState, dev, &info);
}

// Destructor generated by macro: destroys the state object when initialized.
NON_DISPATCHABLE_HANDLE_DTOR(DynamicRasterState, vkDestroyDynamicRasterState)

// Create a dynamic raster state object from explicit create info.
void DynamicRasterState::init(const Device &dev, const VkDynamicRasterStateCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDynamicRasterState, dev, &info);
}

// Destructor generated by macro: destroys the state object when initialized.
NON_DISPATCHABLE_HANDLE_DTOR(DynamicColorBlendState, vkDestroyDynamicColorBlendState)

// Create a dynamic color-blend state object from explicit create info.
void DynamicColorBlendState::init(const Device &dev, const VkDynamicColorBlendStateCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDynamicColorBlendState, dev, &info);
}

// Destructor generated by macro: destroys the state object when initialized.
NON_DISPATCHABLE_HANDLE_DTOR(DynamicDepthStencilState, vkDestroyDynamicDepthStencilState)

// Create a dynamic depth-stencil state object from explicit create info.
void DynamicDepthStencilState::init(const Device &dev, const VkDynamicDepthStencilStateCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDynamicDepthStencilState, dev, &info);
}

// Destructor generated by macro: destroys the command pool when initialized.
NON_DISPATCHABLE_HANDLE_DTOR(CmdPool, vkDestroyCommandPool)

// Create a command pool from explicit create info.
void CmdPool::init(const Device &dev, const VkCmdPoolCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateCommandPool, dev, &info);
}
832
833
// Destroy the command buffer with the device handle captured at init time
// (command buffers are dispatchable, so the base Handle does not store the
// owning device).
CmdBuffer::~CmdBuffer()
{
    if (initialized())
        EXPECT(vkDestroyCommandBuffer(dev_handle_, handle()) == VK_SUCCESS);
}

// Create a command buffer and remember its owning device for the destructor.
void CmdBuffer::init(const Device &dev, const VkCmdBufferCreateInfo &info)
{
    VkCmdBuffer cmd;

    if (EXPECT(vkCreateCommandBuffer(dev.handle(), &info, &cmd) == VK_SUCCESS)) {
        Handle::init(cmd);
        dev_handle_ = dev.handle();
    }
}

// Begin recording with caller-supplied begin info.
void CmdBuffer::begin(const VkCmdBufferBeginInfo *info)
{
    EXPECT(vkBeginCommandBuffer(handle(), info) == VK_SUCCESS);
}

// Begin recording with default flags (small-batch, one-time-submit
// optimizations).
void CmdBuffer::begin()
{
    VkCmdBufferBeginInfo info = {};
    info.flags = VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT |
          VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT;
    info.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO;

    begin(&info);
}

// Finish recording.
void CmdBuffer::end()
{
    EXPECT(vkEndCommandBuffer(handle()) == VK_SUCCESS);
}

// Reset the command buffer to the initial state.
void CmdBuffer::reset(VkCmdBufferResetFlags flags)
{
    EXPECT(vkResetCommandBuffer(handle(), flags) == VK_SUCCESS);
}
874
875}; // namespace vk_testing
876