// vktestbinding.cpp
// VK tests
//
// Copyright (C) 2014 LunarG, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
2303333823c75a1c1887e923828113a1b0fd12020cElliott Hughes#include <iostream>
2403333823c75a1c1887e923828113a1b0fd12020cElliott Hughes#include <string.h> // memset(), memcmp()
2503333823c75a1c1887e923828113a1b0fd12020cElliott Hughes#include <assert.h>
2603333823c75a1c1887e923828113a1b0fd12020cElliott Hughes#include <stdarg.h>
2703333823c75a1c1887e923828113a1b0fd12020cElliott Hughes#include "vktestbinding.h"
2803333823c75a1c1887e923828113a1b0fd12020cElliott Hughes
2903333823c75a1c1887e923828113a1b0fd12020cElliott Hughesnamespace {
3003333823c75a1c1887e923828113a1b0fd12020cElliott Hughes
3103333823c75a1c1887e923828113a1b0fd12020cElliott Hughes#define NON_DISPATCHABLE_HANDLE_INIT(create_func, dev, ...)                         \
3203333823c75a1c1887e923828113a1b0fd12020cElliott Hughes    do {                                                                            \
3303333823c75a1c1887e923828113a1b0fd12020cElliott Hughes        handle_type handle;                                                         \
3403333823c75a1c1887e923828113a1b0fd12020cElliott Hughes        if (EXPECT(create_func(dev.handle(), __VA_ARGS__, &handle) == VK_SUCCESS))  \
3503333823c75a1c1887e923828113a1b0fd12020cElliott Hughes            NonDispHandle::init(dev.handle(), handle);                              \
3603333823c75a1c1887e923828113a1b0fd12020cElliott Hughes    } while (0)
3703333823c75a1c1887e923828113a1b0fd12020cElliott Hughes
3803333823c75a1c1887e923828113a1b0fd12020cElliott Hughes#define NON_DISPATCHABLE_HANDLE_DTOR(cls, destroy_func)                             \
39    cls::~cls()                                                                     \
40    {                                                                               \
41        if (initialized())                                                          \
42            destroy_func(device(), handle());                                       \
43    }
44
45#define STRINGIFY(x) #x
46#define EXPECT(expr) ((expr) ? true : expect_failure(STRINGIFY(expr), __FILE__, __LINE__, __FUNCTION__))
47
48
49vk_testing::ErrorCallback error_callback;
50
51bool expect_failure(const char *expr, const char *file, unsigned int line, const char *function)
52{
53    if (error_callback) {
54        error_callback(expr, file, line, function);
55    } else {
56        std::cerr << file << ":" << line << ": " << function <<
57            ": Expectation `" << expr << "' failed.\n";
58    }
59
60    return false;
61}
62
// Collect the raw Vulkan handles of a vector of wrapper-object pointers into
// a vector of handles of type T, preserving order.
template<class T, class S>
std::vector<T> make_handles(const std::vector<S> &v)
{
    std::vector<T> handles;
    handles.reserve(v.size());
    for (typename std::vector<S>::const_iterator cur = v.begin(); cur != v.end(); ++cur) {
        handles.push_back((*cur)->handle());
    }
    return handles;
}
72
// Build a VkMemoryAllocInfo sized for the given requirements and select a
// memory type on dev that satisfies mem_props (index written into the info).
// NOTE(review): the bool result of set_memory_type() is ignored here — if no
// memory type matches, info.memoryTypeIndex keeps whatever alloc_info() set;
// confirm callers tolerate that.
VkMemoryAllocInfo get_resource_alloc_info(const vk_testing::Device &dev, const VkMemoryRequirements &reqs, VkMemoryPropertyFlags mem_props)
{
    VkMemoryAllocInfo info = vk_testing::DeviceMemory::alloc_info(reqs.size, 0);
    dev.phy().set_memory_type(reqs.memoryTypeBits, &info, mem_props);

    return info;
}
80
81} // namespace
82
83namespace vk_testing {
84
85void set_error_callback(ErrorCallback callback)
86{
87    error_callback = callback;
88}
89
90VkPhysicalDeviceProperties PhysicalDevice::properties() const
91{
92    VkPhysicalDeviceProperties info;
93
94    vkGetPhysicalDeviceProperties(handle(), &info);
95
96    return info;
97}
98
99std::vector<VkQueueFamilyProperties> PhysicalDevice::queue_properties() const
100{
101    std::vector<VkQueueFamilyProperties> info;
102    uint32_t count;
103
104    // Call once with NULL data to receive count
105    vkGetPhysicalDeviceQueueFamilyProperties(handle(), &count, NULL);
106    info.resize(count);
107    vkGetPhysicalDeviceQueueFamilyProperties(handle(), &count, info.data());
108
109    return info;
110}
111
112VkPhysicalDeviceMemoryProperties PhysicalDevice::memory_properties() const
113{
114    VkPhysicalDeviceMemoryProperties info;
115
116    vkGetPhysicalDeviceMemoryProperties(handle(), &info);
117
118    return info;
119}
120
121/*
122 * Return list of Global layers available
123 */
124std::vector<VkLayerProperties> GetGlobalLayers()
125{
126    VkResult err;
127    std::vector<VkLayerProperties> layers;
128    uint32_t layer_count;
129
130    do {
131        layer_count = 0;
132        err = vkEnumerateInstanceLayerProperties(&layer_count, NULL);
133
134        if (err == VK_SUCCESS) {
135            layers.reserve(layer_count);
136            err = vkEnumerateInstanceLayerProperties(&layer_count, layers.data());
137        }
138    } while (err == VK_INCOMPLETE);
139
140    assert(err == VK_SUCCESS);
141
142    return layers;
143}
144
145/*
146 * Return list of Global extensions provided by the ICD / Loader
147 */
148std::vector<VkExtensionProperties> GetGlobalExtensions()
149{
150    return GetGlobalExtensions(NULL);
151}
152
153/*
154 * Return list of Global extensions provided by the specified layer
155 * If pLayerName is NULL, will return extensions implemented by the loader / ICDs
156 */
157std::vector<VkExtensionProperties> GetGlobalExtensions(const char *pLayerName)
158{
159    std::vector<VkExtensionProperties> exts;
160    uint32_t ext_count;
161    VkResult err;
162
163    do {
164        ext_count = 0;
165        err = vkEnumerateInstanceExtensionProperties(pLayerName, &ext_count, NULL);
166
167        if (err == VK_SUCCESS) {
168            exts.resize(ext_count);
169            err = vkEnumerateInstanceExtensionProperties(pLayerName, &ext_count, exts.data());
170        }
171    } while (err == VK_INCOMPLETE);
172
173    assert(err == VK_SUCCESS);
174
175    return exts;
176}
177
178/*
179 * Return list of PhysicalDevice extensions provided by the ICD / Loader
180 */
181std::vector<VkExtensionProperties> PhysicalDevice::extensions() const
182{
183    return extensions(NULL);
184}
185
186/*
187 * Return list of PhysicalDevice extensions provided by the specified layer
188 * If pLayerName is NULL, will return extensions for ICD / loader.
189 */
190std::vector<VkExtensionProperties> PhysicalDevice::extensions(const char *pLayerName) const
191{
192    std::vector<VkExtensionProperties> exts;
193    VkResult err;
194
195    do {
196        uint32_t extCount = 0;
197        err = vkEnumerateDeviceExtensionProperties(handle(), pLayerName, &extCount, NULL);
198
199        if (err == VK_SUCCESS) {
200            exts.resize(extCount);
201            err = vkEnumerateDeviceExtensionProperties(handle(), pLayerName, &extCount, exts.data());
202        }
203    } while (err == VK_INCOMPLETE);
204
205    assert(err == VK_SUCCESS);
206
207    return exts;
208}
209
210bool PhysicalDevice::set_memory_type(const uint32_t type_bits, VkMemoryAllocInfo *info, const VkFlags properties, const VkFlags forbid) const
211{
212     uint32_t type_mask = type_bits;
213     // Search memtypes to find first index with those properties
214     for (uint32_t i = 0; i < 32; i++) {
215         if ((type_mask & 1) == 1) {
216             // Type is available, does it match user properties?
217             if ((memory_properties_.memoryTypes[i].propertyFlags & properties) == properties &&
218                 (memory_properties_.memoryTypes[i].propertyFlags & forbid) == 0) {
219                 info->memoryTypeIndex = i;
220                 return true;
221             }
222         }
223         type_mask >>= 1;
224     }
225     // No memory types matched, return failure
226     return false;
227}
228
229/*
230 * Return list of PhysicalDevice layers
231 */
232std::vector<VkLayerProperties> PhysicalDevice::layers() const
233{
234    std::vector<VkLayerProperties> layer_props;
235    VkResult err;
236
237    do {
238        uint32_t layer_count = 0;
239        err = vkEnumerateDeviceLayerProperties(handle(), &layer_count, NULL);
240
241        if (err == VK_SUCCESS) {
242            layer_props.reserve(layer_count);
243            err = vkEnumerateDeviceLayerProperties(handle(), &layer_count, layer_props.data());
244        }
245    } while (err == VK_INCOMPLETE);
246
247    assert(err == VK_SUCCESS);
248
249    return layer_props;
250}
251
252Device::~Device()
253{
254    if (!initialized())
255        return;
256
257    for (int i = 0; i < QUEUE_COUNT; i++) {
258        for (std::vector<Queue *>::iterator it = queues_[i].begin(); it != queues_[i].end(); it++)
259            delete *it;
260        queues_[i].clear();
261    }
262
263    vkDestroyDevice(handle());
264}
265
266void Device::init(std::vector<const char *> &layers, std::vector<const char *> &extensions)
267{
268    // request all queues
269    const std::vector<VkQueueFamilyProperties> queue_props = phy_.queue_properties();
270    std::vector<VkDeviceQueueCreateInfo> queue_info;
271    queue_info.reserve(queue_props.size());
272    for (int i = 0; i < queue_props.size(); i++) {
273        VkDeviceQueueCreateInfo qi = {};
274        qi.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
275        qi.pNext = NULL;
276        qi.queueFamilyIndex = i;
277        qi.queuePriorityCount = queue_props[i].queueCount;
278        std::vector<float> queue_priorities (qi.queuePriorityCount, 0.0);
279        qi.pQueuePriorities = queue_priorities.data();
280        if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
281            graphics_queue_node_index_ = i;
282        }
283        queue_info.push_back(qi);
284    }
285
286    VkDeviceCreateInfo dev_info = {};
287    dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
288    dev_info.pNext = NULL;
289    dev_info.requestedQueueCount = queue_info.size();
290    dev_info.pRequestedQueues = queue_info.data();
291    dev_info.enabledLayerNameCount = layers.size();
292    dev_info.ppEnabledLayerNames = layers.data();
293    dev_info.enabledExtensionNameCount = extensions.size();
294    dev_info.ppEnabledExtensionNames = extensions.data();
295
296    init(dev_info);
297}
298
299void Device::init(const VkDeviceCreateInfo &info)
300{
301    VkDevice dev;
302
303    if (EXPECT(vkCreateDevice(phy_.handle(), &info, &dev) == VK_SUCCESS))
304        Handle::init(dev);
305
306    init_queues();
307    init_formats();
308}
309
310void Device::init_queues()
311{
312    uint32_t queue_node_count;
313
314    // Call with NULL data to get count
315    vkGetPhysicalDeviceQueueFamilyProperties(phy_.handle(), &queue_node_count, NULL);
316    EXPECT(queue_node_count >= 1);
317
318    VkQueueFamilyProperties* queue_props = new VkQueueFamilyProperties[queue_node_count];
319
320    vkGetPhysicalDeviceQueueFamilyProperties(phy_.handle(), &queue_node_count, queue_props);
321
322    for (uint32_t i = 0; i < queue_node_count; i++) {
323        VkQueue queue;
324
325        for (uint32_t j = 0; j < queue_props[i].queueCount; j++) {
326            // TODO: Need to add support for separate MEMMGR and work queues, including synchronization
327            vkGetDeviceQueue(handle(), i, j, &queue);
328
329            if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
330                queues_[GRAPHICS].push_back(new Queue(queue, i));
331            }
332
333            if (queue_props[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
334                queues_[COMPUTE].push_back(new Queue(queue, i));
335            }
336
337            if (queue_props[i].queueFlags & VK_QUEUE_DMA_BIT) {
338                queues_[DMA].push_back(new Queue(queue, i));
339            }
340        }
341    }
342
343    delete[] queue_props;
344
345    EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
346}
347
348void Device::init_formats()
349{
350    for (int f = VK_FORMAT_BEGIN_RANGE; f <= VK_FORMAT_END_RANGE; f++) {
351        const VkFormat fmt = static_cast<VkFormat>(f);
352        const VkFormatProperties props = format_properties(fmt);
353
354        if (props.linearTilingFeatures) {
355            const Format tmp = { fmt, VK_IMAGE_TILING_LINEAR, props.linearTilingFeatures };
356            formats_.push_back(tmp);
357        }
358
359        if (props.optimalTilingFeatures) {
360            const Format tmp = { fmt, VK_IMAGE_TILING_OPTIMAL, props.optimalTilingFeatures };
361            formats_.push_back(tmp);
362        }
363    }
364
365    EXPECT(!formats_.empty());
366}
367
368VkFormatProperties Device::format_properties(VkFormat format)
369{
370    VkFormatProperties data;
371    vkGetPhysicalDeviceFormatProperties(phy().handle(), format, &data);
372
373    return data;
374}
375
376void Device::wait()
377{
378    EXPECT(vkDeviceWaitIdle(handle()) == VK_SUCCESS);
379}
380
381VkResult Device::wait(const std::vector<const Fence *> &fences, bool wait_all, uint64_t timeout)
382{
383    const std::vector<VkFence> fence_handles = make_handles<VkFence>(fences);
384    VkResult err = vkWaitForFences(handle(), fence_handles.size(), fence_handles.data(), wait_all, timeout);
385    EXPECT(err == VK_SUCCESS || err == VK_TIMEOUT);
386
387    return err;
388}
389
390void Device::update_descriptor_sets(const std::vector<VkWriteDescriptorSet> &writes, const std::vector<VkCopyDescriptorSet> &copies)
391{
392    vkUpdateDescriptorSets(handle(), writes.size(), writes.data(), copies.size(), copies.data());
393}
394
395void Queue::submit(const std::vector<const CmdBuffer *> &cmds, Fence &fence)
396{
397    const std::vector<VkCmdBuffer> cmd_handles = make_handles<VkCmdBuffer>(cmds);
398    VkSubmitInfo submit_info;
399    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
400    submit_info.pNext = NULL;
401    submit_info.waitSemaphoreCount = 0;
402    submit_info.pWaitSemaphores = NULL;
403    submit_info.commandBufferCount = (uint32_t)cmd_handles.size();
404    submit_info.pCommandBuffers = cmd_handles.data();
405    submit_info.signalSemaphoreCount = 0;
406    submit_info.pSignalSemaphores = NULL;
407
408    EXPECT(vkQueueSubmit(handle(), 1, &submit_info, fence.handle()) == VK_SUCCESS);
409}
410
411void Queue::submit(const CmdBuffer &cmd, Fence &fence)
412{
413    submit(std::vector<const CmdBuffer*>(1, &cmd), fence);
414}
415
416void Queue::submit(const CmdBuffer &cmd)
417{
418    Fence fence;
419    submit(cmd, fence);
420}
421
422void Queue::wait()
423{
424    EXPECT(vkQueueWaitIdle(handle()) == VK_SUCCESS);
425}
426
// Free the allocation, but only when init() previously succeeded.
DeviceMemory::~DeviceMemory()
{
    if (initialized())
        vkFreeMemory(device(), handle());
}

// Allocate device memory as described by `info` and bind this wrapper to it.
void DeviceMemory::init(const Device &dev, const VkMemoryAllocInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkAllocMemory, dev, &info);
}

// Map the allocation for read-only host access (offset 0, size 0 —
// presumably "whole allocation" in this API revision; TODO confirm).
// Returns NULL when the map call fails.
const void *DeviceMemory::map(VkFlags flags) const
{
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0 ,0, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

// Map the allocation for read/write host access; NULL on failure.
void *DeviceMemory::map(VkFlags flags)
{
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0, 0, flags, &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

// Release a mapping previously obtained from map().
void DeviceMemory::unmap() const
{
    vkUnmapMemory(device(), handle());
}
460
// Fence: destructor generated by the shared non-dispatchable-handle macro.
NON_DISPATCHABLE_HANDLE_DTOR(Fence, vkDestroyFence)

// Create a fence on dev as described by info.
void Fence::init(const Device &dev, const VkFenceCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateFence, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Semaphore, vkDestroySemaphore)

// Create a semaphore on dev.
void Semaphore::init(const Device &dev, const VkSemaphoreCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateSemaphore, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Event, vkDestroyEvent)

// Create an event on dev.
void Event::init(const Device &dev, const VkEventCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateEvent, dev, &info);
}

// Signal the event from the host.
void Event::set()
{
    EXPECT(vkSetEvent(device(), handle()) == VK_SUCCESS);
}

// Unsignal the event from the host.
void Event::reset()
{
    EXPECT(vkResetEvent(device(), handle()) == VK_SUCCESS);
}

NON_DISPATCHABLE_HANDLE_DTOR(QueryPool, vkDestroyQueryPool)

// Create a query pool on dev.
void QueryPool::init(const Device &dev, const VkQueryPoolCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateQueryPool, dev, &info);
}

// Fetch results for `count` queries starting at `start` into `data`.
// VK_NOT_READY is an accepted (non-error) outcome; the raw result is returned.
VkResult QueryPool::results(uint32_t start, uint32_t count, size_t size, void *data, size_t stride)
{
    VkResult err = vkGetQueryPoolResults(device(), handle(), start, count, size, data, stride, 0);
    EXPECT(err == VK_SUCCESS || err == VK_NOT_READY);

    return err;
}

NON_DISPATCHABLE_HANDLE_DTOR(Buffer, vkDestroyBuffer)

// Create a buffer and back it with freshly allocated device memory that
// satisfies mem_props (memory is owned by internal_mem_).
void Buffer::init(const Device &dev, const VkBufferCreateInfo &info, VkMemoryPropertyFlags mem_props)
{
    init_no_mem(dev, info);

    internal_mem_.init(dev, get_resource_alloc_info(dev, memory_requirements(), mem_props));
    bind_memory(internal_mem_, 0);
}

// Create the buffer object only; the caller is responsible for memory.
void Buffer::init_no_mem(const Device &dev, const VkBufferCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateBuffer, dev, &info);
    create_info_ = info;
}

// Query size/alignment/memory-type requirements for this buffer.
VkMemoryRequirements Buffer::memory_requirements() const
{
    VkMemoryRequirements reqs;

    vkGetBufferMemoryRequirements(device(), handle(), &reqs);

    return reqs;
}

// Bind device memory to the buffer at the given offset.
void Buffer::bind_memory(const DeviceMemory &mem, VkDeviceSize mem_offset)
{
    EXPECT(vkBindBufferMemory(device(), handle(), mem.handle(), mem_offset) == VK_SUCCESS);
}

NON_DISPATCHABLE_HANDLE_DTOR(BufferView, vkDestroyBufferView)

// Create a buffer view on dev.
void BufferView::init(const Device &dev, const VkBufferViewCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateBufferView, dev, &info);
}
543
NON_DISPATCHABLE_HANDLE_DTOR(Image, vkDestroyImage)

// Create an image and back it with freshly allocated device memory that
// satisfies mem_props (memory is owned by internal_mem_).
void Image::init(const Device &dev, const VkImageCreateInfo &info, VkMemoryPropertyFlags mem_props)
{
    init_no_mem(dev, info);

    internal_mem_.init(dev, get_resource_alloc_info(dev, memory_requirements(), mem_props));
    bind_memory(internal_mem_, 0);
}

// Create the image object only; no memory is bound.
void Image::init_no_mem(const Device &dev, const VkImageCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateImage, dev, &info);
    init_info(dev, info);
}

// Record the create-info and look up the feature flags the device reported
// for this format/tiling combination (populated by Device::init_formats()).
void Image::init_info(const Device &dev, const VkImageCreateInfo &info)
{
    create_info_ = info;

    for (std::vector<Device::Format>::const_iterator it = dev.formats().begin(); it != dev.formats().end(); it++) {
        // memcmp comparison of the format avoids assumptions about the enum's
        // underlying type.
        if (memcmp(&it->format, &create_info_.format, sizeof(it->format)) == 0 && it->tiling == create_info_.tiling) {
            format_features_ = it->features;
            break;
        }
    }
}

// Query size/alignment/memory-type requirements for this image.
VkMemoryRequirements Image::memory_requirements() const
{
    VkMemoryRequirements reqs;

    vkGetImageMemoryRequirements(device(), handle(), &reqs);

    return reqs;
}

// Bind device memory to the image at the given offset.
void Image::bind_memory(const DeviceMemory &mem, VkDeviceSize mem_offset)
{
    EXPECT(vkBindImageMemory(device(), handle(), mem.handle(), mem_offset) == VK_SUCCESS);
}
585
586VkSubresourceLayout Image::subresource_layout(const VkImageSubresource &subres) const
587{
588    VkSubresourceLayout data;
589    size_t size = sizeof(data);
590    vkGetImageSubresourceLayout(device(), handle(), &subres, &data);
591    if (size != sizeof(data))
592        memset(&data, 0, sizeof(data));
593
594    return data;
595}
596
597VkSubresourceLayout Image::subresource_layout(const VkImageSubresourceCopy &subrescopy) const
598{
599    VkSubresourceLayout data;
600    VkImageSubresource subres = subresource(image_aspect(subrescopy.aspect), subrescopy.mipLevel, subrescopy.baseArrayLayer);
601    size_t size = sizeof(data);
602    vkGetImageSubresourceLayout(device(), handle(), &subres, &data);
603    if (size != sizeof(data))
604        memset(&data, 0, sizeof(data));
605
606    return data;
607}
608
609bool Image::transparent() const
610{
611    return (create_info_.tiling == VK_IMAGE_TILING_LINEAR &&
612            create_info_.samples == 1 &&
613            !(create_info_.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
614                                    VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)));
615}
616
NON_DISPATCHABLE_HANDLE_DTOR(ImageView, vkDestroyImageView)

// Create an image view on dev.
void ImageView::init(const Device &dev, const VkImageViewCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateImageView, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(ShaderModule, vkDestroyShaderModule)

// Create a shader module; failures are reported through EXPECT.
void ShaderModule::init(const Device &dev, const VkShaderModuleCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateShaderModule, dev, &info);
}

// Like init() but returns the VkResult instead of asserting, for callers that
// intentionally exercise failure cases.
VkResult ShaderModule::init_try(const Device &dev, const VkShaderModuleCreateInfo &info)
{
    VkShaderModule mod;

    VkResult err = vkCreateShaderModule(dev.handle(), &info, &mod);
    if (err == VK_SUCCESS)
        NonDispHandle::init(dev.handle(), mod);

    return err;
}

NON_DISPATCHABLE_HANDLE_DTOR(Shader, vkDestroyShader)

// Create a shader; failures are reported through EXPECT.
void Shader::init(const Device &dev, const VkShaderCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateShader, dev, &info);
}

// Like init() but surfaces the VkResult to the caller instead of asserting.
VkResult Shader::init_try(const Device &dev, const VkShaderCreateInfo &info)
{
    VkShader sh;

    VkResult err = vkCreateShader(dev.handle(), &info, &sh);
    if (err == VK_SUCCESS)
        NonDispHandle::init(dev.handle(), sh);

    return err;
}
659
NON_DISPATCHABLE_HANDLE_DTOR(Pipeline, vkDestroyPipeline)

// Build a graphics pipeline through a temporary, empty pipeline cache; the
// cache is destroyed again after pipeline creation.
void Pipeline::init(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
{
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    memset((void *) &ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, &cache);
    // NOTE(review): a cache-creation failure is silently ignored here; the
    // pipeline is simply never created. init_try() reports it instead.
    if (err == VK_SUCCESS) {
        NON_DISPATCHABLE_HANDLE_INIT(vkCreateGraphicsPipelines, dev, cache, 1, &info);
        vkDestroyPipelineCache(dev.handle(), cache);
    }
}

// As init(), but returns the VkResult from creation instead of only asserting,
// so tests can exercise expected-failure pipelines.
VkResult Pipeline::init_try(const Device &dev, const VkGraphicsPipelineCreateInfo &info)
{
    VkPipeline pipe;
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    memset((void *) &ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, &cache);
    EXPECT(err == VK_SUCCESS);
    if (err == VK_SUCCESS) {
        err = vkCreateGraphicsPipelines(dev.handle(), cache, 1, &info, &pipe);
        if (err == VK_SUCCESS) {
            NonDispHandle::init(dev.handle(), pipe);
        }
        // The cache is destroyed whether or not pipeline creation succeeded.
        vkDestroyPipelineCache(dev.handle(), cache);
    }

    return err;
}

// Build a compute pipeline through a temporary pipeline cache.
void Pipeline::init(const Device &dev, const VkComputePipelineCreateInfo &info)
{
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    memset((void *) &ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, &cache);
    if (err == VK_SUCCESS) {
        NON_DISPATCHABLE_HANDLE_INIT(vkCreateComputePipelines, dev, cache, 1, &info);
        vkDestroyPipelineCache(dev.handle(), cache);
    }
}
707
NON_DISPATCHABLE_HANDLE_DTOR(PipelineLayout, vkDestroyPipelineLayout)

// Create a pipeline layout; `info` is patched in place to reference the raw
// handles of the given set layouts.
// NOTE(review): info.pSetLayouts is left pointing at a function-local vector
// after return — callers must not reuse `info` afterwards.
void PipelineLayout::init(const Device &dev, VkPipelineLayoutCreateInfo &info, const std::vector<const DescriptorSetLayout *> &layouts)
{
    const std::vector<VkDescriptorSetLayout> layout_handles = make_handles<VkDescriptorSetLayout>(layouts);
    info.pSetLayouts = layout_handles.data();

    NON_DISPATCHABLE_HANDLE_INIT(vkCreatePipelineLayout, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Sampler, vkDestroySampler)

// Create a sampler on dev.
void Sampler::init(const Device &dev, const VkSamplerCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateSampler, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(DescriptorSetLayout, vkDestroyDescriptorSetLayout)

// Create a descriptor set layout on dev.
void DescriptorSetLayout::init(const Device &dev, const VkDescriptorSetLayoutCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDescriptorSetLayout, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(DescriptorPool, vkDestroyDescriptorPool)

// Create a descriptor pool. Whether the pool allows freeing individual sets
// (FREE_DESCRIPTOR_SET_BIT) is recorded so ~DescriptorSet knows whether to
// call vkFreeDescriptorSets.
void DescriptorPool::init(const Device &dev, const VkDescriptorPoolCreateInfo &info)
{
    setDynamicUsage(info.flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT);
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDescriptorPool, dev, &info);
}

// Return all sets allocated from this pool back to it.
void DescriptorPool::reset()
{
    EXPECT(vkResetDescriptorPool(device(), handle(), 0) == VK_SUCCESS);
}
743}
744
745std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, const std::vector<const DescriptorSetLayout *> &layouts)
746{
747    const std::vector<VkDescriptorSetLayout> layout_handles = make_handles<VkDescriptorSetLayout>(layouts);
748
749    std::vector<VkDescriptorSet> set_handles;
750    set_handles.resize(layout_handles.size());
751
752    VkDescriptorSetAllocInfo alloc_info = {};
753    alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOC_INFO;
754    alloc_info.setLayoutCount = layout_handles.size();
755    alloc_info.descriptorPool = handle();
756    alloc_info.pSetLayouts = layout_handles.data();
757    VkResult err = vkAllocDescriptorSets(device(), &alloc_info, set_handles.data());
758    EXPECT(err == VK_SUCCESS);
759
760    std::vector<DescriptorSet *> sets;
761    for (std::vector<VkDescriptorSet>::const_iterator it = set_handles.begin(); it != set_handles.end(); it++) {
762        // do descriptor sets need memories bound?
763        DescriptorSet *descriptorSet = new DescriptorSet(dev, this, *it);
764        sets.push_back(descriptorSet);
765    }
766    return sets;
767}
768
769std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, const DescriptorSetLayout &layout, uint32_t count)
770{
771    return alloc_sets(dev, std::vector<const DescriptorSetLayout *>(count, &layout));
772}
773
774DescriptorSet *DescriptorPool::alloc_sets(const Device &dev, const DescriptorSetLayout &layout)
775{
776    std::vector<DescriptorSet *> set = alloc_sets(dev, layout, 1);
777    return (set.empty()) ? NULL : set[0];
778}
779
// Free the set back to its pool — but only when the pool was created with the
// free-descriptor-set flag; freeing individual sets is invalid otherwise.
DescriptorSet::~DescriptorSet()
{
    if (initialized()) {
        // Only call vkFree* on sets allocated from pool with usage *_DYNAMIC
        if (containing_pool_->getDynamicUsage()) {
            VkDescriptorSet sets[1] = { handle() };
            EXPECT(vkFreeDescriptorSets(device(), containing_pool_->GetObj(), 1, sets) == VK_SUCCESS);
        }
    }
}

NON_DISPATCHABLE_HANDLE_DTOR(CmdPool, vkDestroyCommandPool)

// Create a command pool on dev.
void CmdPool::init(const Device &dev, const VkCmdPoolCreateInfo &info)
{
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateCommandPool, dev, &info);
}
797
798
// Return the command buffer to the pool it was allocated from.
CmdBuffer::~CmdBuffer()
{
    if (initialized()) {
        VkCmdBuffer cmds[] = { handle() };
        vkFreeCommandBuffers(dev_handle_, cmd_pool_, 1, cmds);
    }
}

// Allocate a command buffer; the device and pool are recorded so the
// destructor can free it later.
void CmdBuffer::init(const Device &dev, const VkCmdBufferAllocInfo &info)
{
    VkCmdBuffer cmd;

    // Make sure cmdPool is set
    assert(info.cmdPool);

    if (EXPECT(vkAllocCommandBuffers(dev.handle(), &info, &cmd) == VK_SUCCESS)) {
        Handle::init(cmd);
        dev_handle_ = dev.handle();
        cmd_pool_ = info.cmdPool;
    }
}

// Begin recording with caller-supplied begin-info.
void CmdBuffer::begin(const VkCmdBufferBeginInfo *info)
{
    EXPECT(vkBeginCommandBuffer(handle(), info) == VK_SUCCESS);
}

// Begin recording with default one-time-submit usage and no render pass or
// framebuffer bound.
void CmdBuffer::begin()
{
    VkCmdBufferBeginInfo info = {};
    info.flags = VK_CMD_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    info.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO;
    info.renderPass = VK_NULL_HANDLE;
    info.subpass = 0;
    info.framebuffer = VK_NULL_HANDLE;

    begin(&info);
}

// Finish recording.
void CmdBuffer::end()
{
    EXPECT(vkEndCommandBuffer(handle()) == VK_SUCCESS);
}

// Reset the command buffer so it can be re-recorded.
void CmdBuffer::reset(VkCmdBufferResetFlags flags)
{
    EXPECT(vkResetCommandBuffer(handle(), flags) == VK_SUCCESS);
}
847
848}; // namespace vk_testing
849