1/*-------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2015 Google Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 *      http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file
21 * \brief Pipeline barrier tests
22 *//*--------------------------------------------------------------------*/
23
24#include "vktMemoryPipelineBarrierTests.hpp"
25
26#include "vktTestCaseUtil.hpp"
27
28#include "vkDefs.hpp"
29#include "vkPlatform.hpp"
30#include "vkRefUtil.hpp"
31#include "vkQueryUtil.hpp"
32#include "vkMemUtil.hpp"
33#include "vkTypeUtil.hpp"
34#include "vkPrograms.hpp"
35
36#include "tcuMaybe.hpp"
37#include "tcuTextureUtil.hpp"
38#include "tcuTestLog.hpp"
39#include "tcuResultCollector.hpp"
40#include "tcuTexture.hpp"
41#include "tcuImageCompare.hpp"
42
43#include "deUniquePtr.hpp"
44#include "deStringUtil.hpp"
45#include "deRandom.hpp"
46
47#include "deInt32.h"
48#include "deMath.h"
49#include "deMemory.h"
50
51#include <map>
52#include <set>
53#include <sstream>
54#include <string>
55#include <vector>
56
57using tcu::TestLog;
58using tcu::Maybe;
59
60using de::MovePtr;
61
62using std::string;
63using std::vector;
64using std::map;
65using std::set;
66using std::pair;
67
68using tcu::IVec2;
69using tcu::UVec2;
70using tcu::UVec4;
71using tcu::Vec4;
72using tcu::ConstPixelBufferAccess;
73using tcu::PixelBufferAccess;
74using tcu::TextureFormat;
75using tcu::TextureLevel;
76
77namespace vkt
78{
79namespace memory
80{
81namespace
82{
// Upper bounds for the amount of memory a single test is allowed to exercise.
enum
{
	MAX_UNIFORM_BUFFER_SIZE = 1024,			// Max bytes bound as a uniform buffer
	MAX_STORAGE_BUFFER_SIZE = (1<<28),		// Max bytes bound as a storage buffer (256 MiB)
	MAX_SIZE = (128 * 1024)					// Overall cap on tested memory object size
};
89
// \todo [mika] Add to utilities
// Integer division that rounds the quotient up instead of truncating.
template<typename T>
T divRoundUp (const T& a, const T& b)
{
	const T	quotient	= a / b;
	const T	remainder	= a % b;

	return (remainder == 0) ? quotient : quotient + 1;
}
96
// Union of every pipeline stage bit exercised by these tests.
enum
{
	ALL_PIPELINE_STAGES = vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
						| vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
						| vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT
						| vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT
						| vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
						| vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT
						| vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT
						| vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
						| vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
						| vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
						| vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT
						| vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
						| vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT
						| vk::VK_PIPELINE_STAGE_TRANSFER_BIT
						| vk::VK_PIPELINE_STAGE_HOST_BIT
};
115
// Union of every access type bit exercised by these tests.
enum
{
	ALL_ACCESSES = vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT
				 | vk::VK_ACCESS_INDEX_READ_BIT
				 | vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT
				 | vk::VK_ACCESS_UNIFORM_READ_BIT
				 | vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT
				 | vk::VK_ACCESS_SHADER_READ_BIT
				 | vk::VK_ACCESS_SHADER_WRITE_BIT
				 | vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT
				 | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
				 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
				 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
				 | vk::VK_ACCESS_TRANSFER_READ_BIT
				 | vk::VK_ACCESS_TRANSFER_WRITE_BIT
				 | vk::VK_ACCESS_HOST_READ_BIT
				 | vk::VK_ACCESS_HOST_WRITE_BIT
				 | vk::VK_ACCESS_MEMORY_READ_BIT
				 | vk::VK_ACCESS_MEMORY_WRITE_BIT
};
136
// Bitmask describing the ways a tested memory object (buffer or image) may be
// used. Combinations are formed with the operator|/operator& overloads below
// and translated to Vulkan flags by the usageTo*Flags() helpers.
enum Usage
{
	// Mapped host read and write
	USAGE_HOST_READ = (0x1u<<0),
	USAGE_HOST_WRITE = (0x1u<<1),

	// Copy and other transfer operations
	USAGE_TRANSFER_SRC = (0x1u<<2),
	USAGE_TRANSFER_DST = (0x1u<<3),

	// Buffer usage flags
	USAGE_INDEX_BUFFER = (0x1u<<4),
	USAGE_VERTEX_BUFFER = (0x1u<<5),

	USAGE_UNIFORM_BUFFER = (0x1u<<6),
	USAGE_STORAGE_BUFFER = (0x1u<<7),

	USAGE_UNIFORM_TEXEL_BUFFER = (0x1u<<8),
	USAGE_STORAGE_TEXEL_BUFFER = (0x1u<<9),

	// \todo [2016-03-09 mika] This is probably almost impossible to do
	USAGE_INDIRECT_BUFFER = (0x1u<<10),

	// Texture usage flags
	USAGE_SAMPLED_IMAGE = (0x1u<<11),
	USAGE_STORAGE_IMAGE = (0x1u<<12),
	USAGE_COLOR_ATTACHMENT = (0x1u<<13),
	USAGE_INPUT_ATTACHMENT = (0x1u<<14),
	USAGE_DEPTH_STENCIL_ATTACHMENT = (0x1u<<15),
};
167
168bool supportsDeviceBufferWrites (Usage usage)
169{
170	if (usage & USAGE_TRANSFER_DST)
171		return true;
172
173	if (usage & USAGE_STORAGE_BUFFER)
174		return true;
175
176	if (usage & USAGE_STORAGE_TEXEL_BUFFER)
177		return true;
178
179	return false;
180}
181
182bool supportsDeviceImageWrites (Usage usage)
183{
184	if (usage & USAGE_TRANSFER_DST)
185		return true;
186
187	if (usage & USAGE_STORAGE_IMAGE)
188		return true;
189
190	if (usage & USAGE_COLOR_ATTACHMENT)
191		return true;
192
193	return false;
194}
195
// Sequential access enums
// One enumerant per vk::VkAccessFlagBits bit, assigned consecutive values so
// they can be used as dense array indices; ACCESS_LAST is the count.
enum Access
{
	ACCESS_INDIRECT_COMMAND_READ_BIT = 0,
	ACCESS_INDEX_READ_BIT,
	ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
	ACCESS_UNIFORM_READ_BIT,
	ACCESS_INPUT_ATTACHMENT_READ_BIT,
	ACCESS_SHADER_READ_BIT,
	ACCESS_SHADER_WRITE_BIT,
	ACCESS_COLOR_ATTACHMENT_READ_BIT,
	ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
	ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
	ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
	ACCESS_TRANSFER_READ_BIT,
	ACCESS_TRANSFER_WRITE_BIT,
	ACCESS_HOST_READ_BIT,
	ACCESS_HOST_WRITE_BIT,
	ACCESS_MEMORY_READ_BIT,
	ACCESS_MEMORY_WRITE_BIT,

	ACCESS_LAST
};
219
// Sequential stage enums
// One enumerant per vk::VkPipelineStageFlagBits bit, assigned consecutive
// values so they can be used as dense array indices; PIPELINESTAGE_LAST is the
// count. Mapping from the Vulkan bits is done by pipelineStageFlagToPipelineStage().
enum PipelineStage
{
	PIPELINESTAGE_TOP_OF_PIPE_BIT = 0,
	PIPELINESTAGE_BOTTOM_OF_PIPE_BIT,
	PIPELINESTAGE_DRAW_INDIRECT_BIT,
	PIPELINESTAGE_VERTEX_INPUT_BIT,
	PIPELINESTAGE_VERTEX_SHADER_BIT,
	PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT,
	PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT,
	PIPELINESTAGE_GEOMETRY_SHADER_BIT,
	PIPELINESTAGE_FRAGMENT_SHADER_BIT,
	PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT,
	PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT,
	PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
	PIPELINESTAGE_COMPUTE_SHADER_BIT,
	PIPELINESTAGE_TRANSFER_BIT,
	PIPELINESTAGE_HOST_BIT,

	PIPELINESTAGE_LAST
};
241
242PipelineStage pipelineStageFlagToPipelineStage (vk::VkPipelineStageFlagBits flags)
243{
244	switch (flags)
245	{
246		case vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:						return PIPELINESTAGE_TOP_OF_PIPE_BIT;
247		case vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT:					return PIPELINESTAGE_BOTTOM_OF_PIPE_BIT;
248		case vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT:					return PIPELINESTAGE_DRAW_INDIRECT_BIT;
249		case vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT:					return PIPELINESTAGE_VERTEX_INPUT_BIT;
250		case vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT:					return PIPELINESTAGE_VERTEX_SHADER_BIT;
251		case vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT:		return PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT;
252		case vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT:	return PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT;
253		case vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT:					return PIPELINESTAGE_GEOMETRY_SHADER_BIT;
254		case vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT:					return PIPELINESTAGE_FRAGMENT_SHADER_BIT;
255		case vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT:			return PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT;
256		case vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT:				return PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT;
257		case vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT:			return PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
258		case vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT:					return PIPELINESTAGE_COMPUTE_SHADER_BIT;
259		case vk::VK_PIPELINE_STAGE_TRANSFER_BIT:						return PIPELINESTAGE_TRANSFER_BIT;
260		case vk::VK_PIPELINE_STAGE_HOST_BIT:							return PIPELINESTAGE_HOST_BIT;
261
262		default:
263			DE_FATAL("Unknown pipeline stage flags");
264			return PIPELINESTAGE_LAST;
265	}
266}
267
268Usage operator| (Usage a, Usage b)
269{
270	return (Usage)((deUint32)a | (deUint32)b);
271}
272
273Usage operator& (Usage a, Usage b)
274{
275	return (Usage)((deUint32)a & (deUint32)b);
276}
277
278string usageToName (Usage usage)
279{
280	const struct
281	{
282		Usage				usage;
283		const char* const	name;
284	} usageNames[] =
285	{
286		{ USAGE_HOST_READ,					"host_read" },
287		{ USAGE_HOST_WRITE,					"host_write" },
288
289		{ USAGE_TRANSFER_SRC,				"transfer_src" },
290		{ USAGE_TRANSFER_DST,				"transfer_dst" },
291
292		{ USAGE_INDEX_BUFFER,				"index_buffer" },
293		{ USAGE_VERTEX_BUFFER,				"vertex_buffer" },
294		{ USAGE_UNIFORM_BUFFER,				"uniform_buffer" },
295		{ USAGE_STORAGE_BUFFER,				"storage_buffer" },
296		{ USAGE_UNIFORM_TEXEL_BUFFER,		"uniform_texel_buffer" },
297		{ USAGE_STORAGE_TEXEL_BUFFER,		"storage_texel_buffer" },
298		{ USAGE_INDIRECT_BUFFER,			"indirect_buffer" },
299		{ USAGE_SAMPLED_IMAGE,				"image_sampled" },
300		{ USAGE_STORAGE_IMAGE,				"storage_image" },
301		{ USAGE_COLOR_ATTACHMENT,			"color_attachment" },
302		{ USAGE_INPUT_ATTACHMENT,			"input_attachment" },
303		{ USAGE_DEPTH_STENCIL_ATTACHMENT,	"depth_stencil_attachment" },
304	};
305
306	std::ostringstream	stream;
307	bool				first = true;
308
309	for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usageNames); usageNdx++)
310	{
311		if (usage & usageNames[usageNdx].usage)
312		{
313			if (!first)
314				stream << "_";
315			else
316				first = false;
317
318			stream << usageNames[usageNdx].name;
319		}
320	}
321
322	return stream.str();
323}
324
325vk::VkBufferUsageFlags usageToBufferUsageFlags (Usage usage)
326{
327	vk::VkBufferUsageFlags flags = 0;
328
329	if (usage & USAGE_TRANSFER_SRC)
330		flags |= vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
331
332	if (usage & USAGE_TRANSFER_DST)
333		flags |= vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT;
334
335	if (usage & USAGE_INDEX_BUFFER)
336		flags |= vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
337
338	if (usage & USAGE_VERTEX_BUFFER)
339		flags |= vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
340
341	if (usage & USAGE_INDIRECT_BUFFER)
342		flags |= vk::VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
343
344	if (usage & USAGE_UNIFORM_BUFFER)
345		flags |= vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
346
347	if (usage & USAGE_STORAGE_BUFFER)
348		flags |= vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
349
350	if (usage & USAGE_UNIFORM_TEXEL_BUFFER)
351		flags |= vk::VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
352
353	if (usage & USAGE_STORAGE_TEXEL_BUFFER)
354		flags |= vk::VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
355
356	return flags;
357}
358
359vk::VkImageUsageFlags usageToImageUsageFlags (Usage usage)
360{
361	vk::VkImageUsageFlags flags = 0;
362
363	if (usage & USAGE_TRANSFER_SRC)
364		flags |= vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
365
366	if (usage & USAGE_TRANSFER_DST)
367		flags |= vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT;
368
369	if (usage & USAGE_SAMPLED_IMAGE)
370		flags |= vk::VK_IMAGE_USAGE_SAMPLED_BIT;
371
372	if (usage & USAGE_STORAGE_IMAGE)
373		flags |= vk::VK_IMAGE_USAGE_STORAGE_BIT;
374
375	if (usage & USAGE_COLOR_ATTACHMENT)
376		flags |= vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
377
378	if (usage & USAGE_INPUT_ATTACHMENT)
379		flags |= vk::VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
380
381	if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
382		flags |= vk::VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
383
384	return flags;
385}
386
387vk::VkPipelineStageFlags usageToStageFlags (Usage usage)
388{
389	vk::VkPipelineStageFlags flags = 0;
390
391	if (usage & (USAGE_HOST_READ|USAGE_HOST_WRITE))
392		flags |= vk::VK_PIPELINE_STAGE_HOST_BIT;
393
394	if (usage & (USAGE_TRANSFER_SRC|USAGE_TRANSFER_DST))
395		flags |= vk::VK_PIPELINE_STAGE_TRANSFER_BIT;
396
397	if (usage & (USAGE_VERTEX_BUFFER|USAGE_INDEX_BUFFER))
398		flags |= vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
399
400	if (usage & USAGE_INDIRECT_BUFFER)
401		flags |= vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
402
403	if (usage &
404			(USAGE_UNIFORM_BUFFER
405			| USAGE_STORAGE_BUFFER
406			| USAGE_UNIFORM_TEXEL_BUFFER
407			| USAGE_STORAGE_TEXEL_BUFFER
408			| USAGE_SAMPLED_IMAGE
409			| USAGE_STORAGE_IMAGE))
410	{
411		flags |= (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
412				| vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT
413				| vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT
414				| vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
415				| vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
416				| vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
417	}
418
419	if (usage & USAGE_INPUT_ATTACHMENT)
420		flags |= vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
421
422	if (usage & USAGE_COLOR_ATTACHMENT)
423		flags |= vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
424
425	if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
426	{
427		flags |= vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
428				| vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
429	}
430
431	return flags;
432}
433
434vk::VkAccessFlags usageToAccessFlags (Usage usage)
435{
436	vk::VkAccessFlags flags = 0;
437
438	if (usage & USAGE_HOST_READ)
439		flags |= vk::VK_ACCESS_HOST_READ_BIT;
440
441	if (usage & USAGE_HOST_WRITE)
442		flags |= vk::VK_ACCESS_HOST_WRITE_BIT;
443
444	if (usage & USAGE_TRANSFER_SRC)
445		flags |= vk::VK_ACCESS_TRANSFER_READ_BIT;
446
447	if (usage & USAGE_TRANSFER_DST)
448		flags |= vk::VK_ACCESS_TRANSFER_WRITE_BIT;
449
450	if (usage & USAGE_INDEX_BUFFER)
451		flags |= vk::VK_ACCESS_INDEX_READ_BIT;
452
453	if (usage & USAGE_VERTEX_BUFFER)
454		flags |= vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
455
456	if (usage & (USAGE_UNIFORM_BUFFER | USAGE_UNIFORM_TEXEL_BUFFER))
457		flags |= vk::VK_ACCESS_UNIFORM_READ_BIT;
458
459	if (usage & USAGE_SAMPLED_IMAGE)
460		flags |= vk::VK_ACCESS_SHADER_READ_BIT;
461
462	if (usage & (USAGE_STORAGE_BUFFER
463				| USAGE_STORAGE_TEXEL_BUFFER
464				| USAGE_STORAGE_IMAGE))
465		flags |= vk::VK_ACCESS_SHADER_READ_BIT | vk::VK_ACCESS_SHADER_WRITE_BIT;
466
467	if (usage & USAGE_INDIRECT_BUFFER)
468		flags |= vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
469
470	if (usage & USAGE_COLOR_ATTACHMENT)
471		flags |= vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
472
473	if (usage & USAGE_INPUT_ATTACHMENT)
474		flags |= vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
475
476	if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
477		flags |= vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
478			| vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
479
480	return flags;
481}
482
// Parameters selecting one pipeline barrier test case.
struct TestConfig
{
	Usage				usage;		// Allowed usages of the tested memory object
	vk::VkDeviceSize	size;		// Amount of memory to exercise
	vk::VkSharingMode	sharing;	// Queue sharing mode for created resources
};
489
490vk::Move<vk::VkCommandBuffer> createBeginCommandBuffer (const vk::DeviceInterface&	vkd,
491														vk::VkDevice				device,
492														vk::VkCommandPool			pool,
493														vk::VkCommandBufferLevel	level)
494{
495	const vk::VkCommandBufferInheritanceInfo	inheritInfo	=
496	{
497		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
498		DE_NULL,
499		0,
500		0,
501		0,
502		VK_FALSE,
503		0u,
504		0u
505	};
506	const vk::VkCommandBufferBeginInfo			beginInfo =
507	{
508		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
509		DE_NULL,
510		0u,
511		(level == vk::VK_COMMAND_BUFFER_LEVEL_SECONDARY ? &inheritInfo : (const vk::VkCommandBufferInheritanceInfo*)DE_NULL),
512	};
513
514	vk::Move<vk::VkCommandBuffer> commandBuffer (allocateCommandBuffer(vkd, device, pool, level));
515
516	vkd.beginCommandBuffer(*commandBuffer, &beginInfo);
517
518	return commandBuffer;
519}
520
// Create a buffer of 'size' bytes with the given usage and sharing mode.
// NOTE(review): &queueFamilies[0] is read unconditionally, even for
// VK_SHARING_MODE_EXCLUSIVE, so 'queueFamilies' must be non-empty — confirm
// all callers satisfy this.
vk::Move<vk::VkBuffer> createBuffer (const vk::DeviceInterface&	vkd,
									 vk::VkDevice				device,
									 vk::VkDeviceSize			size,
									 vk::VkBufferUsageFlags		usage,
									 vk::VkSharingMode			sharingMode,
									 const vector<deUint32>&	queueFamilies)
{
	const vk::VkBufferCreateInfo	createInfo =
	{
		vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
		DE_NULL,

		0,	// flags
		size,
		usage,
		sharingMode,
		(deUint32)queueFamilies.size(),
		&queueFamilies[0]
	};

	return vk::createBuffer(vkd, device, &createInfo);
}
543
// Allocate 'size' bytes of device memory from the given memory type.
// Throws (via vk::allocateMemory / VK_CHECK machinery) on failure.
vk::Move<vk::VkDeviceMemory> allocMemory (const vk::DeviceInterface&	vkd,
										  vk::VkDevice					device,
										  vk::VkDeviceSize				size,
										  deUint32						memoryTypeIndex)
{
	const vk::VkMemoryAllocateInfo alloc =
	{
		vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,	// sType
		DE_NULL,									// pNext

		size,										// allocationSize
		memoryTypeIndex								// memoryTypeIndex
	};

	return vk::allocateMemory(vkd, device, &alloc);
}
560
// Allocate memory compatible with 'buffer' and the requested property flags
// and bind it to the buffer. Each compatible memory type is tried in index
// order; out-of-memory errors fall through to the next candidate type, while
// any other error propagates. Fails the test if no type can satisfy the
// allocation.
vk::Move<vk::VkDeviceMemory> bindBufferMemory (const vk::InstanceInterface&	vki,
											   const vk::DeviceInterface&	vkd,
											   vk::VkPhysicalDevice			physicalDevice,
											   vk::VkDevice					device,
											   vk::VkBuffer					buffer,
											   vk::VkMemoryPropertyFlags	properties)
{
	const vk::VkMemoryRequirements				memoryRequirements	= vk::getBufferMemoryRequirements(vkd, device, buffer);
	const vk::VkPhysicalDeviceMemoryProperties	memoryProperties	= vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
	deUint32									memoryTypeIndex;

	for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
	{
		// Candidate type must be allowed by the buffer and have all requested properties
		if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
			&& (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
		{
			try
			{
				const vk::VkMemoryAllocateInfo	allocationInfo	=
				{
					vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
					DE_NULL,
					memoryRequirements.size,
					memoryTypeIndex
				};
				vk::Move<vk::VkDeviceMemory>	memory			(vk::allocateMemory(vkd, device, &allocationInfo));

				VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0));

				return memory;
			}
			catch (const vk::Error& error)
			{
				if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
					|| error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
				{
					// Try next memory type/heap if out of memory
				}
				else
				{
					// Throw all other errors forward
					throw;
				}
			}
		}
	}

	TCU_FAIL("Failed to allocate memory for buffer");
}
610
// Image counterpart of bindBufferMemory(): allocate memory compatible with
// 'image' and the requested property flags and bind it. Out-of-memory errors
// advance to the next compatible memory type; other errors propagate. Fails
// the test if no type can satisfy the allocation.
vk::Move<vk::VkDeviceMemory> bindImageMemory (const vk::InstanceInterface&	vki,
											   const vk::DeviceInterface&	vkd,
											   vk::VkPhysicalDevice			physicalDevice,
											   vk::VkDevice					device,
											   vk::VkImage					image,
											   vk::VkMemoryPropertyFlags	properties)
{
	const vk::VkMemoryRequirements				memoryRequirements	= vk::getImageMemoryRequirements(vkd, device, image);
	const vk::VkPhysicalDeviceMemoryProperties	memoryProperties	= vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
	deUint32									memoryTypeIndex;

	for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
	{
		// Candidate type must be allowed by the image and have all requested properties
		if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
			&& (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
		{
			try
			{
				const vk::VkMemoryAllocateInfo	allocationInfo	=
				{
					vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
					DE_NULL,
					memoryRequirements.size,
					memoryTypeIndex
				};
				vk::Move<vk::VkDeviceMemory>	memory			(vk::allocateMemory(vkd, device, &allocationInfo));

				VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0));

				return memory;
			}
			catch (const vk::Error& error)
			{
				if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
					|| error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
				{
					// Try next memory type/heap if out of memory
				}
				else
				{
					// Throw all other errors forward
					throw;
				}
			}
		}
	}

	TCU_FAIL("Failed to allocate memory for image");
}
660
// Submit a single command buffer to 'queue' without semaphores or a fence and
// block until the queue becomes idle.
void queueRun (const vk::DeviceInterface&	vkd,
			   vk::VkQueue					queue,
			   vk::VkCommandBuffer			commandBuffer)
{
	const vk::VkSubmitInfo	submitInfo	=
	{
		vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
		DE_NULL,

		0,											// waitSemaphoreCount
		DE_NULL,									// pWaitSemaphores
		(const vk::VkPipelineStageFlags*)DE_NULL,	// pWaitDstStageMask

		1,											// commandBufferCount
		&commandBuffer,

		0,											// signalSemaphoreCount
		DE_NULL										// pSignalSemaphores
	};

	// No fence (0); completion is awaited with queueWaitIdle below
	VK_CHECK(vkd.queueSubmit(queue, 1, &submitInfo, 0));
	VK_CHECK(vkd.queueWaitIdle(queue));
}
684
// Map the first 'size' bytes of 'memory' and return the host-visible pointer.
// The mapping stays active; the caller is responsible for unmapping.
void* mapMemory (const vk::DeviceInterface&	vkd,
				 vk::VkDevice				device,
				 vk::VkDeviceMemory			memory,
				 vk::VkDeviceSize			size)
{
	void* ptr;

	VK_CHECK(vkd.mapMemory(device, memory, 0, size, 0, &ptr));

	return ptr;
}
696
// CPU-side reference copy of a memory object's contents, with per-byte
// tracking of whether each byte currently holds a known (defined) value.
class ReferenceMemory
{
public:
			ReferenceMemory	(size_t size);

	// Write one byte and mark it defined.
	void	set				(size_t pos, deUint8 val);
	// Read one byte; asserts that the byte is defined.
	deUint8	get				(size_t pos) const;
	// Query whether the byte at 'pos' has a known value.
	bool	isDefined		(size_t pos) const;

	void	setDefined		(size_t offset, size_t size, const void* data);
	void	setUndefined	(size_t offset, size_t size);
	void	setData			(size_t offset, size_t size, const void* data);

	size_t	getSize			(void) const { return m_data.size(); }

private:
	vector<deUint8>		m_data;		// Reference byte values
	vector<deUint64>	m_defined;	// Defined-bit per byte of m_data, 64 bytes per word
};
716
717ReferenceMemory::ReferenceMemory (size_t size)
718	: m_data	(size, 0)
719	, m_defined	(size / 64 + (size % 64 == 0 ? 0 : 1), 0ull)
720{
721}
722
// Store 'val' at 'pos' and flag the byte as having a defined value.
void ReferenceMemory::set (size_t pos, deUint8 val)
{
	DE_ASSERT(pos < m_data.size());

	m_data[pos] = val;
	// Set the defined-bit for this byte (64 bits packed per deUint64 word)
	m_defined[pos / 64] |= 0x1ull << (pos % 64);
}
730
// Copy 'size' bytes from 'data_' into the reference starting at 'offset' and
// mark the whole range defined.
// NOTE(review): the first assert rejects a zero-length write at
// offset == getSize(), which the second assert alone would allow — confirm
// whether that edge case is intended.
void ReferenceMemory::setData (size_t offset, size_t size, const void* data_)
{
	const deUint8* data = (const deUint8*)data_;

	DE_ASSERT(offset < m_data.size());
	DE_ASSERT(offset + size <= m_data.size());

	// \todo [2016-03-09 mika] Optimize
	for (size_t pos = 0; pos < size; pos++)
	{
		m_data[offset + pos] = data[pos];
		m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
	}
}
745
746void ReferenceMemory::setUndefined	(size_t offset, size_t size)
747{
748	// \todo [2016-03-09 mika] Optimize
749	for (size_t pos = 0; pos < size; pos++)
750		m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
751}
752
// Return the byte at 'pos'; asserts that the byte holds a defined value.
deUint8 ReferenceMemory::get (size_t pos) const
{
	DE_ASSERT(pos < m_data.size());
	DE_ASSERT(isDefined(pos));
	return m_data[pos];
}
759
// True if the byte at 'pos' has been given a known value.
bool ReferenceMemory::isDefined (size_t pos) const
{
	DE_ASSERT(pos < m_data.size());

	// Test the per-byte defined-bit (64 bits packed per deUint64 word)
	return (m_defined[pos / 64] & (0x1ull << (pos % 64))) != 0;
}
766
// Owns one device memory allocation of a specific memory type along with
// precomputed limits for the largest buffer and RGBA8 image that fit in it.
// getSupportBuffers()/getSupportImages() report whether the memory type can
// back those resource kinds at all.
class Memory
{
public:
							Memory				(const vk::InstanceInterface&	vki,
												 const vk::DeviceInterface&		vkd,
												 vk::VkPhysicalDevice			physicalDevice,
												 vk::VkDevice					device,
												 vk::VkDeviceSize				size,
												 deUint32						memoryTypeIndex,
												 vk::VkDeviceSize				maxBufferSize,
												 deInt32						maxImageWidth,
												 deInt32						maxImageHeight);

	vk::VkDeviceSize		getSize				(void) const { return m_size; }
	vk::VkDeviceSize		getMaxBufferSize	(void) const { return m_maxBufferSize; }
	bool					getSupportBuffers	(void) const { return m_maxBufferSize > 0; }

	deInt32					getMaxImageWidth	(void) const { return m_maxImageWidth; }
	deInt32					getMaxImageHeight	(void) const { return m_maxImageHeight; }
	bool					getSupportImages	(void) const { return m_maxImageWidth > 0; }

	const vk::VkMemoryType&	getMemoryType		(void) const { return m_memoryType; }
	deUint32				getMemoryTypeIndex	(void) const { return m_memoryTypeIndex; }
	vk::VkDeviceMemory		getMemory			(void) const { return *m_memory; }

private:
	const vk::VkDeviceSize					m_size;
	const deUint32							m_memoryTypeIndex;
	const vk::VkMemoryType					m_memoryType;
	const vk::Unique<vk::VkDeviceMemory>	m_memory;
	const vk::VkDeviceSize					m_maxBufferSize;
	const deInt32							m_maxImageWidth;
	const deInt32							m_maxImageHeight;
};
801
// Look up the VkMemoryType description for 'memoryTypeIndex' on the given
// physical device; asserts that the index is in range.
vk::VkMemoryType getMemoryTypeInfo (const vk::InstanceInterface&	vki,
									vk::VkPhysicalDevice			device,
									deUint32						memoryTypeIndex)
{
	const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, device);

	DE_ASSERT(memoryTypeIndex < memoryProperties.memoryTypeCount);

	return memoryProperties.memoryTypes[memoryTypeIndex];
}
812
// Find the largest buffer size (<= memorySize) whose memory requirements fit
// into 'memorySize' bytes of the given memory type, using a halving-step
// probe: the step starts at memorySize/4 and halves each iteration, growing
// or shrinking the candidate size depending on whether it fits.
vk::VkDeviceSize findMaxBufferSize (const vk::DeviceInterface&		vkd,
									vk::VkDevice					device,

									vk::VkBufferUsageFlags			usage,
									vk::VkSharingMode				sharingMode,
									const vector<deUint32>&			queueFamilies,

									vk::VkDeviceSize				memorySize,
									deUint32						memoryTypeIndex)
{
	vk::VkDeviceSize	lastSuccess	= 0;
	vk::VkDeviceSize	currentSize	= memorySize / 2;

	{
		// Fast path: a buffer of exactly memorySize bytes with matching
		// requirements means the whole allocation can be used.
		const vk::Unique<vk::VkBuffer>  buffer			(createBuffer(vkd, device, memorySize, usage, sharingMode, queueFamilies));
		const vk::VkMemoryRequirements  requirements	(vk::getBufferMemoryRequirements(vkd, device, *buffer));

		if (requirements.size == memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
			return memorySize;
	}

	for (vk::VkDeviceSize stepSize = memorySize / 4; currentSize > 0; stepSize /= 2)
	{
		const vk::Unique<vk::VkBuffer>	buffer			(createBuffer(vkd, device, currentSize, usage, sharingMode, queueFamilies));
		const vk::VkMemoryRequirements	requirements	(vk::getBufferMemoryRequirements(vkd, device, *buffer));

		if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
		{
			// Fits: remember it and try a larger size
			lastSuccess = currentSize;
			currentSize += stepSize;
		}
		else
			currentSize -= stepSize;

		if (stepSize == 0)
			break;
	}

	return lastSuccess;
}
853
// Round size down maximum W * H * 4, where W and H < 4096
// i.e. find the largest byte count <= size that is representable as an RGBA8
// image of W x H texels.
// NOTE(review): bestW is initialized with de::max(maxTexelCount,
// maxTextureSize); whenever maxTexelCount > 4096 (size > 16 KiB) the initial
// candidate has W >= 4096, contradicting the stated W < 4096 constraint —
// de::min looks intended here. Confirm against callers (findImageSizeWxHx4
// asserts the factorization later) before changing.
vk::VkDeviceSize roundBufferSizeToWxHx4 (vk::VkDeviceSize size)
{
	const vk::VkDeviceSize	maxTextureSize	= 4096;
	vk::VkDeviceSize		maxTexelCount	= size / 4;
	vk::VkDeviceSize		bestW			= de::max(maxTexelCount, maxTextureSize);
	vk::VkDeviceSize		bestH			= maxTexelCount / bestW;

	// \todo [2016-03-09 mika] Could probably be faster?
	for (vk::VkDeviceSize w = 1; w * w < maxTexelCount && w < maxTextureSize && bestW * bestH * 4 < size; w++)
	{
		const vk::VkDeviceSize h = maxTexelCount / w;

		if (bestW * bestH < w * h)
		{
			bestW = w;
			bestH = h;
		}
	}

	return bestW * bestH * 4;
}
876
877// Find RGBA8 image size that has exactly "size" of number of bytes.
878// "size" must be W * H * 4 where W and H < 4096
879IVec2 findImageSizeWxHx4 (vk::VkDeviceSize size)
880{
881	const vk::VkDeviceSize	maxTextureSize	= 4096;
882	vk::VkDeviceSize		texelCount		= size / 4;
883
884	DE_ASSERT((size % 4) == 0);
885
886	// \todo [2016-03-09 mika] Could probably be faster?
887	for (vk::VkDeviceSize w = 1; w < maxTextureSize && w < texelCount; w++)
888	{
889		const vk::VkDeviceSize	h	= texelCount / w;
890
891		if ((texelCount  % w) == 0 && h < maxTextureSize)
892			return IVec2((int)w, (int)h);
893	}
894
895	DE_FATAL("Invalid size");
896	return IVec2(-1, -1);
897}
898
// Find the largest RGBA8 optimal-tiling 2D image (width, height) whose memory
// requirements fit in 'memorySize' bytes of the given memory type. Starts
// from a near-square guess covering the whole allocation and refines both
// dimensions with a halving-step probe. Returns (0, 0) if nothing fits.
IVec2 findMaxRGBA8ImageSize (const vk::DeviceInterface&	vkd,
							 vk::VkDevice				device,

							 vk::VkImageUsageFlags		usage,
							 vk::VkSharingMode			sharingMode,
							 const vector<deUint32>&	queueFamilies,

							 vk::VkDeviceSize			memorySize,
							 deUint32					memoryTypeIndex)
{
	IVec2		lastSuccess		(0);
	IVec2		currentSize;

	{
		// Initial guess: near-square dimensions using all available texels
		const deUint32	texelCount	= (deUint32)(memorySize / 4);
		const deUint32	width		= (deUint32)deFloatSqrt((float)texelCount);
		const deUint32	height		= texelCount / width;

		currentSize[0] = deMaxu32(width, height);
		currentSize[1] = deMinu32(width, height);
	}

	for (deInt32 stepSize = currentSize[0] / 2; currentSize[0] > 0; stepSize /= 2)
	{
		const vk::VkImageCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,

			0u,
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)currentSize[0],
				(deUint32)currentSize[1],
				1u,
			},
			1u, 1u,
			vk::VK_SAMPLE_COUNT_1_BIT,
			vk::VK_IMAGE_TILING_OPTIMAL,
			usage,
			sharingMode,
			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};
		const vk::Unique<vk::VkImage>	image			(vk::createImage(vkd, device, &createInfo));
		const vk::VkMemoryRequirements	requirements	(vk::getImageMemoryRequirements(vkd, device, *image));

		if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
		{
			// Fits: remember it and try growing both dimensions
			lastSuccess = currentSize;
			currentSize[0] += stepSize;
			currentSize[1] += stepSize;
		}
		else
		{
			currentSize[0] -= stepSize;
			currentSize[1] -= stepSize;
		}

		if (stepSize == 0)
			break;
	}

	return lastSuccess;
}
966
// Allocate 'size' bytes from 'memoryTypeIndex' and record the precomputed
// buffer/image limits for that memory type (see findMaxBufferSize /
// findMaxRGBA8ImageSize).
Memory::Memory (const vk::InstanceInterface&	vki,
				const vk::DeviceInterface&		vkd,
				vk::VkPhysicalDevice			physicalDevice,
				vk::VkDevice					device,
				vk::VkDeviceSize				size,
				deUint32						memoryTypeIndex,
				vk::VkDeviceSize				maxBufferSize,
				deInt32							maxImageWidth,
				deInt32							maxImageHeight)
	: m_size			(size)
	, m_memoryTypeIndex	(memoryTypeIndex)
	, m_memoryType		(getMemoryTypeInfo(vki, physicalDevice, memoryTypeIndex))
	, m_memory			(allocMemory(vkd, device, size, memoryTypeIndex))
	, m_maxBufferSize	(maxBufferSize)
	, m_maxImageWidth	(maxImageWidth)
	, m_maxImageHeight	(maxImageHeight)
{
}
985
// Bundles the Vulkan entry points, device handles, queues and shader binaries
// shared by every stage of a test. Owns the command pool (created with the
// RESET_COMMAND_BUFFER flag); all reference members must outlive this object.
class Context
{
public:
													Context					(const vk::InstanceInterface&					vki,
																			 const vk::DeviceInterface&						vkd,
																			 vk::VkPhysicalDevice							physicalDevice,
																			 vk::VkDevice									device,
																			 vk::VkQueue									queue,
																			 deUint32										queueFamilyIndex,
																			 const vector<pair<deUint32, vk::VkQueue> >&	queues,
																			 const vk::BinaryCollection&					binaryCollection)
		: m_vki					(vki)
		, m_vkd					(vkd)
		, m_physicalDevice		(physicalDevice)
		, m_device				(device)
		, m_queue				(queue)
		, m_queueFamilyIndex	(queueFamilyIndex)
		, m_queues				(queues)
		, m_commandPool			(createCommandPool(vkd, device, vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex))
		, m_binaryCollection	(binaryCollection)
	{
		// Cache the family index of every (family, queue) pair for sharing-mode setup
		for (size_t queueNdx = 0; queueNdx < m_queues.size(); queueNdx++)
			m_queueFamilies.push_back(m_queues[queueNdx].first);
	}

	const vk::InstanceInterface&					getInstanceInterface	(void) const { return m_vki; }
	vk::VkPhysicalDevice							getPhysicalDevice		(void) const { return m_physicalDevice; }
	vk::VkDevice									getDevice				(void) const { return m_device; }
	const vk::DeviceInterface&						getDeviceInterface		(void) const { return m_vkd; }
	vk::VkQueue										getQueue				(void) const { return m_queue; }
	deUint32										getQueueFamily			(void) const { return m_queueFamilyIndex; }
	const vector<pair<deUint32, vk::VkQueue> >&		getQueues				(void) const { return m_queues; }
	// NOTE(review): returns by value despite the const qualifier; a const
	// reference (as in getQueues) would avoid copying the vector — confirm
	// before changing the signature.
	const vector<deUint32>							getQueueFamilies		(void) const { return m_queueFamilies; }
	vk::VkCommandPool								getCommandPool			(void) const { return *m_commandPool; }
	const vk::BinaryCollection&						getBinaryCollection		(void) const { return m_binaryCollection; }

private:
	const vk::InstanceInterface&					m_vki;
	const vk::DeviceInterface&						m_vkd;
	const vk::VkPhysicalDevice						m_physicalDevice;
	const vk::VkDevice								m_device;
	const vk::VkQueue								m_queue;
	const deUint32									m_queueFamilyIndex;
	const vector<pair<deUint32, vk::VkQueue> >		m_queues;
	const vk::Unique<vk::VkCommandPool>				m_commandPool;
	const vk::BinaryCollection&						m_binaryCollection;
	vector<deUint32>								m_queueFamilies;
};
1034
1035class PrepareContext
1036{
1037public:
1038													PrepareContext			(const Context&	context,
1039																			 const Memory&	memory)
1040		: m_context	(context)
1041		, m_memory	(memory)
1042	{
1043	}
1044
1045	const Memory&									getMemory				(void) const { return m_memory; }
1046	const Context&									getContext				(void) const { return m_context; }
1047	const vk::BinaryCollection&						getBinaryCollection		(void) const { return m_context.getBinaryCollection(); }
1048
1049	void				setBuffer		(vk::Move<vk::VkBuffer>	buffer,
1050										 vk::VkDeviceSize		size)
1051	{
1052		DE_ASSERT(!m_currentImage);
1053		DE_ASSERT(!m_currentBuffer);
1054
1055		m_currentBuffer		= buffer;
1056		m_currentBufferSize	= size;
1057	}
1058
1059	vk::VkBuffer		getBuffer		(void) const { return *m_currentBuffer; }
1060	vk::VkDeviceSize	getBufferSize	(void) const
1061	{
1062		DE_ASSERT(m_currentBuffer);
1063		return m_currentBufferSize;
1064	}
1065
1066	void				releaseBuffer	(void) { m_currentBuffer.disown(); }
1067
1068	void				setImage		(vk::Move<vk::VkImage>	image,
1069										 vk::VkImageLayout		layout,
1070										 vk::VkDeviceSize		memorySize,
1071										 deInt32				width,
1072										 deInt32				height)
1073	{
1074		DE_ASSERT(!m_currentImage);
1075		DE_ASSERT(!m_currentBuffer);
1076
1077		m_currentImage				= image;
1078		m_currentImageMemorySize	= memorySize;
1079		m_currentImageLayout		= layout;
1080		m_currentImageWidth			= width;
1081		m_currentImageHeight		= height;
1082	}
1083
1084	void				setImageLayout	(vk::VkImageLayout layout)
1085	{
1086		DE_ASSERT(m_currentImage);
1087		m_currentImageLayout = layout;
1088	}
1089
1090	vk::VkImage			getImage		(void) const { return *m_currentImage; }
1091	deInt32				getImageWidth	(void) const
1092	{
1093		DE_ASSERT(m_currentImage);
1094		return m_currentImageWidth;
1095	}
1096	deInt32				getImageHeight	(void) const
1097	{
1098		DE_ASSERT(m_currentImage);
1099		return m_currentImageHeight;
1100	}
1101	vk::VkDeviceSize	getImageMemorySize	(void) const
1102	{
1103		DE_ASSERT(m_currentImage);
1104		return m_currentImageMemorySize;
1105	}
1106
1107	void				releaseImage	(void) { m_currentImage.disown(); }
1108
1109	vk::VkImageLayout	getImageLayout	(void) const
1110	{
1111		DE_ASSERT(m_currentImage);
1112		return m_currentImageLayout;
1113	}
1114
1115private:
1116	const Context&			m_context;
1117	const Memory&			m_memory;
1118
1119	vk::Move<vk::VkBuffer>	m_currentBuffer;
1120	vk::VkDeviceSize		m_currentBufferSize;
1121
1122	vk::Move<vk::VkImage>	m_currentImage;
1123	vk::VkDeviceSize		m_currentImageMemorySize;
1124	vk::VkImageLayout		m_currentImageLayout;
1125	deInt32					m_currentImageWidth;
1126	deInt32					m_currentImageHeight;
1127};
1128
1129class ExecuteContext
1130{
1131public:
1132					ExecuteContext	(const Context&	context)
1133		: m_context	(context)
1134	{
1135	}
1136
1137	const Context&	getContext		(void) const { return m_context; }
1138	void			setMapping		(void* ptr) { m_mapping = ptr; }
1139	void*			getMapping		(void) const { return m_mapping; }
1140
1141private:
1142	const Context&	m_context;
1143	void*			m_mapping;
1144};
1145
// Context for the verify phase: collects test results and holds the CPU-side
// reference copies of the memory contents and of the image.
class VerifyContext
{
public:
							VerifyContext		(TestLog&				log,
												 tcu::ResultCollector&	resultCollector,
												 const Context&			context,
												 vk::VkDeviceSize		size)
		: m_log				(log)
		, m_resultCollector	(resultCollector)
		, m_context			(context)
		, m_reference		((size_t)size)	// reference spans the whole allocation
	{
	}

	const Context&			getContext			(void) const { return m_context; }
	TestLog&				getLog				(void) const { return m_log; }
	tcu::ResultCollector&	getResultCollector	(void) const { return m_resultCollector; }

	// Byte-level reference contents of the memory allocation.
	ReferenceMemory&		getReference		(void) { return m_reference; }
	// Reference image; (re)allocated by commands such as CreateImage::verify().
	TextureLevel&			getReferenceImage	(void) { return m_referenceImage;}

private:
	TestLog&				m_log;
	tcu::ResultCollector&	m_resultCollector;
	const Context&			m_context;
	ReferenceMemory			m_reference;
	TextureLevel			m_referenceImage;
};
1174
1175class Command
1176{
1177public:
1178	// Constructor should allocate all non-vulkan resources.
1179	virtual				~Command	(void) {}
1180
1181	// Get name of the command
1182	virtual const char*	getName		(void) const = 0;
1183
1184	// Log prepare operations
1185	virtual void		logPrepare	(TestLog&, size_t) const {}
1186	// Log executed operations
1187	virtual void		logExecute	(TestLog&, size_t) const {}
1188
1189	// Prepare should allocate all vulkan resources and resources that require
1190	// that buffer or memory has been already allocated. This should build all
1191	// command buffers etc.
1192	virtual void		prepare		(PrepareContext&) {}
1193
1194	// Execute command. Write or read mapped memory, submit commands to queue
1195	// etc.
1196	virtual void		execute		(ExecuteContext&) {}
1197
1198	// Verify that results are correct.
1199	virtual void		verify		(VerifyContext&, size_t) {}
1200
1201protected:
1202	// Allow only inheritance
1203						Command		(void) {}
1204
1205private:
1206	// Disallow copying
1207						Command		(const Command&);
1208	Command&			operator&	(const Command&);
1209};
1210
1211class Map : public Command
1212{
1213public:
1214						Map			(void) {}
1215						~Map		(void) {}
1216	const char*			getName		(void) const { return "Map"; }
1217
1218
1219	void				logExecute	(TestLog& log, size_t commandIndex) const
1220	{
1221		log << TestLog::Message << commandIndex << ":" << getName() << " Map memory" << TestLog::EndMessage;
1222	}
1223
1224	void				prepare		(PrepareContext& context)
1225	{
1226		m_memory	= context.getMemory().getMemory();
1227		m_size		= context.getMemory().getSize();
1228	}
1229
1230	void				execute		(ExecuteContext& context)
1231	{
1232		const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1233		const vk::VkDevice			device	= context.getContext().getDevice();
1234
1235		context.setMapping(mapMemory(vkd, device, m_memory, m_size));
1236	}
1237
1238private:
1239	vk::VkDeviceMemory	m_memory;
1240	vk::VkDeviceSize	m_size;
1241};
1242
1243class UnMap : public Command
1244{
1245public:
1246						UnMap		(void) {}
1247						~UnMap		(void) {}
1248	const char*			getName		(void) const { return "UnMap"; }
1249
1250	void				logExecute	(TestLog& log, size_t commandIndex) const
1251	{
1252		log << TestLog::Message << commandIndex << ": Unmap memory" << TestLog::EndMessage;
1253	}
1254
1255	void				prepare		(PrepareContext& context)
1256	{
1257		m_memory	= context.getMemory().getMemory();
1258	}
1259
1260	void				execute		(ExecuteContext& context)
1261	{
1262		const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1263		const vk::VkDevice			device	= context.getContext().getDevice();
1264
1265		vkd.unmapMemory(device, m_memory);
1266		context.setMapping(DE_NULL);
1267	}
1268
1269private:
1270	vk::VkDeviceMemory	m_memory;
1271};
1272
1273class Invalidate : public Command
1274{
1275public:
1276						Invalidate	(void) {}
1277						~Invalidate	(void) {}
1278	const char*			getName		(void) const { return "Invalidate"; }
1279
1280	void				logExecute	(TestLog& log, size_t commandIndex) const
1281	{
1282		log << TestLog::Message << commandIndex << ": Invalidate mapped memory" << TestLog::EndMessage;
1283	}
1284
1285	void				prepare		(PrepareContext& context)
1286	{
1287		m_memory	= context.getMemory().getMemory();
1288		m_size		= context.getMemory().getSize();
1289	}
1290
1291	void				execute		(ExecuteContext& context)
1292	{
1293		const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1294		const vk::VkDevice			device	= context.getContext().getDevice();
1295
1296		vk::invalidateMappedMemoryRange(vkd, device, m_memory, 0, m_size);
1297	}
1298
1299private:
1300	vk::VkDeviceMemory	m_memory;
1301	vk::VkDeviceSize	m_size;
1302};
1303
1304class Flush : public Command
1305{
1306public:
1307						Flush		(void) {}
1308						~Flush		(void) {}
1309	const char*			getName		(void) const { return "Flush"; }
1310
1311	void				logExecute	(TestLog& log, size_t commandIndex) const
1312	{
1313		log << TestLog::Message << commandIndex << ": Flush mapped memory" << TestLog::EndMessage;
1314	}
1315
1316	void				prepare		(PrepareContext& context)
1317	{
1318		m_memory	= context.getMemory().getMemory();
1319		m_size		= context.getMemory().getSize();
1320	}
1321
1322	void				execute		(ExecuteContext& context)
1323	{
1324		const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1325		const vk::VkDevice			device	= context.getContext().getDevice();
1326
1327		vk::flushMappedMemoryRange(vkd, device, m_memory, 0, m_size);
1328	}
1329
1330private:
1331	vk::VkDeviceMemory	m_memory;
1332	vk::VkDeviceSize	m_size;
1333};
1334
1335// Host memory reads and writes
1336class HostMemoryAccess : public Command
1337{
1338public:
1339					HostMemoryAccess	(bool read, bool write, deUint32 seed);
1340					~HostMemoryAccess	(void) {}
1341	const char*		getName				(void) const { return "HostMemoryAccess"; }
1342
1343	void			logExecute			(TestLog& log, size_t commandIndex) const;
1344	void			prepare				(PrepareContext& context);
1345	void			execute				(ExecuteContext& context);
1346	void			verify				(VerifyContext& context, size_t commandIndex);
1347
1348private:
1349	const bool		m_read;
1350	const bool		m_write;
1351	const deUint32	m_seed;
1352
1353	size_t			m_size;
1354	vector<deUint8>	m_readData;
1355};
1356
1357HostMemoryAccess::HostMemoryAccess (bool read, bool write, deUint32 seed)
1358	: m_read	(read)
1359	, m_write	(write)
1360	, m_seed	(seed)
1361{
1362}
1363
1364void HostMemoryAccess::logExecute (TestLog& log, size_t commandIndex) const
1365{
1366	log << TestLog::Message << commandIndex << ": Host memory access:" << (m_read ? " read" : "") << (m_write ? " write" : "")  << ", seed: " << m_seed << TestLog::EndMessage;
1367}
1368
1369void HostMemoryAccess::prepare (PrepareContext& context)
1370{
1371	m_size = (size_t)context.getMemory().getSize();
1372
1373	if (m_read)
1374		m_readData.resize(m_size, 0);
1375}
1376
1377void HostMemoryAccess::execute (ExecuteContext& context)
1378{
1379	de::Random		rng	(m_seed);
1380	deUint8* const	ptr	= (deUint8*)context.getMapping();
1381
1382	if (m_read && m_write)
1383	{
1384		for (size_t pos = 0; pos < m_size; pos++)
1385		{
1386			const deUint8	mask	= rng.getUint8();
1387			const deUint8	value	= ptr[pos];
1388
1389			m_readData[pos] = value;
1390			ptr[pos] = value ^ mask;
1391		}
1392	}
1393	else if (m_read)
1394	{
1395		for (size_t pos = 0; pos < m_size; pos++)
1396		{
1397			const deUint8	value	= ptr[pos];
1398
1399			m_readData[pos] = value;
1400		}
1401	}
1402	else if (m_write)
1403	{
1404		for (size_t pos = 0; pos < m_size; pos++)
1405		{
1406			const deUint8	value	= rng.getUint8();
1407
1408			ptr[pos] = value;
1409		}
1410	}
1411	else
1412		DE_FATAL("Host memory access without read or write.");
1413}
1414
// Checks the bytes read in execute() against the reference memory and updates
// the reference to match the writes. The de::Random sequence here must mirror
// execute() exactly: same seed, one getUint8() per byte in the same branches.
void HostMemoryAccess::verify (VerifyContext& context, size_t commandIndex)
{
	tcu::ResultCollector&	resultCollector	= context.getResultCollector();
	ReferenceMemory&		reference		= context.getReference();
	de::Random				rng				(m_seed);

	if (m_read && m_write)
	{
		for (size_t pos = 0; pos < m_size; pos++)
		{
			const deUint8	mask	= rng.getUint8();	// same mask execute() XORed in
			const deUint8	value	= m_readData[pos];

			// Bytes never written have undefined content; only defined bytes
			// can be checked and updated.
			if (reference.isDefined(pos))
			{
				if (value != reference.get(pos))
				{
					resultCollector.fail(
							de::toString(commandIndex) + ":" + getName()
							+ " Result differs from reference, Expected: "
							+ de::toString(tcu::toHex<8>(reference.get(pos)))
							+ ", Got: "
							+ de::toString(tcu::toHex<8>(value))
							+ ", At offset: "
							+ de::toString(pos));
					break;	// report only the first mismatch
				}

				// Replay the write into the reference memory.
				reference.set(pos, reference.get(pos) ^ mask);
			}
		}
	}
	else if (m_read)
	{
		for (size_t pos = 0; pos < m_size; pos++)
		{
			const deUint8	value	= m_readData[pos];

			if (reference.isDefined(pos))
			{
				if (value != reference.get(pos))
				{
					resultCollector.fail(
							de::toString(commandIndex) + ":" + getName()
							+ " Result differs from reference, Expected: "
							+ de::toString(tcu::toHex<8>(reference.get(pos)))
							+ ", Got: "
							+ de::toString(tcu::toHex<8>(value))
							+ ", At offset: "
							+ de::toString(pos));
					break;	// report only the first mismatch
				}
			}
		}
	}
	else if (m_write)
	{
		// Pure write: every byte becomes defined with the replayed random data.
		for (size_t pos = 0; pos < m_size; pos++)
		{
			const deUint8	value	= rng.getUint8();

			reference.set(pos, value);
		}
	}
	else
		DE_FATAL("Host memory access without read or write.");
}
1482
1483class CreateBuffer : public Command
1484{
1485public:
1486									CreateBuffer	(vk::VkBufferUsageFlags	usage,
1487													 vk::VkSharingMode		sharing);
1488									~CreateBuffer	(void) {}
1489	const char*						getName			(void) const { return "CreateBuffer"; }
1490
1491	void							logPrepare		(TestLog& log, size_t commandIndex) const;
1492	void							prepare			(PrepareContext& context);
1493
1494private:
1495	const vk::VkBufferUsageFlags	m_usage;
1496	const vk::VkSharingMode			m_sharing;
1497};
1498
1499CreateBuffer::CreateBuffer (vk::VkBufferUsageFlags	usage,
1500							vk::VkSharingMode		sharing)
1501	: m_usage	(usage)
1502	, m_sharing	(sharing)
1503{
1504}
1505
1506void CreateBuffer::logPrepare (TestLog& log, size_t commandIndex) const
1507{
1508	log << TestLog::Message << commandIndex << ":" << getName() << " Create buffer, Sharing mode: " << m_sharing << ", Usage: " << vk::getBufferUsageFlagsStr(m_usage) << TestLog::EndMessage;
1509}
1510
1511void CreateBuffer::prepare (PrepareContext& context)
1512{
1513	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
1514	const vk::VkDevice			device			= context.getContext().getDevice();
1515	const vk::VkDeviceSize		bufferSize		= context.getMemory().getMaxBufferSize();
1516	const vector<deUint32>&		queueFamilies	= context.getContext().getQueueFamilies();
1517
1518	context.setBuffer(createBuffer(vkd, device, bufferSize, m_usage, m_sharing, queueFamilies), bufferSize);
1519}
1520
// Takes ownership of the current buffer from the prepare context and
// destroys it during the execute phase.
class DestroyBuffer : public Command
{
public:
							DestroyBuffer	(void);
							~DestroyBuffer	(void) {}
	const char*				getName			(void) const { return "DestroyBuffer"; }

	void					logExecute		(TestLog& log, size_t commandIndex) const;
	void					prepare			(PrepareContext& context);
	void					execute			(ExecuteContext& context);

private:
	vk::Move<vk::VkBuffer>	m_buffer;	// owns the buffer between prepare() and execute()
};

DestroyBuffer::DestroyBuffer (void)
{
}

void DestroyBuffer::prepare (PrepareContext& context)
{
	// Wrap the context's buffer handle into a Move<> so this command owns it
	// (vk::check validates the handle), then make the context drop ownership
	// so it is not destroyed twice.
	m_buffer = vk::Move<vk::VkBuffer>(vk::check(context.getBuffer()), vk::Deleter<vk::VkBuffer>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
	context.releaseBuffer();
}

void DestroyBuffer::logExecute (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Destroy buffer" << TestLog::EndMessage;
}

void DestroyBuffer::execute (ExecuteContext& context)
{
	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
	const vk::VkDevice			device			= context.getContext().getDevice();

	// Destroy explicitly and disown so the Move<> destructor won't destroy again.
	vkd.destroyBuffer(device, m_buffer.disown(), DE_NULL);
}
1558
1559class BindBufferMemory : public Command
1560{
1561public:
1562				BindBufferMemory	(void) {}
1563				~BindBufferMemory	(void) {}
1564	const char*	getName				(void) const { return "BindBufferMemory"; }
1565
1566	void		logPrepare			(TestLog& log, size_t commandIndex) const;
1567	void		prepare				(PrepareContext& context);
1568};
1569
1570void BindBufferMemory::logPrepare (TestLog& log, size_t commandIndex) const
1571{
1572	log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to buffer" << TestLog::EndMessage;
1573}
1574
1575void BindBufferMemory::prepare (PrepareContext& context)
1576{
1577	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
1578	const vk::VkDevice			device			= context.getContext().getDevice();
1579
1580	VK_CHECK(vkd.bindBufferMemory(device, context.getBuffer(), context.getMemory().getMemory(), 0));
1581}
1582
// Creates a 2D RGBA8 image of the maximum size the allocation supports and
// hands its ownership to the prepare context.
class CreateImage : public Command
{
public:
									CreateImage		(vk::VkImageUsageFlags	usage,
													 vk::VkSharingMode		sharing);
									~CreateImage	(void) {}
	const char*						getName			(void) const { return "CreateImage"; }

	void							logPrepare		(TestLog& log, size_t commandIndex) const;
	void							prepare			(PrepareContext& context);
	void							verify			(VerifyContext& context, size_t commandIndex);

private:
	const vk::VkImageUsageFlags	m_usage;
	const vk::VkSharingMode		m_sharing;
	deInt32						m_imageWidth;	// set in prepare(), read in verify()
	deInt32						m_imageHeight;
};

CreateImage::CreateImage (vk::VkImageUsageFlags	usage,
						  vk::VkSharingMode		sharing)
	: m_usage	(usage)
	, m_sharing	(sharing)
{
}

void CreateImage::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Create image, sharing: " << m_sharing << ", usage: " << vk::getImageUsageFlagsStr(m_usage)  << TestLog::EndMessage;
}

void CreateImage::prepare (PrepareContext& context)
{
	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
	const vk::VkDevice			device			= context.getContext().getDevice();
	const vector<deUint32>&		queueFamilies	= context.getContext().getQueueFamilies();

	m_imageWidth	= context.getMemory().getMaxImageWidth();
	m_imageHeight	= context.getMemory().getMaxImageHeight();

	{
		// Positional aggregate initialization of VkImageCreateInfo.
		const vk::VkImageCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,

			0u,								// flags
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u,							// depth
			},
			1u, 1u,							// mipLevels, arrayLayers
			vk::VK_SAMPLE_COUNT_1_BIT,
			vk::VK_IMAGE_TILING_OPTIMAL,
			m_usage,
			m_sharing,
			(deUint32)queueFamilies.size(),	// queue families are listed even for exclusive sharing
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED	// initialLayout
		};
		vk::Move<vk::VkImage>			image			(createImage(vkd, device, &createInfo));
		const vk::VkMemoryRequirements	requirements	= vk::getImageMemoryRequirements(vkd, device, *image);

		// Prepare context takes ownership together with layout, size and dimensions.
		context.setImage(image, vk::VK_IMAGE_LAYOUT_UNDEFINED, requirements.size, m_imageWidth, m_imageHeight);
	}
}

void CreateImage::verify (VerifyContext& context, size_t)
{
	// (Re)allocate the reference image to match the created image's format and size.
	context.getReferenceImage() = TextureLevel(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight);
}
1657
// Takes ownership of the current image from the prepare context and destroys
// it during the execute phase.
class DestroyImage : public Command
{
public:
							DestroyImage	(void);
							~DestroyImage	(void) {}
	const char*				getName			(void) const { return "DestroyImage"; }

	void					logExecute		(TestLog& log, size_t commandIndex) const;
	void					prepare			(PrepareContext& context);
	void					execute			(ExecuteContext& context);

private:
	vk::Move<vk::VkImage>	m_image;	// owns the image between prepare() and execute()
};

DestroyImage::DestroyImage (void)
{
}

void DestroyImage::prepare (PrepareContext& context)
{
	// Wrap the context's image handle into a Move<> so this command owns it
	// (vk::check validates the handle), then make the context drop ownership
	// so it is not destroyed twice.
	m_image = vk::Move<vk::VkImage>(vk::check(context.getImage()), vk::Deleter<vk::VkImage>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
	context.releaseImage();
}


void DestroyImage::logExecute (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Destroy image" << TestLog::EndMessage;
}

void DestroyImage::execute (ExecuteContext& context)
{
	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
	const vk::VkDevice			device			= context.getContext().getDevice();

	// Destroy explicitly and disown so the Move<> destructor won't destroy again.
	vkd.destroyImage(device, m_image.disown(), DE_NULL);
}
1696
1697class BindImageMemory : public Command
1698{
1699public:
1700				BindImageMemory		(void) {}
1701				~BindImageMemory	(void) {}
1702	const char*	getName				(void) const { return "BindImageMemory"; }
1703
1704	void		logPrepare			(TestLog& log, size_t commandIndex) const;
1705	void		prepare				(PrepareContext& context);
1706};
1707
1708void BindImageMemory::logPrepare (TestLog& log, size_t commandIndex) const
1709{
1710	log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to image" << TestLog::EndMessage;
1711}
1712
1713void BindImageMemory::prepare (PrepareContext& context)
1714{
1715	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
1716	const vk::VkDevice				device			= context.getContext().getDevice();
1717
1718	VK_CHECK(vkd.bindImageMemory(device, context.getImage(), context.getMemory().getMemory(), 0));
1719}
1720
1721class QueueWaitIdle : public Command
1722{
1723public:
1724				QueueWaitIdle	(void) {}
1725				~QueueWaitIdle	(void) {}
1726	const char*	getName			(void) const { return "QueuetWaitIdle"; }
1727
1728	void		logExecute		(TestLog& log, size_t commandIndex) const;
1729	void		execute			(ExecuteContext& context);
1730};
1731
1732void QueueWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
1733{
1734	log << TestLog::Message << commandIndex << ":" << getName() << " Queue wait idle" << TestLog::EndMessage;
1735}
1736
1737void QueueWaitIdle::execute (ExecuteContext& context)
1738{
1739	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1740	const vk::VkQueue			queue	= context.getContext().getQueue();
1741
1742	VK_CHECK(vkd.queueWaitIdle(queue));
1743}
1744
1745class DeviceWaitIdle : public Command
1746{
1747public:
1748				DeviceWaitIdle	(void) {}
1749				~DeviceWaitIdle	(void) {}
1750	const char*	getName			(void) const { return "DeviceWaitIdle"; }
1751
1752	void		logExecute		(TestLog& log, size_t commandIndex) const;
1753	void		execute			(ExecuteContext& context);
1754};
1755
1756void DeviceWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
1757{
1758	log << TestLog::Message << commandIndex << ":" << getName() << " Device wait idle" << TestLog::EndMessage;
1759}
1760
1761void DeviceWaitIdle::execute (ExecuteContext& context)
1762{
1763	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1764	const vk::VkDevice			device	= context.getContext().getDevice();
1765
1766	VK_CHECK(vkd.deviceWaitIdle(device));
1767}
1768
// Read-only view of the prepare context plus the command buffer that
// CmdCommand::submit() implementations record into.
class SubmitContext
{
public:
								SubmitContext		(const PrepareContext&		context,
													 const vk::VkCommandBuffer	commandBuffer)
		: m_context			(context)
		, m_commandBuffer	(commandBuffer)
	{
	}

	const Memory&				getMemory			(void) const { return m_context.getMemory(); }
	const Context&				getContext			(void) const { return m_context.getContext(); }
	// Command buffer in recording state; commands are appended to it.
	vk::VkCommandBuffer			getCommandBuffer	(void) const { return m_commandBuffer; }

	vk::VkBuffer				getBuffer			(void) const { return m_context.getBuffer(); }
	vk::VkDeviceSize			getBufferSize		(void) const { return m_context.getBufferSize(); }

	vk::VkImage					getImage			(void) const { return m_context.getImage(); }
	deInt32						getImageWidth		(void) const { return m_context.getImageWidth(); }
	deInt32						getImageHeight		(void) const { return m_context.getImageHeight(); }

private:
	const PrepareContext&		m_context;
	const vk::VkCommandBuffer	m_commandBuffer;
};
1794
// Base class for commands that are recorded into a command buffer as part of
// a SubmitCommandBuffer command (as opposed to top-level Commands).
class CmdCommand
{
public:
	virtual				~CmdCommand	(void) {}
	virtual const char*	getName		(void) const = 0;

	// Log things that are done during prepare
	virtual void		logPrepare	(TestLog&, size_t) const {}
	// Log submitted calls etc.
	virtual void		logSubmit	(TestLog&, size_t) const {}

	// Allocate vulkan resources and prepare for submit.
	virtual void		prepare		(PrepareContext&) {}

	// Submit commands to command buffer.
	virtual void		submit		(SubmitContext&) {}

	// Verify results
	virtual void		verify		(VerifyContext&, size_t) {}
};
1815
// Records the given CmdCommands into a primary command buffer during prepare
// and submits that command buffer to the queue during execute.
class SubmitCommandBuffer : public Command
{
public:
					// Takes ownership of the heap-allocated commands; they are
					// deleted in the destructor.
					SubmitCommandBuffer		(const vector<CmdCommand*>& commands);
					~SubmitCommandBuffer	(void);

	const char*		getName					(void) const { return "SubmitCommandBuffer"; }
	void			logExecute				(TestLog& log, size_t commandIndex) const;
	void			logPrepare				(TestLog& log, size_t commandIndex) const;

	// Allocate command buffer and submit commands to command buffer
	void			prepare					(PrepareContext& context);
	void			execute					(ExecuteContext& context);

	// Verify that results are correct.
	void			verify					(VerifyContext& context, size_t commandIndex);

private:
	vector<CmdCommand*>				m_commands;		// owned raw pointers
	vk::Move<vk::VkCommandBuffer>	m_commandBuffer;
};

SubmitCommandBuffer::SubmitCommandBuffer (const vector<CmdCommand*>& commands)
	: m_commands	(commands)
{
}

SubmitCommandBuffer::~SubmitCommandBuffer (void)
{
	// Owns the sub-commands; release them here.
	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
		delete m_commands[cmdNdx];
}
1848
// Allocates the command buffer, lets every sub-command allocate its resources
// and then records all sub-commands into the command buffer.
void SubmitCommandBuffer::prepare (PrepareContext& context)
{
	const vk::DeviceInterface&	vkd			= context.getContext().getDeviceInterface();
	const vk::VkDevice			device		= context.getContext().getDevice();
	const vk::VkCommandPool		commandPool	= context.getContext().getCommandPool();

	// The returned command buffer is already in recording state.
	m_commandBuffer = createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY);

	// Phase 1: let every sub-command create its Vulkan resources first...
	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
	{
		CmdCommand& command = *m_commands[cmdNdx];

		command.prepare(context);
	}

	{
		SubmitContext submitContext (context, *m_commandBuffer);

		// Phase 2: ...then record them all into the command buffer.
		for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
		{
			CmdCommand& command = *m_commands[cmdNdx];

			command.submit(submitContext);
		}

		VK_CHECK(vkd.endCommandBuffer(*m_commandBuffer));
	}
}
1877
1878void SubmitCommandBuffer::execute (ExecuteContext& context)
1879{
1880	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1881	const vk::VkCommandBuffer	cmd		= *m_commandBuffer;
1882	const vk::VkQueue			queue	= context.getContext().getQueue();
1883	const vk::VkSubmitInfo		submit	=
1884	{
1885		vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
1886		DE_NULL,
1887
1888		0,
1889		DE_NULL,
1890		(const vk::VkPipelineStageFlags*)DE_NULL,
1891
1892		1,
1893		&cmd,
1894
1895		0,
1896		DE_NULL
1897	};
1898
1899	vkd.queueSubmit(queue, 1, &submit, 0);
1900}
1901
1902void SubmitCommandBuffer::verify (VerifyContext& context, size_t commandIndex)
1903{
1904	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
1905	const tcu::ScopedLogSection	section		(context.getLog(), sectionName, sectionName);
1906
1907	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1908		m_commands[cmdNdx]->verify(context, cmdNdx);
1909}
1910
1911void SubmitCommandBuffer::logPrepare (TestLog& log, size_t commandIndex) const
1912{
1913	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
1914	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
1915
1916	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1917		m_commands[cmdNdx]->logPrepare(log, cmdNdx);
1918}
1919
1920void SubmitCommandBuffer::logExecute (TestLog& log, size_t commandIndex) const
1921{
1922	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
1923	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
1924
1925	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1926		m_commands[cmdNdx]->logSubmit(log, cmdNdx);
1927}
1928
// Records a vkCmdPipelineBarrier. Depending on the type this is a global
// memory barrier, a buffer barrier covering the whole current buffer, or an
// image barrier covering the whole current image (single mip level / layer).
class PipelineBarrier : public CmdCommand
{
public:
	enum Type
	{
		TYPE_GLOBAL = 0,
		TYPE_BUFFER,
		TYPE_IMAGE,
		TYPE_LAST
	};
									// imageLayout must be set when type is TYPE_IMAGE;
									// it is ignored for the other types.
									PipelineBarrier		(const vk::VkPipelineStageFlags			srcStages,
														 const vk::VkAccessFlags				srcAccesses,
														 const vk::VkPipelineStageFlags			dstStages,
														 const vk::VkAccessFlags				dstAccesses,
														 Type									type,
														 const tcu::Maybe<vk::VkImageLayout>	imageLayout);
									~PipelineBarrier	(void) {}
	const char*						getName				(void) const { return "PipelineBarrier"; }

	void							logSubmit			(TestLog& log, size_t commandIndex) const;
	void							submit				(SubmitContext& context);

private:
	const vk::VkPipelineStageFlags		m_srcStages;
	const vk::VkAccessFlags				m_srcAccesses;
	const vk::VkPipelineStageFlags		m_dstStages;
	const vk::VkAccessFlags				m_dstAccesses;
	const Type							m_type;
	const tcu::Maybe<vk::VkImageLayout>	m_imageLayout;
};

PipelineBarrier::PipelineBarrier (const vk::VkPipelineStageFlags		srcStages,
								  const vk::VkAccessFlags				srcAccesses,
								  const vk::VkPipelineStageFlags		dstStages,
								  const vk::VkAccessFlags				dstAccesses,
								  Type									type,
								  const tcu::Maybe<vk::VkImageLayout>	imageLayout)
	: m_srcStages	(srcStages)
	, m_srcAccesses	(srcAccesses)
	, m_dstStages	(dstStages)
	, m_dstAccesses	(dstAccesses)
	, m_type		(type)
	, m_imageLayout	(imageLayout)
{
}

void PipelineBarrier::logSubmit (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName()
		<< " " << (m_type == TYPE_GLOBAL ? "Global pipeline barrier"
					: m_type == TYPE_BUFFER ? "Buffer pipeline barrier"
					: "Image pipeline barrier")
		<< ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
		<< ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses) << TestLog::EndMessage;
}

// Records the barrier into the submit context's command buffer.
void PipelineBarrier::submit (SubmitContext& context)
{
	const vk::DeviceInterface&	vkd	= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer	cmd	= context.getCommandBuffer();

	switch (m_type)
	{
		case TYPE_GLOBAL:
		{
			const vk::VkMemoryBarrier	barrier		=
			{
				vk::VK_STRUCTURE_TYPE_MEMORY_BARRIER,
				DE_NULL,

				m_srcAccesses,
				m_dstAccesses
			};

			vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 1, &barrier, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
			break;
		}

		case TYPE_BUFFER:
		{
			// Covers the whole buffer; queue families ignored (no ownership transfer).
			const vk::VkBufferMemoryBarrier	barrier		=
			{
				vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
				DE_NULL,

				m_srcAccesses,
				m_dstAccesses,

				VK_QUEUE_FAMILY_IGNORED,
				VK_QUEUE_FAMILY_IGNORED,

				context.getBuffer(),
				0,
				VK_WHOLE_SIZE
			};

			vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
			break;
		}

		case TYPE_IMAGE:
		{
			// No layout transition: oldLayout == newLayout. Dereferencing
			// m_imageLayout requires the Maybe to be set for TYPE_IMAGE.
			const vk::VkImageMemoryBarrier	barrier		=
			{
				vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
				DE_NULL,

				m_srcAccesses,
				m_dstAccesses,

				*m_imageLayout,
				*m_imageLayout,

				VK_QUEUE_FAMILY_IGNORED,
				VK_QUEUE_FAMILY_IGNORED,

				context.getImage(),
				{
					// Color aspect, first mip level and first array layer only.
					vk::VK_IMAGE_ASPECT_COLOR_BIT,
					0, 1,
					0, 1
				}
			};

			vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
			break;
		}

		default:
			DE_FATAL("Unknown pipeline barrier type");
	}
}
2061
// Command that records an image layout transition barrier for the test image.
// Unlike PipelineBarrier with TYPE_IMAGE this changes the layout
// (m_srcLayout -> m_dstLayout); verify() marks the whole image memory as
// undefined in the reference model afterwards.
class ImageTransition : public CmdCommand
{
public:
						ImageTransition		(vk::VkPipelineStageFlags	srcStages,
											 vk::VkAccessFlags			srcAccesses,

											 vk::VkPipelineStageFlags	dstStages,
											 vk::VkAccessFlags			dstAccesses,

											 vk::VkImageLayout			srcLayout,
											 vk::VkImageLayout			dstLayout);

						~ImageTransition	(void) {}
	const char*			getName				(void) const { return "ImageTransition"; }

	void				prepare				(PrepareContext& context);
	void				logSubmit			(TestLog& log, size_t commandIndex) const;
	void				submit				(SubmitContext& context);
	void				verify				(VerifyContext& context, size_t);

private:
	const vk::VkPipelineStageFlags	m_srcStages;
	const vk::VkAccessFlags			m_srcAccesses;
	const vk::VkPipelineStageFlags	m_dstStages;
	const vk::VkAccessFlags			m_dstAccesses;
	const vk::VkImageLayout			m_srcLayout;
	const vk::VkImageLayout			m_dstLayout;

	// Size of the image backing memory; captured in prepare() for verify().
	vk::VkDeviceSize				m_imageMemorySize;
};
2092
// Store the synchronization scopes and the source/destination layouts of the
// transition; m_imageMemorySize is filled in later by prepare().
ImageTransition::ImageTransition (vk::VkPipelineStageFlags	srcStages,
								  vk::VkAccessFlags			srcAccesses,

								  vk::VkPipelineStageFlags	dstStages,
								  vk::VkAccessFlags			dstAccesses,

								  vk::VkImageLayout			srcLayout,
								  vk::VkImageLayout			dstLayout)
	: m_srcStages		(srcStages)
	, m_srcAccesses		(srcAccesses)
	, m_dstStages		(dstStages)
	, m_dstAccesses		(dstAccesses)
	, m_srcLayout		(srcLayout)
	, m_dstLayout		(dstLayout)
{
}
2109
2110void ImageTransition::logSubmit (TestLog& log, size_t commandIndex) const
2111{
2112	log << TestLog::Message << commandIndex << ":" << getName()
2113		<< " Image transition pipeline barrier"
2114		<< ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
2115		<< ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses)
2116		<< ", srcLayout: " << m_srcLayout << ", dstLayout: " << m_dstLayout << TestLog::EndMessage;
2117}
2118
void ImageTransition::prepare (PrepareContext& context)
{
	// The transition is only valid when the image is already in m_srcLayout,
	// or when either layout is UNDEFINED (contents may be discarded).
	DE_ASSERT(context.getImageLayout() == vk::VK_IMAGE_LAYOUT_UNDEFINED || m_srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED || context.getImageLayout() == m_srcLayout);

	context.setImageLayout(m_dstLayout);
	// Remember how much backing memory the image uses so verify() can mark
	// the matching reference range as undefined.
	m_imageMemorySize = context.getImageMemorySize();
}
2126
// Record the layout transition barrier (m_srcLayout -> m_dstLayout) covering
// the single mip level / array layer of the test image.
void ImageTransition::submit (SubmitContext& context)
{
	const vk::DeviceInterface&		vkd			= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer		cmd			= context.getCommandBuffer();
	const vk::VkImageMemoryBarrier	barrier		=
	{
		vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
		DE_NULL,

		m_srcAccesses,
		m_dstAccesses,

		m_srcLayout,
		m_dstLayout,

		VK_QUEUE_FAMILY_IGNORED,
		VK_QUEUE_FAMILY_IGNORED,

		context.getImage(),
		{
			vk::VK_IMAGE_ASPECT_COLOR_BIT,
			0u, 1u,
			0u, 1u
		}
	};

	vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
}
2155
void ImageTransition::verify (VerifyContext& context, size_t)
{
	// Conservatively treat the whole image backing memory as undefined in the
	// reference model after the transition.
	context.getReference().setUndefined(0, (size_t)m_imageMemorySize);
}
2160
// Command that fills the test buffer with a repeating 32-bit value using
// vkCmdFillBuffer.
class FillBuffer : public CmdCommand
{
public:
						FillBuffer	(deUint32 value) : m_value(value) {}
						~FillBuffer	(void) {}
	const char*			getName		(void) const { return "FillBuffer"; }

	void				logSubmit	(TestLog& log, size_t commandIndex) const;
	void				submit		(SubmitContext& context);
	void				verify		(VerifyContext& context, size_t commandIndex);

private:
	// 32-bit fill pattern.
	const deUint32		m_value;
	// Bytes actually filled (buffer size rounded down to a multiple of 4);
	// set in submit(), read in verify().
	vk::VkDeviceSize	m_bufferSize;
};
2176
2177void FillBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2178{
2179	log << TestLog::Message << commandIndex << ":" << getName() << " Fill value: " << m_value << TestLog::EndMessage;
2180}
2181
2182void FillBuffer::submit (SubmitContext& context)
2183{
2184	const vk::DeviceInterface&	vkd			= context.getContext().getDeviceInterface();
2185	const vk::VkCommandBuffer	cmd			= context.getCommandBuffer();
2186	const vk::VkBuffer			buffer		= context.getBuffer();
2187	const vk::VkDeviceSize		sizeMask	= ~(0x3ull); // \note Round down to multiple of 4
2188
2189	m_bufferSize = sizeMask & context.getBufferSize();
2190	vkd.cmdFillBuffer(cmd, buffer, 0, m_bufferSize, m_value);
2191}
2192
2193void FillBuffer::verify (VerifyContext& context, size_t)
2194{
2195	ReferenceMemory&	reference	= context.getReference();
2196
2197	for (size_t ndx = 0; ndx < m_bufferSize; ndx++)
2198	{
2199#if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
2200		reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(ndx % 4)))));
2201#else
2202		reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(3 - (ndx % 4))))));
2203#endif
2204	}
2205}
2206
// Command that writes seeded pseudo-random data into the test buffer with
// vkCmdUpdateBuffer; verify() replays the same stream into the reference.
class UpdateBuffer : public CmdCommand
{
public:
						UpdateBuffer	(deUint32 seed) : m_seed(seed) {}
						~UpdateBuffer	(void) {}
	const char*			getName			(void) const { return "UpdateBuffer"; }

	void				logSubmit		(TestLog& log, size_t commandIndex) const;
	void				submit			(SubmitContext& context);
	void				verify			(VerifyContext& context, size_t commandIndex);

private:
	// Seed for the de::Random stream shared by submit() and verify().
	const deUint32		m_seed;
	// Buffer size captured in submit(), read in verify().
	vk::VkDeviceSize	m_bufferSize;
};
2222
2223void UpdateBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2224{
2225	log << TestLog::Message << commandIndex << ":" << getName() << " Update buffer, seed: " << m_seed << TestLog::EndMessage;
2226}
2227
// Record vkCmdUpdateBuffer commands writing seeded pseudo-random data over
// the whole test buffer in 64KiB blocks.
void UpdateBuffer::submit (SubmitContext& context)
{
	const vk::DeviceInterface&	vkd			= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer	cmd			= context.getCommandBuffer();
	const vk::VkBuffer			buffer		= context.getBuffer();
	// vkCmdUpdateBuffer is limited to 65536 bytes per command.
	const size_t				blockSize	= 65536;
	std::vector<deUint8>		data		(blockSize, 0);
	de::Random					rng			(m_seed);

	m_bufferSize = context.getBufferSize();

	for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
	{
		for (size_t ndx = 0; ndx < data.size(); ndx++)
			data[ndx] = rng.getUint8();

		if (m_bufferSize - updated > blockSize)
			vkd.cmdUpdateBuffer(cmd, buffer, updated, blockSize, (const deUint32*)(&data[0]));
		else
			// Final (possibly partial) block.
			// NOTE(review): vkCmdUpdateBuffer requires dataSize to be a
			// multiple of 4 -- assumes the test buffer size satisfies this.
			vkd.cmdUpdateBuffer(cmd, buffer, updated, m_bufferSize - updated, (const deUint32*)(&data[0]));
	}
}
2250
2251void UpdateBuffer::verify (VerifyContext& context, size_t)
2252{
2253	ReferenceMemory&	reference	= context.getReference();
2254	const size_t		blockSize	= 65536;
2255	vector<deUint8>		data		(blockSize, 0);
2256	de::Random			rng			(m_seed);
2257
2258	for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2259	{
2260		for (size_t ndx = 0; ndx < data.size(); ndx++)
2261			data[ndx] = rng.getUint8();
2262
2263		if (m_bufferSize - updated > blockSize)
2264			reference.setData(updated, blockSize, &data[0]);
2265		else
2266			reference.setData(updated, (size_t)(m_bufferSize - updated), &data[0]);
2267	}
2268}
2269
// Command that copies the test buffer into a freshly allocated host-visible
// destination buffer; verify() maps that buffer and compares it byte-by-byte
// against the reference memory.
class BufferCopyToBuffer : public CmdCommand
{
public:
									BufferCopyToBuffer	(void) {}
									~BufferCopyToBuffer	(void) {}
	const char*						getName				(void) const { return "BufferCopyToBuffer"; }

	void							logPrepare			(TestLog& log, size_t commandIndex) const;
	void							prepare				(PrepareContext& context);
	void							logSubmit			(TestLog& log, size_t commandIndex) const;
	void							submit				(SubmitContext& context);
	void							verify				(VerifyContext& context, size_t commandIndex);

private:
	// Copy size captured in prepare().
	vk::VkDeviceSize				m_bufferSize;
	// Host-readable destination buffer and its backing memory.
	vk::Move<vk::VkBuffer>			m_dstBuffer;
	vk::Move<vk::VkDeviceMemory>	m_memory;
};
2288
2289void BufferCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2290{
2291	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for buffer to buffer copy." << TestLog::EndMessage;
2292}
2293
// Allocate a destination buffer matching the test buffer size and bind
// host-visible memory so verify() can map and read it back.
void BufferCopyToBuffer::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice				device			= context.getContext().getDevice();
	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();

	m_bufferSize = context.getBufferSize();

	m_dstBuffer	= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
	m_memory	= bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
}
2307
2308void BufferCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2309{
2310	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to another buffer" << TestLog::EndMessage;
2311}
2312
2313void BufferCopyToBuffer::submit (SubmitContext& context)
2314{
2315	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2316	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2317	const vk::VkBufferCopy		range			=
2318	{
2319		0, 0, // Offsets
2320		m_bufferSize
2321	};
2322
2323	vkd.cmdCopyBuffer(commandBuffer, context.getBuffer(), *m_dstBuffer, 1, &range);
2324}
2325
2326void BufferCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
2327{
2328	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
2329	ReferenceMemory&						reference		(context.getReference());
2330	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
2331	const vk::VkDevice						device			= context.getContext().getDevice();
2332	const vk::VkQueue						queue			= context.getContext().getQueue();
2333	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
2334	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2335	const vk::VkBufferMemoryBarrier			barrier			=
2336	{
2337		vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2338		DE_NULL,
2339
2340		vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2341		vk::VK_ACCESS_HOST_READ_BIT,
2342
2343		VK_QUEUE_FAMILY_IGNORED,
2344		VK_QUEUE_FAMILY_IGNORED,
2345		*m_dstBuffer,
2346		0,
2347		VK_WHOLE_SIZE
2348	};
2349
2350	vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2351
2352	VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2353	queueRun(vkd, queue, *commandBuffer);
2354
2355	{
2356		void* const	ptr		= mapMemory(vkd, device, *m_memory, m_bufferSize);
2357		bool		isOk	= true;
2358
2359		vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
2360
2361		{
2362			const deUint8* const data = (const deUint8*)ptr;
2363
2364			for (size_t pos = 0; pos < (size_t)m_bufferSize; pos++)
2365			{
2366				if (reference.isDefined(pos))
2367				{
2368					if (data[pos] != reference.get(pos))
2369					{
2370						resultCollector.fail(
2371								de::toString(commandIndex) + ":" + getName()
2372								+ " Result differs from reference, Expected: "
2373								+ de::toString(tcu::toHex<8>(reference.get(pos)))
2374								+ ", Got: "
2375								+ de::toString(tcu::toHex<8>(data[pos]))
2376								+ ", At offset: "
2377								+ de::toString(pos));
2378						break;
2379					}
2380				}
2381			}
2382		}
2383
2384		vkd.unmapMemory(device, *m_memory);
2385
2386		if (!isOk)
2387			context.getLog() << TestLog::Message << commandIndex << ": Buffer copy to buffer verification failed" << TestLog::EndMessage;
2388	}
2389}
2390
// Command that fills a host-visible source buffer with seeded pseudo-random
// data in prepare() and copies it over the test buffer in submit(); verify()
// replays the same stream into the reference memory.
class BufferCopyFromBuffer : public CmdCommand
{
public:
									BufferCopyFromBuffer	(deUint32 seed) : m_seed(seed) {}
									~BufferCopyFromBuffer	(void) {}
	const char*						getName					(void) const { return "BufferCopyFromBuffer"; }

	void							logPrepare				(TestLog& log, size_t commandIndex) const;
	void							prepare					(PrepareContext& context);
	void							logSubmit				(TestLog& log, size_t commandIndex) const;
	void							submit					(SubmitContext& context);
	void							verify					(VerifyContext& context, size_t commandIndex);

private:
	// Seed for the de::Random stream shared by prepare() and verify().
	const deUint32					m_seed;
	vk::VkDeviceSize				m_bufferSize;
	// Host-visible source buffer and its backing memory.
	vk::Move<vk::VkBuffer>			m_srcBuffer;
	vk::Move<vk::VkDeviceMemory>	m_memory;
};
2410
2411void BufferCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2412{
2413	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to buffer copy. Seed: " << m_seed << TestLog::EndMessage;
2414}
2415
// Allocate a host-visible source buffer and fill it with seeded pseudo-random
// bytes; verify() later replays the identical stream.
void BufferCopyFromBuffer::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice				device			= context.getContext().getDevice();
	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();

	m_bufferSize	= context.getBufferSize();
	m_srcBuffer		= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
	m_memory		= bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);

	{
		void* const	ptr	= mapMemory(vkd, device, *m_memory, m_bufferSize);
		de::Random	rng	(m_seed);

		{
			deUint8* const	data = (deUint8*)ptr;

			for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
				data[ndx] = rng.getUint8();
		}

		// Flush so the device sees the host writes before the copy executes.
		vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
		vkd.unmapMemory(device, *m_memory);
	}
}
2443
2444void BufferCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2445{
2446	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from another buffer" << TestLog::EndMessage;
2447}
2448
2449void BufferCopyFromBuffer::submit (SubmitContext& context)
2450{
2451	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2452	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2453	const vk::VkBufferCopy		range			=
2454	{
2455		0, 0, // Offsets
2456		m_bufferSize
2457	};
2458
2459	vkd.cmdCopyBuffer(commandBuffer, *m_srcBuffer, context.getBuffer(), 1, &range);
2460}
2461
2462void BufferCopyFromBuffer::verify (VerifyContext& context, size_t)
2463{
2464	ReferenceMemory&	reference	(context.getReference());
2465	de::Random			rng			(m_seed);
2466
2467	for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2468		reference.set(ndx, rng.getUint8());
2469}
2470
// Command that copies the test buffer into a freshly allocated RGBA8 image;
// verify() copies the image back to a host-visible buffer and compares it
// against the reference memory.
class BufferCopyToImage : public CmdCommand
{
public:
									BufferCopyToImage	(void) {}
									~BufferCopyToImage	(void) {}
	const char*						getName				(void) const { return "BufferCopyToImage"; }

	void							logPrepare			(TestLog& log, size_t commandIndex) const;
	void							prepare				(PrepareContext& context);
	void							logSubmit			(TestLog& log, size_t commandIndex) const;
	void							submit				(SubmitContext& context);
	void							verify				(VerifyContext& context, size_t commandIndex);

private:
	// Image dimensions chosen so that width * height * 4 fits the buffer.
	deInt32							m_imageWidth;
	deInt32							m_imageHeight;
	// Destination image and its backing memory.
	vk::Move<vk::VkImage>			m_dstImage;
	vk::Move<vk::VkDeviceMemory>	m_memory;
};
2490
2491void BufferCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
2492{
2493	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for buffer to image copy." << TestLog::EndMessage;
2494}
2495
// Create an RGBA8 2D image sized from the buffer (width * height * 4 bytes),
// bind memory, and transition it to TRANSFER_DST_OPTIMAL so submit() can copy
// into it.
void BufferCopyToImage::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice				device			= context.getContext().getDevice();
	const vk::VkQueue				queue			= context.getContext().getQueue();
	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
	const IVec2						imageSize		= findImageSizeWxHx4(context.getBufferSize());

	m_imageWidth	= imageSize[0];
	m_imageHeight	= imageSize[1];

	{
		const vk::VkImageCreateInfo	createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,

			0,
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u,
			},
			1, 1, // mipLevels, arrayLayers
			vk::VK_SAMPLE_COUNT_1_BIT,

			vk::VK_IMAGE_TILING_OPTIMAL,
			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
			vk::VK_SHARING_MODE_EXCLUSIVE,

			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};

		m_dstImage = vk::createImage(vkd, device, &createInfo);
	}

	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);

	{
		// Transition the new image from UNDEFINED to TRANSFER_DST_OPTIMAL
		// and submit the barrier immediately, before the test commands run.
		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
		const vk::VkImageMemoryBarrier			barrier			=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			0,
			vk::VK_ACCESS_TRANSFER_WRITE_BIT,

			vk::VK_IMAGE_LAYOUT_UNDEFINED,
			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_dstImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};

		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);

		VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
		queueRun(vkd, queue, *commandBuffer);
	}
}
2573
2574void BufferCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
2575{
2576	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to image" << TestLog::EndMessage;
2577}
2578
2579void BufferCopyToImage::submit (SubmitContext& context)
2580{
2581	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2582	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2583	const vk::VkBufferImageCopy	region			=
2584	{
2585		0,
2586		0, 0,
2587		{
2588			vk::VK_IMAGE_ASPECT_COLOR_BIT,
2589			0,	// mipLevel
2590			0,	// arrayLayer
2591			1	// layerCount
2592		},
2593		{ 0, 0, 0 },
2594		{
2595			(deUint32)m_imageWidth,
2596			(deUint32)m_imageHeight,
2597			1u
2598		}
2599	};
2600
2601	vkd.cmdCopyBufferToImage(commandBuffer, context.getBuffer(), *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
2602}
2603
// Copy the destination image back into a host-visible buffer (transitioning
// it to TRANSFER_SRC_OPTIMAL first), map the buffer, and compare every
// defined byte against the reference memory.
void BufferCopyToImage::verify (VerifyContext& context, size_t commandIndex)
{
	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
	ReferenceMemory&						reference		(context.getReference());
	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice						device			= context.getContext().getDevice();
	const vk::VkQueue						queue			= context.getContext().getQueue();
	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
	// Readback buffer sized for the whole RGBA8 image.
	const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
	const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
	{
		// DST_OPTIMAL -> SRC_OPTIMAL so the image can be copied out.
		const vk::VkImageMemoryBarrier		imageBarrier	=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			vk::VK_ACCESS_TRANSFER_READ_BIT,

			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_dstImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		// Make the transfer writes visible to host reads before mapping.
		const vk::VkBufferMemoryBarrier bufferBarrier =
		{
			vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			vk::VK_ACCESS_HOST_READ_BIT,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,
			*dstBuffer,
			0,
			VK_WHOLE_SIZE
		};

		const vk::VkBufferImageCopy	region =
		{
			0,
			0, 0,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// mipLevel
				0,	// arrayLayer
				1	// layerCount
			},
			{ 0, 0, 0 },
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u
			}
		};

		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
		vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
	}

	VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
	queueRun(vkd, queue, *commandBuffer);

	{
		void* const	ptr		= mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);

		vk::invalidateMappedMemoryRange(vkd, device, *memory, 0,  4 * m_imageWidth * m_imageHeight);

		{
			const deUint8* const	data = (const deUint8*)ptr;

			// Only bytes with defined reference values are compared; fail and
			// stop at the first mismatch.
			for (size_t pos = 0; pos < (size_t)( 4 * m_imageWidth * m_imageHeight); pos++)
			{
				if (reference.isDefined(pos))
				{
					if (data[pos] != reference.get(pos))
					{
						resultCollector.fail(
								de::toString(commandIndex) + ":" + getName()
								+ " Result differs from reference, Expected: "
								+ de::toString(tcu::toHex<8>(reference.get(pos)))
								+ ", Got: "
								+ de::toString(tcu::toHex<8>(data[pos]))
								+ ", At offset: "
								+ de::toString(pos));
						break;
					}
				}
			}
		}

		vkd.unmapMemory(device, *memory);
	}
}
2714
// Command that creates an RGBA8 image filled with seeded pseudo-random data
// in prepare() and copies it over the test buffer in submit(); verify()
// replays the same stream into the reference memory.
class BufferCopyFromImage : public CmdCommand
{
public:
									BufferCopyFromImage		(deUint32 seed) : m_seed(seed) {}
									~BufferCopyFromImage	(void) {}
	const char*						getName					(void) const { return "BufferCopyFromImage"; }

	void							logPrepare				(TestLog& log, size_t commandIndex) const;
	void							prepare					(PrepareContext& context);
	void							logSubmit				(TestLog& log, size_t commandIndex) const;
	void							submit					(SubmitContext& context);
	void							verify					(VerifyContext& context, size_t commandIndex);

private:
	// Seed for the de::Random stream shared by prepare() and verify().
	const deUint32					m_seed;
	// Image dimensions chosen so that width * height * 4 fits the buffer.
	deInt32							m_imageWidth;
	deInt32							m_imageHeight;
	// Source image and its backing memory.
	vk::Move<vk::VkImage>			m_srcImage;
	vk::Move<vk::VkDeviceMemory>	m_memory;
};
2735
2736void BufferCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
2737{
2738	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to buffer copy." << TestLog::EndMessage;
2739}
2740
// Create an RGBA8 2D image, upload seeded pseudo-random data into it via a
// staging buffer, and leave it in TRANSFER_SRC_OPTIMAL so submit() can copy
// from it into the test buffer.
void BufferCopyFromImage::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice				device			= context.getContext().getDevice();
	const vk::VkQueue				queue			= context.getContext().getQueue();
	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
	const IVec2						imageSize		= findImageSizeWxHx4(context.getBufferSize());

	m_imageWidth	= imageSize[0];
	m_imageHeight	= imageSize[1];

	{
		const vk::VkImageCreateInfo	createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,

			0,
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u,
			},
			1, 1, // mipLevels, arrayLayers
			vk::VK_SAMPLE_COUNT_1_BIT,

			vk::VK_IMAGE_TILING_OPTIMAL,
			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
			vk::VK_SHARING_MODE_EXCLUSIVE,

			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};

		m_srcImage = vk::createImage(vkd, device, &createInfo);
	}

	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);

	{
		// Host-visible staging buffer holding the pseudo-random image data.
		const vk::Unique<vk::VkBuffer>			srcBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
		const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
		// UNDEFINED -> DST_OPTIMAL before the upload copy.
		const vk::VkImageMemoryBarrier			preImageBarrier	=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			0,
			vk::VK_ACCESS_TRANSFER_WRITE_BIT,

			vk::VK_IMAGE_LAYOUT_UNDEFINED,
			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_srcImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		// DST_OPTIMAL -> SRC_OPTIMAL after the upload so submit() can read it.
		const vk::VkImageMemoryBarrier			postImageBarrier =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			0,

			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_srcImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		const vk::VkBufferImageCopy				region				=
		{
			0,
			0, 0,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// mipLevel
				0,	// arrayLayer
				1	// layerCount
			},
			{ 0, 0, 0 },
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u
			}
		};

		{
			void* const	ptr	= mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
			de::Random	rng	(m_seed);

			{
				deUint8* const	data = (deUint8*)ptr;

				// Same pseudo-random stream is replayed in verify().
				for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
					data[ndx] = rng.getUint8();
			}

			vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
			vkd.unmapMemory(device, *memory);
		}

		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
		vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);

		VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
		queueRun(vkd, queue, *commandBuffer);
	}
}
2877
2878void BufferCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
2879{
2880	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from image" << TestLog::EndMessage;
2881}
2882
2883void BufferCopyFromImage::submit (SubmitContext& context)
2884{
2885	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2886	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2887	const vk::VkBufferImageCopy	region			=
2888	{
2889		0,
2890		0, 0,
2891		{
2892			vk::VK_IMAGE_ASPECT_COLOR_BIT,
2893			0,	// mipLevel
2894			0,	// arrayLayer
2895			1	// layerCount
2896		},
2897		{ 0, 0, 0 },
2898		{
2899			(deUint32)m_imageWidth,
2900			(deUint32)m_imageHeight,
2901			1u
2902		}
2903	};
2904
2905	vkd.cmdCopyImageToBuffer(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getBuffer(), 1, &region);
2906}
2907
2908void BufferCopyFromImage::verify (VerifyContext& context, size_t)
2909{
2910	ReferenceMemory&	reference		(context.getReference());
2911	de::Random			rng	(m_seed);
2912
2913	for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
2914		reference.set(ndx, rng.getUint8());
2915}
2916
2917class ImageCopyToBuffer : public CmdCommand
2918{
2919public:
2920									ImageCopyToBuffer	(vk::VkImageLayout imageLayout) : m_imageLayout (imageLayout) {}
2921									~ImageCopyToBuffer	(void) {}
2922	const char*						getName				(void) const { return "BufferCopyToImage"; }
2923
2924	void							logPrepare			(TestLog& log, size_t commandIndex) const;
2925	void							prepare				(PrepareContext& context);
2926	void							logSubmit			(TestLog& log, size_t commandIndex) const;
2927	void							submit				(SubmitContext& context);
2928	void							verify				(VerifyContext& context, size_t commandIndex);
2929
2930private:
2931	vk::VkImageLayout				m_imageLayout;
2932	vk::VkDeviceSize				m_bufferSize;
2933	vk::Move<vk::VkBuffer>			m_dstBuffer;
2934	vk::Move<vk::VkDeviceMemory>	m_memory;
2935	vk::VkDeviceSize				m_imageMemorySize;
2936	deInt32							m_imageWidth;
2937	deInt32							m_imageHeight;
2938};
2939
2940void ImageCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2941{
2942	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for image to buffer copy." << TestLog::EndMessage;
2943}
2944
2945void ImageCopyToBuffer::prepare (PrepareContext& context)
2946{
2947	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
2948	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
2949	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
2950	const vk::VkDevice				device			= context.getContext().getDevice();
2951	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
2952
2953	m_imageWidth		= context.getImageWidth();
2954	m_imageHeight		= context.getImageHeight();
2955	m_bufferSize		= 4 * m_imageWidth * m_imageHeight;
2956	m_imageMemorySize	= context.getImageMemorySize();
2957	m_dstBuffer			= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2958	m_memory			= bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2959}
2960
2961void ImageCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2962{
2963	log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to buffer" << TestLog::EndMessage;
2964}
2965
2966void ImageCopyToBuffer::submit (SubmitContext& context)
2967{
2968	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2969	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2970	const vk::VkBufferImageCopy	region			=
2971	{
2972		0,
2973		0, 0,
2974		{
2975			vk::VK_IMAGE_ASPECT_COLOR_BIT,
2976			0,	// mipLevel
2977			0,	// arrayLayer
2978			1	// layerCount
2979		},
2980		{ 0, 0, 0 },
2981		{
2982			(deUint32)m_imageWidth,
2983			(deUint32)m_imageHeight,
2984			1u
2985		}
2986	};
2987
2988	vkd.cmdCopyImageToBuffer(commandBuffer, context.getImage(), m_imageLayout, *m_dstBuffer, 1, &region);
2989}
2990
2991void ImageCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
2992{
2993	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
2994	ReferenceMemory&						reference		(context.getReference());
2995	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
2996	const vk::VkDevice						device			= context.getContext().getDevice();
2997	const vk::VkQueue						queue			= context.getContext().getQueue();
2998	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
2999	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3000	const vk::VkBufferMemoryBarrier			barrier			=
3001	{
3002		vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
3003		DE_NULL,
3004
3005		vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3006		vk::VK_ACCESS_HOST_READ_BIT,
3007
3008		VK_QUEUE_FAMILY_IGNORED,
3009		VK_QUEUE_FAMILY_IGNORED,
3010		*m_dstBuffer,
3011		0,
3012		VK_WHOLE_SIZE
3013	};
3014
3015	vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
3016
3017	VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3018	queueRun(vkd, queue, *commandBuffer);
3019
3020	reference.setUndefined(0, (size_t)m_imageMemorySize);
3021	{
3022		void* const						ptr				= mapMemory(vkd, device, *m_memory, m_bufferSize);
3023		const ConstPixelBufferAccess	referenceImage	(context.getReferenceImage().getAccess());
3024		const ConstPixelBufferAccess	resultImage		(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, ptr);
3025
3026		vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
3027
3028		if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), referenceImage, resultImage, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
3029			resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
3030
3031		vkd.unmapMemory(device, *m_memory);
3032	}
3033}
3034
// Command that fills a host-visible staging buffer with seeded random data
// (prepare), copies it into the test image (submit), and replays the same
// PRNG stream into the reference image (verify).
class ImageCopyFromBuffer : public CmdCommand
{
public:
									ImageCopyFromBuffer		(deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
									~ImageCopyFromBuffer	(void) {}
	const char*						getName					(void) const { return "ImageCopyFromBuffer"; }

	void							logPrepare				(TestLog& log, size_t commandIndex) const;
	void							prepare					(PrepareContext& context);
	void							logSubmit				(TestLog& log, size_t commandIndex) const;
	void							submit					(SubmitContext& context);
	void							verify					(VerifyContext& context, size_t commandIndex);

private:
	const deUint32					m_seed;				// Seed for the random source-buffer contents
	const vk::VkImageLayout			m_imageLayout;		// Layout the test image is in when the copy is recorded
	deInt32							m_imageWidth;
	deInt32							m_imageHeight;
	vk::VkDeviceSize				m_imageMemorySize;	// Size of the test image's backing memory
	vk::VkDeviceSize				m_bufferSize;		// 4 bytes per pixel * width * height
	vk::Move<vk::VkBuffer>			m_srcBuffer;		// Host-visible staging buffer
	vk::Move<vk::VkDeviceMemory>	m_memory;			// Memory backing m_srcBuffer
};
3058
3059void ImageCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
3060{
3061	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to image copy. Seed: " << m_seed << TestLog::EndMessage;
3062}
3063
3064void ImageCopyFromBuffer::prepare (PrepareContext& context)
3065{
3066	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
3067	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
3068	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
3069	const vk::VkDevice				device			= context.getContext().getDevice();
3070	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
3071
3072	m_imageWidth		= context.getImageHeight();
3073	m_imageHeight		= context.getImageWidth();
3074	m_imageMemorySize	= context.getImageMemorySize();
3075	m_bufferSize		= m_imageWidth * m_imageHeight * 4;
3076	m_srcBuffer			= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
3077	m_memory			= bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
3078
3079	{
3080		void* const	ptr	= mapMemory(vkd, device, *m_memory, m_bufferSize);
3081		de::Random	rng	(m_seed);
3082
3083		{
3084			deUint8* const	data = (deUint8*)ptr;
3085
3086			for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
3087				data[ndx] = rng.getUint8();
3088		}
3089
3090		vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
3091		vkd.unmapMemory(device, *m_memory);
3092	}
3093}
3094
3095void ImageCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
3096{
3097	log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from buffer" << TestLog::EndMessage;
3098}
3099
3100void ImageCopyFromBuffer::submit (SubmitContext& context)
3101{
3102	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
3103	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
3104	const vk::VkBufferImageCopy	region			=
3105	{
3106		0,
3107		0, 0,
3108		{
3109			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3110			0,	// mipLevel
3111			0,	// arrayLayer
3112			1	// layerCount
3113		},
3114		{ 0, 0, 0 },
3115		{
3116			(deUint32)m_imageWidth,
3117			(deUint32)m_imageHeight,
3118			1u
3119		}
3120	};
3121
3122	vkd.cmdCopyBufferToImage(commandBuffer, *m_srcBuffer, context.getImage(), m_imageLayout, 1, &region);
3123}
3124
3125void ImageCopyFromBuffer::verify (VerifyContext& context, size_t)
3126{
3127	ReferenceMemory&	reference	(context.getReference());
3128	de::Random			rng			(m_seed);
3129
3130	reference.setUndefined(0, (size_t)m_imageMemorySize);
3131
3132	{
3133		const PixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());
3134
3135		for (deInt32 y = 0; y < m_imageHeight; y++)
3136		for (deInt32 x = 0; x < m_imageWidth; x++)
3137		{
3138			const deUint8 r8 = rng.getUint8();
3139			const deUint8 g8 = rng.getUint8();
3140			const deUint8 b8 = rng.getUint8();
3141			const deUint8 a8 = rng.getUint8();
3142
3143			refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3144		}
3145	}
3146}
3147
// Command that creates a second image filled with seeded random data
// (prepare), copies it into the test image (submit), and replays the same
// PRNG stream into the reference image (verify).
class ImageCopyFromImage : public CmdCommand
{
public:
									ImageCopyFromImage	(deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
									~ImageCopyFromImage	(void) {}
	const char*						getName				(void) const { return "ImageCopyFromImage"; }

	void							logPrepare			(TestLog& log, size_t commandIndex) const;
	void							prepare				(PrepareContext& context);
	void							logSubmit			(TestLog& log, size_t commandIndex) const;
	void							submit				(SubmitContext& context);
	void							verify				(VerifyContext& context, size_t commandIndex);

private:
	const deUint32					m_seed;				// Seed for the random source-image contents
	const vk::VkImageLayout			m_imageLayout;		// Layout the test image is in when the copy is recorded
	deInt32							m_imageWidth;
	deInt32							m_imageHeight;
	vk::VkDeviceSize				m_imageMemorySize;	// Size of the test image's backing memory
	vk::Move<vk::VkImage>			m_srcImage;			// Source image created and filled in prepare()
	vk::Move<vk::VkDeviceMemory>	m_memory;			// Memory backing m_srcImage
};
3170
3171void ImageCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3172{
3173	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image copy." << TestLog::EndMessage;
3174}
3175
// Create the transfer source image and fill it with seeded random data:
// 1) create a 2D RGBA8 image matching the test image size,
// 2) upload random bytes through a host-visible staging buffer,
// 3) transition the image to TRANSFER_SRC_OPTIMAL for use in submit().
void ImageCopyFromImage::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice				device			= context.getContext().getDevice();
	const vk::VkQueue				queue			= context.getContext().getQueue();
	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();

	m_imageWidth		= context.getImageWidth();
	m_imageHeight		= context.getImageHeight();
	m_imageMemorySize	= context.getImageMemorySize();

	// Source image: optimal-tiling 2D RGBA8, same size as the test image,
	// usable both as transfer destination (staging upload) and source (copy).
	{
		const vk::VkImageCreateInfo	createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,

			0,
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u,
			},
			1, 1, // mipLevels, arrayLayers
			vk::VK_SAMPLE_COUNT_1_BIT,

			vk::VK_IMAGE_TILING_OPTIMAL,
			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
			vk::VK_SHARING_MODE_EXCLUSIVE,

			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};

		m_srcImage = vk::createImage(vkd, device, &createInfo);
	}

	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);

	// Upload seeded random data into the image via a temporary staging buffer
	// in a one-off command buffer submission.
	{
		const vk::Unique<vk::VkBuffer>			srcBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
		const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
		// UNDEFINED -> TRANSFER_DST_OPTIMAL before the staging upload.
		const vk::VkImageMemoryBarrier			preImageBarrier	=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			0,
			vk::VK_ACCESS_TRANSFER_WRITE_BIT,

			vk::VK_IMAGE_LAYOUT_UNDEFINED,
			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_srcImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		// TRANSFER_DST_OPTIMAL -> TRANSFER_SRC_OPTIMAL after the upload so
		// submit() can copy from the image.
		const vk::VkImageMemoryBarrier			postImageBarrier =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			0,

			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_srcImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		const vk::VkBufferImageCopy				region				=
		{
			0,
			0, 0,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// mipLevel
				0,	// arrayLayer
				1	// layerCount
			},
			{ 0, 0, 0 },
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u
			}
		};

		// Fill the staging buffer with the seeded random byte stream and
		// flush so the device sees the host writes.
		{
			void* const	ptr	= mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
			de::Random	rng	(m_seed);

			{
				deUint8* const	data = (deUint8*)ptr;

				for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
					data[ndx] = rng.getUint8();
			}

			vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
			vkd.unmapMemory(device, *memory);
		}

		// Transition, upload, transition; then submit and wait for completion.
		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
		vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);

		VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
		queueRun(vkd, queue, *commandBuffer);
	}
}
3312
3313void ImageCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3314{
3315	log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from another image" << TestLog::EndMessage;
3316}
3317
3318void ImageCopyFromImage::submit (SubmitContext& context)
3319{
3320	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
3321	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
3322	const vk::VkImageCopy		region			=
3323	{
3324		{
3325			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3326			0,	// mipLevel
3327			0,	// arrayLayer
3328			1	// layerCount
3329		},
3330		{ 0, 0, 0 },
3331
3332		{
3333			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3334			0,	// mipLevel
3335			0,	// arrayLayer
3336			1	// layerCount
3337		},
3338		{ 0, 0, 0 },
3339		{
3340			(deUint32)m_imageWidth,
3341			(deUint32)m_imageHeight,
3342			1u
3343		}
3344	};
3345
3346	vkd.cmdCopyImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, &region);
3347}
3348
3349void ImageCopyFromImage::verify (VerifyContext& context, size_t)
3350{
3351	ReferenceMemory&	reference	(context.getReference());
3352	de::Random			rng			(m_seed);
3353
3354	reference.setUndefined(0, (size_t)m_imageMemorySize);
3355
3356	{
3357		const PixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());
3358
3359		for (deInt32 y = 0; y < m_imageHeight; y++)
3360		for (deInt32 x = 0; x < m_imageWidth; x++)
3361		{
3362			const deUint8 r8 = rng.getUint8();
3363			const deUint8 g8 = rng.getUint8();
3364			const deUint8 b8 = rng.getUint8();
3365			const deUint8 a8 = rng.getUint8();
3366
3367			refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3368		}
3369	}
3370}
3371
// Command that copies the test image into a locally created destination image
// (prepare/submit) and verifies the destination contents by reading them back
// through a temporary buffer (verify).
class ImageCopyToImage : public CmdCommand
{
public:
									ImageCopyToImage	(vk::VkImageLayout imageLayout) : m_imageLayout(imageLayout) {}
									~ImageCopyToImage	(void) {}
	const char*						getName				(void) const { return "ImageCopyToImage"; }

	void							logPrepare			(TestLog& log, size_t commandIndex) const;
	void							prepare				(PrepareContext& context);
	void							logSubmit			(TestLog& log, size_t commandIndex) const;
	void							submit				(SubmitContext& context);
	void							verify				(VerifyContext& context, size_t commandIndex);

private:
	const vk::VkImageLayout			m_imageLayout;		// Layout the test image is in when the copy is recorded
	deInt32							m_imageWidth;
	deInt32							m_imageHeight;
	vk::VkDeviceSize				m_imageMemorySize;	// Size of the test image's backing memory
	vk::Move<vk::VkImage>			m_dstImage;			// Destination image created in prepare()
	vk::Move<vk::VkDeviceMemory>	m_memory;			// Memory backing m_dstImage
};
3393
3394void ImageCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
3395{
3396	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image copy." << TestLog::EndMessage;
3397}
3398
// Create the destination image for the image-to-image copy and transition it
// from UNDEFINED to TRANSFER_DST_OPTIMAL so submit() can copy into it.
void ImageCopyToImage::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice				device			= context.getContext().getDevice();
	const vk::VkQueue				queue			= context.getContext().getQueue();
	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();

	m_imageWidth		= context.getImageWidth();
	m_imageHeight		= context.getImageHeight();
	m_imageMemorySize	= context.getImageMemorySize();

	// Destination image: optimal-tiling 2D RGBA8 matching the test image
	// size; also usable as transfer source for the read-back in verify().
	{
		const vk::VkImageCreateInfo	createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,

			0,
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u,
			},
			1, 1, // mipLevels, arrayLayers
			vk::VK_SAMPLE_COUNT_1_BIT,

			vk::VK_IMAGE_TILING_OPTIMAL,
			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
			vk::VK_SHARING_MODE_EXCLUSIVE,

			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};

		m_dstImage = vk::createImage(vkd, device, &createInfo);
	}

	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);

	// One-off command buffer performing the UNDEFINED -> TRANSFER_DST_OPTIMAL
	// layout transition.
	{
		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
		const vk::VkImageMemoryBarrier			barrier			=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			0,
			vk::VK_ACCESS_TRANSFER_WRITE_BIT,

			vk::VK_IMAGE_LAYOUT_UNDEFINED,
			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_dstImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};

		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);

		VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
		queueRun(vkd, queue, *commandBuffer);
	}
}
3476
3477void ImageCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
3478{
3479	log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to another image" << TestLog::EndMessage;
3480}
3481
3482void ImageCopyToImage::submit (SubmitContext& context)
3483{
3484	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
3485	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
3486	const vk::VkImageCopy		region			=
3487	{
3488		{
3489			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3490			0,	// mipLevel
3491			0,	// arrayLayer
3492			1	// layerCount
3493		},
3494		{ 0, 0, 0 },
3495
3496		{
3497			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3498			0,	// mipLevel
3499			0,	// arrayLayer
3500			1	// layerCount
3501		},
3502		{ 0, 0, 0 },
3503		{
3504			(deUint32)m_imageWidth,
3505			(deUint32)m_imageHeight,
3506			1u
3507		}
3508	};
3509
3510	vkd.cmdCopyImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
3511}
3512
// Read back the destination image through a temporary host-visible buffer
// and compare the pixels against the reference image.
void ImageCopyToImage::verify (VerifyContext& context, size_t commandIndex)
{
	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice						device			= context.getContext().getDevice();
	const vk::VkQueue						queue			= context.getContext().getQueue();
	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
	const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
	const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
	{
		// TRANSFER_DST -> TRANSFER_SRC transition for the image that submit()
		// wrote, so it can be copied out.
		const vk::VkImageMemoryBarrier		imageBarrier	=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			vk::VK_ACCESS_TRANSFER_READ_BIT,

			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_dstImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		// Transfer-write -> host-read barrier so the mapped read below sees
		// the copied data.
		const vk::VkBufferMemoryBarrier bufferBarrier =
		{
			vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			vk::VK_ACCESS_HOST_READ_BIT,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,
			*dstBuffer,
			0,
			VK_WHOLE_SIZE
		};
		const vk::VkBufferImageCopy	region =
		{
			0,
			0, 0,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// mipLevel
				0,	// arrayLayer
				1	// layerCount
			},
			{ 0, 0, 0 },
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u
			}
		};

		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
		vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
	}

	VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
	queueRun(vkd, queue, *commandBuffer);

	// Map the read-back buffer, invalidate the range, and compare against the
	// reference image.
	{
		void* const	ptr		= mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);

		vk::invalidateMappedMemoryRange(vkd, device, *memory, 0,  4 * m_imageWidth * m_imageHeight);

		{
			const deUint8* const			data		= (const deUint8*)ptr;
			const ConstPixelBufferAccess	resAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, data);
			const ConstPixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());

			if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
				resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
		}

		vkd.unmapMemory(device, *memory);
	}
}
3607
// Scale factors used by the image blit commands.
enum BlitScale
{
	BLIT_SCALE_20,	// 2x upscale: the blit source is half-size in each dimension
	BLIT_SCALE_10,	// 1:1 blit, no scaling
};
3613
// Command that creates a source image filled with seeded random data
// (prepare) and blits it into the test image (submit), optionally with a
// 2x upscale (BLIT_SCALE_20, where the source is half-size per dimension).
class ImageBlitFromImage : public CmdCommand
{
public:
									ImageBlitFromImage	(deUint32 seed, BlitScale scale, vk::VkImageLayout imageLayout) : m_seed(seed), m_scale(scale), m_imageLayout(imageLayout) {}
									~ImageBlitFromImage	(void) {}
	const char*						getName				(void) const { return "ImageBlitFromImage"; }

	void							logPrepare			(TestLog& log, size_t commandIndex) const;
	void							prepare				(PrepareContext& context);
	void							logSubmit			(TestLog& log, size_t commandIndex) const;
	void							submit				(SubmitContext& context);
	void							verify				(VerifyContext& context, size_t commandIndex);

private:
	const deUint32					m_seed;				// Seed for the random source-image contents
	const BlitScale					m_scale;			// BLIT_SCALE_10 (1:1) or BLIT_SCALE_20 (2x upscale)
	const vk::VkImageLayout			m_imageLayout;		// Layout the test image is in when the blit is recorded
	deInt32							m_imageWidth;		// Destination (test image) dimensions
	deInt32							m_imageHeight;
	vk::VkDeviceSize				m_imageMemorySize;
	deInt32							m_srcImageWidth;	// Source dimensions; half of destination for BLIT_SCALE_20
	deInt32							m_srcImageHeight;
	vk::Move<vk::VkImage>			m_srcImage;
	vk::Move<vk::VkDeviceMemory>	m_memory;			// Memory backing m_srcImage
};
3639
3640void ImageBlitFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3641{
3642	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image blit." << TestLog::EndMessage;
3643}
3644
// Create the blit source image and fill it with seeded random data:
// 1) pick the source size (full size for 1:1, half per dimension for 2x),
// 2) create a 2D RGBA8 image of that size,
// 3) upload random bytes through a host-visible staging buffer,
// 4) transition the image to TRANSFER_SRC_OPTIMAL for use in submit().
void ImageBlitFromImage::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice				device			= context.getContext().getDevice();
	const vk::VkQueue				queue			= context.getContext().getQueue();
	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();

	m_imageWidth		= context.getImageWidth();
	m_imageHeight		= context.getImageHeight();
	m_imageMemorySize	= context.getImageMemorySize();

	// Source size: identical for a 1:1 blit, half per dimension for 2x upscale.
	if (m_scale == BLIT_SCALE_10)
	{
		m_srcImageWidth			= m_imageWidth;
		m_srcImageHeight		= m_imageHeight;
	}
	else if (m_scale == BLIT_SCALE_20)
	{
		m_srcImageWidth			= m_imageWidth / 2;
		m_srcImageHeight		= m_imageHeight / 2;
	}
	else
		DE_FATAL("Unsupported scale");

	// Source image: optimal-tiling 2D RGBA8, usable both as transfer
	// destination (staging upload) and source (blit).
	{
		const vk::VkImageCreateInfo	createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,

			0,
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)m_srcImageWidth,
				(deUint32)m_srcImageHeight,
				1u,
			},
			1, 1, // mipLevels, arrayLayers
			vk::VK_SAMPLE_COUNT_1_BIT,

			vk::VK_IMAGE_TILING_OPTIMAL,
			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
			vk::VK_SHARING_MODE_EXCLUSIVE,

			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};

		m_srcImage = vk::createImage(vkd, device, &createInfo);
	}

	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);

	// Upload seeded random data into the source image via a temporary staging
	// buffer in a one-off command buffer submission.
	{
		const vk::Unique<vk::VkBuffer>			srcBuffer		(createBuffer(vkd, device, 4 * m_srcImageWidth * m_srcImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
		const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
		// UNDEFINED -> TRANSFER_DST_OPTIMAL before the staging upload.
		const vk::VkImageMemoryBarrier			preImageBarrier	=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			0,
			vk::VK_ACCESS_TRANSFER_WRITE_BIT,

			vk::VK_IMAGE_LAYOUT_UNDEFINED,
			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_srcImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		// TRANSFER_DST_OPTIMAL -> TRANSFER_SRC_OPTIMAL after the upload so
		// submit() can blit from the image.
		const vk::VkImageMemoryBarrier			postImageBarrier =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			0,

			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_srcImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		const vk::VkBufferImageCopy				region				=
		{
			0,
			0, 0,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// mipLevel
				0,	// arrayLayer
				1	// layerCount
			},
			{ 0, 0, 0 },
			{
				(deUint32)m_srcImageWidth,
				(deUint32)m_srcImageHeight,
				1u
			}
		};

		// Fill the staging buffer with the seeded random byte stream and
		// flush so the device sees the host writes.
		{
			void* const	ptr	= mapMemory(vkd, device, *memory, 4 * m_srcImageWidth * m_srcImageHeight);
			de::Random	rng	(m_seed);

			{
				deUint8* const	data = (deUint8*)ptr;

				for (size_t ndx = 0; ndx < (size_t)(4 * m_srcImageWidth * m_srcImageHeight); ndx++)
					data[ndx] = rng.getUint8();
			}

			vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_srcImageWidth * m_srcImageHeight);
			vkd.unmapMemory(device, *memory);
		}

		// Transition, upload, transition; then submit and wait for completion.
		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
		vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);

		VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
		queueRun(vkd, queue, *commandBuffer);
	}
}
3794
3795void ImageBlitFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3796{
3797	log << TestLog::Message << commandIndex << ":" << getName() << " Blit from another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "")  << TestLog::EndMessage;
3798}
3799
// Record a vkCmdBlitImage copying the whole source image onto the whole
// target image with nearest filtering. The source was transitioned to
// TRANSFER_SRC_OPTIMAL in prepare(); the destination uses the layout the
// test selected (m_imageLayout).
void ImageBlitFromImage::submit (SubmitContext& context)
{
	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
	// Positional VkImageBlit aggregate: srcSubresource, srcOffsets[2],
	// dstSubresource, dstOffsets[2]. Both regions cover the full extent of
	// their image, so differing extents (BLIT_SCALE_20) yield a scaling blit.
	const vk::VkImageBlit		region			=
	{
		// Src
		{
			vk::VK_IMAGE_ASPECT_COLOR_BIT,
			0,	// mipLevel
			0,	// arrayLayer
			1	// layerCount
		},
		{
			{ 0, 0, 0 },
			{
				m_srcImageWidth,
				m_srcImageHeight,
				1
			},
		},

		// Dst
		{
			vk::VK_IMAGE_ASPECT_COLOR_BIT,
			0,	// mipLevel
			0,	// arrayLayer
			1	// layerCount
		},
		{
			{ 0, 0, 0 },
			{
				m_imageWidth,
				m_imageHeight,
				1u
			}
		}
	};
	vkd.cmdBlitImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, &region, vk::VK_FILTER_NEAREST);
}
3840
// Build the expected contents of the target image after the blit. The
// source image was filled from a de::Random seeded with m_seed (see
// prepare()), so the same seed regenerates the expected pixels here.
// The byte-for-byte draw order (r,g,b,a per pixel, row-major) must stay
// in sync with the buffer fill in prepare().
void ImageBlitFromImage::verify (VerifyContext& context, size_t)
{
	ReferenceMemory&	reference	(context.getReference());
	de::Random			rng			(m_seed);

	// A blit leaves the backing memory in an implementation-defined
	// (tiled) arrangement, so the raw memory reference becomes undefined;
	// only the image-level reference below is meaningful.
	reference.setUndefined(0, (size_t)m_imageMemorySize);

	{
		const PixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());

		if (m_scale == BLIT_SCALE_10)
		{
			// 1:1 blit: the target pixels are exactly the generated source pixels.
			for (deInt32 y = 0; y < m_imageHeight; y++)
			for (deInt32 x = 0; x < m_imageWidth; x++)
			{
				const deUint8 r8 = rng.getUint8();
				const deUint8 g8 = rng.getUint8();
				const deUint8 b8 = rng.getUint8();
				const deUint8 a8 = rng.getUint8();

				refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
			}
		}
		else if (m_scale == BLIT_SCALE_20)
		{
			// Scaling blit: regenerate the full source image, then emulate
			// VK_FILTER_NEAREST by sampling the source at each destination
			// pixel center scaled into source space.
			tcu::TextureLevel	source	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_srcImageWidth, m_srcImageHeight);
			const float			xscale	= ((float)m_srcImageWidth)  / (float)m_imageWidth;
			const float			yscale	= ((float)m_srcImageHeight) / (float)m_imageHeight;

			for (deInt32 y = 0; y < m_srcImageHeight; y++)
			for (deInt32 x = 0; x < m_srcImageWidth; x++)
			{
				const deUint8 r8 = rng.getUint8();
				const deUint8 g8 = rng.getUint8();
				const deUint8 b8 = rng.getUint8();
				const deUint8 a8 = rng.getUint8();

				source.getAccess().setPixel(UVec4(r8, g8, b8, a8), x, y);
			}

			for (deInt32 y = 0; y < m_imageHeight; y++)
			for (deInt32 x = 0; x < m_imageWidth; x++)
				refAccess.setPixel(source.getAccess().getPixelUint(int((float(x) + 0.5f) * xscale), int((float(y) + 0.5f) * yscale)), x, y);
		}
		else
			DE_FATAL("Unsupported scale");
	}
}
3889
// Command that blits the test image into a freshly allocated destination
// image (same size for BLIT_SCALE_10, 2x in both dimensions for
// BLIT_SCALE_20) and verifies the destination by reading it back.
class ImageBlitToImage : public CmdCommand
{
public:
									ImageBlitToImage	(BlitScale scale, vk::VkImageLayout imageLayout) : m_scale(scale), m_imageLayout(imageLayout) {}
									~ImageBlitToImage	(void) {}
	const char*						getName				(void) const { return "ImageBlitToImage"; }

	void							logPrepare			(TestLog& log, size_t commandIndex) const;
	void							prepare				(PrepareContext& context);
	void							logSubmit			(TestLog& log, size_t commandIndex) const;
	void							submit				(SubmitContext& context);
	void							verify				(VerifyContext& context, size_t commandIndex);

private:
	const BlitScale					m_scale;			// 1x or 2x blit.
	const vk::VkImageLayout			m_imageLayout;		// Layout of the source (test) image at blit time.
	deInt32							m_imageWidth;		// Source image dimensions, captured in prepare().
	deInt32							m_imageHeight;
	vk::VkDeviceSize				m_imageMemorySize;
	deInt32							m_dstImageWidth;	// Destination dimensions (scaled by m_scale).
	deInt32							m_dstImageHeight;
	vk::Move<vk::VkImage>			m_dstImage;			// Destination image created in prepare().
	vk::Move<vk::VkDeviceMemory>	m_memory;			// Backing memory for m_dstImage.
};
3914
3915void ImageBlitToImage::logPrepare (TestLog& log, size_t commandIndex) const
3916{
3917	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image blit." << TestLog::EndMessage;
3918}
3919
3920void ImageBlitToImage::prepare (PrepareContext& context)
3921{
3922	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
3923	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
3924	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
3925	const vk::VkDevice				device			= context.getContext().getDevice();
3926	const vk::VkQueue				queue			= context.getContext().getQueue();
3927	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
3928	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
3929
3930	m_imageWidth		= context.getImageWidth();
3931	m_imageHeight		= context.getImageHeight();
3932	m_imageMemorySize	= context.getImageMemorySize();
3933
3934	if (m_scale == BLIT_SCALE_10)
3935	{
3936		m_dstImageWidth		= context.getImageWidth();
3937		m_dstImageHeight	= context.getImageHeight();
3938	}
3939	else if (m_scale == BLIT_SCALE_20)
3940	{
3941		m_dstImageWidth		= context.getImageWidth() * 2;
3942		m_dstImageHeight	= context.getImageHeight() * 2;
3943	}
3944	else
3945		DE_FATAL("Unsupportd blit scale");
3946
3947	{
3948		const vk::VkImageCreateInfo	createInfo =
3949		{
3950			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3951			DE_NULL,
3952
3953			0,
3954			vk::VK_IMAGE_TYPE_2D,
3955			vk::VK_FORMAT_R8G8B8A8_UNORM,
3956			{
3957				(deUint32)m_dstImageWidth,
3958				(deUint32)m_dstImageHeight,
3959				1u,
3960			},
3961			1, 1, // mipLevels, arrayLayers
3962			vk::VK_SAMPLE_COUNT_1_BIT,
3963
3964			vk::VK_IMAGE_TILING_OPTIMAL,
3965			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3966			vk::VK_SHARING_MODE_EXCLUSIVE,
3967
3968			(deUint32)queueFamilies.size(),
3969			&queueFamilies[0],
3970			vk::VK_IMAGE_LAYOUT_UNDEFINED
3971		};
3972
3973		m_dstImage = vk::createImage(vkd, device, &createInfo);
3974	}
3975
3976	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
3977
3978	{
3979		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3980		const vk::VkImageMemoryBarrier			barrier			=
3981		{
3982			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3983			DE_NULL,
3984
3985			0,
3986			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3987
3988			vk::VK_IMAGE_LAYOUT_UNDEFINED,
3989			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3990
3991			VK_QUEUE_FAMILY_IGNORED,
3992			VK_QUEUE_FAMILY_IGNORED,
3993
3994			*m_dstImage,
3995			{
3996				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3997				0,	// Mip level
3998				1,	// Mip level count
3999				0,	// Layer
4000				1	// Layer count
4001			}
4002		};
4003
4004		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
4005
4006		VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
4007		queueRun(vkd, queue, *commandBuffer);
4008	}
4009}
4010
4011void ImageBlitToImage::logSubmit (TestLog& log, size_t commandIndex) const
4012{
4013	log << TestLog::Message << commandIndex << ":" << getName() << " Blit image to another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "")  << TestLog::EndMessage;
4014}
4015
// Record a vkCmdBlitImage copying the whole test image onto the whole
// destination image with nearest filtering. The destination was
// transitioned to TRANSFER_DST_OPTIMAL in prepare(); the source uses the
// layout the test selected (m_imageLayout).
void ImageBlitToImage::submit (SubmitContext& context)
{
	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
	// Positional VkImageBlit aggregate: srcSubresource, srcOffsets[2],
	// dstSubresource, dstOffsets[2]. Both regions cover the full extent of
	// their image, so differing extents (BLIT_SCALE_20) yield a scaling blit.
	const vk::VkImageBlit		region			=
	{
		// Src
		{
			vk::VK_IMAGE_ASPECT_COLOR_BIT,
			0,	// mipLevel
			0,	// arrayLayer
			1	// layerCount
		},
		{
			{ 0, 0, 0 },
			{
				m_imageWidth,
				m_imageHeight,
				1
			},
		},

		// Dst
		{
			vk::VK_IMAGE_ASPECT_COLOR_BIT,
			0,	// mipLevel
			0,	// arrayLayer
			1	// layerCount
		},
		{
			{ 0, 0, 0 },
			{
				m_dstImageWidth,
				m_dstImageHeight,
				1u
			}
		}
	};
	vkd.cmdBlitImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region, vk::VK_FILTER_NEAREST);
}
4056
// Read back the destination image through a host-visible buffer and
// compare it against the reference image. For the 2x blit the reference
// is an upsampled copy (each reference pixel replicated into a 2x2 block,
// matching VK_FILTER_NEAREST magnification).
void ImageBlitToImage::verify (VerifyContext& context, size_t commandIndex)
{
	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice						device			= context.getContext().getDevice();
	const vk::VkQueue						queue			= context.getContext().getQueue();
	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
	const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_dstImageWidth * m_dstImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
	const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
	{
		// Make the blit write visible to the copy and move the image to
		// TRANSFER_SRC_OPTIMAL for the readback.
		const vk::VkImageMemoryBarrier		imageBarrier	=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			vk::VK_ACCESS_TRANSFER_READ_BIT,

			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_dstImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		// Make the copy result visible to host reads after submission.
		const vk::VkBufferMemoryBarrier bufferBarrier =
		{
			vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			vk::VK_ACCESS_HOST_READ_BIT,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,
			*dstBuffer,
			0,
			VK_WHOLE_SIZE
		};
		// Copy the full destination image, tightly packed, into the buffer.
		const vk::VkBufferImageCopy	region =
		{
			0,
			0, 0,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// mipLevel
				0,	// arrayLayer
				1	// layerCount
			},
			{ 0, 0, 0 },
			{
				(deUint32)m_dstImageWidth,
				(deUint32)m_dstImageHeight,
				1
			}
		};

		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
		vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
	}

	VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
	queueRun(vkd, queue, *commandBuffer);

	{
		void* const	ptr		= mapMemory(vkd, device, *memory, 4 * m_dstImageWidth * m_dstImageHeight);

		// Memory may be non-coherent; invalidate before the host reads it.
		vk::invalidateMappedMemoryRange(vkd, device, *memory, 0,  4 * m_dstImageWidth * m_dstImageHeight);

		if (m_scale == BLIT_SCALE_10)
		{
			// 1:1 blit: compare directly against the reference image.
			const deUint8* const			data		= (const deUint8*)ptr;
			const ConstPixelBufferAccess	resAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
			const ConstPixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());

			if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
				resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
		}
		else if (m_scale == BLIT_SCALE_20)
		{
			// 2x blit: build the expected image by nearest-neighbour
			// upsampling the reference (dst pixel (x,y) <- ref pixel (x/2,y/2)).
			const deUint8* const			data		= (const deUint8*)ptr;
			const ConstPixelBufferAccess	resAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
			tcu::TextureLevel				reference	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1);

			{
				const ConstPixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());

				for (deInt32 y = 0; y < m_dstImageHeight; y++)
				for (deInt32 x = 0; x < m_dstImageWidth; x++)
				{
					reference.getAccess().setPixel(refAccess.getPixel(x/2, y/2), x, y);
				}
			}

			if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), reference.getAccess(), resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
				resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
		}
		else
			DE_FATAL("Unknown scale");

		vkd.unmapMemory(device, *memory);
	}
}
4173
4174class PrepareRenderPassContext
4175{
4176public:
4177								PrepareRenderPassContext	(PrepareContext&	context,
4178															 vk::VkRenderPass	renderPass,
4179															 vk::VkFramebuffer	framebuffer,
4180															 deInt32			targetWidth,
4181															 deInt32			targetHeight)
4182		: m_context			(context)
4183		, m_renderPass		(renderPass)
4184		, m_framebuffer		(framebuffer)
4185		, m_targetWidth		(targetWidth)
4186		, m_targetHeight	(targetHeight)
4187	{
4188	}
4189
4190	const Memory&				getMemory					(void) const { return m_context.getMemory(); }
4191	const Context&				getContext					(void) const { return m_context.getContext(); }
4192	const vk::BinaryCollection&	getBinaryCollection			(void) const { return m_context.getBinaryCollection(); }
4193
4194	vk::VkBuffer				getBuffer					(void) const { return m_context.getBuffer(); }
4195	vk::VkDeviceSize			getBufferSize				(void) const { return m_context.getBufferSize(); }
4196
4197	vk::VkImage					getImage					(void) const { return m_context.getImage(); }
4198	deInt32						getImageWidth				(void) const { return m_context.getImageWidth(); }
4199	deInt32						getImageHeight				(void) const { return m_context.getImageHeight(); }
4200	vk::VkImageLayout			getImageLayout				(void) const { return m_context.getImageLayout(); }
4201
4202	deInt32						getTargetWidth				(void) const { return m_targetWidth; }
4203	deInt32						getTargetHeight				(void) const { return m_targetHeight; }
4204
4205	vk::VkRenderPass			getRenderPass				(void) const { return m_renderPass; }
4206
4207private:
4208	PrepareContext&				m_context;
4209	const vk::VkRenderPass		m_renderPass;
4210	const vk::VkFramebuffer		m_framebuffer;
4211	const deInt32				m_targetWidth;
4212	const deInt32				m_targetHeight;
4213};
4214
// Context handed to RenderPassCommand::verify(): wraps VerifyContext and
// adds a CPU-side reference image for the render target. The caller
// (SubmitRenderPass::verify) clears the reference target and each command
// updates it to mirror its rendering.
class VerifyRenderPassContext
{
public:
							VerifyRenderPassContext		(VerifyContext&			context,
														 deInt32				targetWidth,
														 deInt32				targetHeight)
		: m_context			(context)
		, m_referenceTarget	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), targetWidth, targetHeight)
	{
	}

	const Context&			getContext			(void) const { return m_context.getContext(); }
	TestLog&				getLog				(void) const { return m_context.getLog(); }
	tcu::ResultCollector&	getResultCollector	(void) const { return m_context.getResultCollector(); }

	// Reference image for the render pass color target (RGBA8).
	TextureLevel&			getReferenceTarget	(void) { return m_referenceTarget; }

	ReferenceMemory&		getReference		(void) { return m_context.getReference(); }
	TextureLevel&			getReferenceImage	(void) { return m_context.getReferenceImage();}

private:
	VerifyContext&	m_context;
	TextureLevel	m_referenceTarget;
};
4239
// Base class for commands recorded inside a render pass instance
// (SubmitRenderPass calls submit() between vkCmdBeginRenderPass and
// vkCmdEndRenderPass). All hooks default to no-ops.
class RenderPassCommand
{
public:
	virtual				~RenderPassCommand	(void) {}
	// Short human-readable name used in log messages.
	virtual const char*	getName				(void) const = 0;

	// Log things that are done during prepare
	virtual void		logPrepare			(TestLog&, size_t) const {}
	// Log submitted calls etc.
	virtual void		logSubmit			(TestLog&, size_t) const {}

	// Allocate vulkan resources and prepare for submit.
	virtual void		prepare				(PrepareRenderPassContext&) {}

	// Submit commands to command buffer.
	virtual void		submit				(SubmitContext&) {}

	// Verify results
	virtual void		verify				(VerifyRenderPassContext&, size_t) {}
};
4260
// CmdCommand that creates a 256x256 color render target, runs a sequence
// of RenderPassCommands inside a single render pass instance, and
// verifies the rendered target against a CPU-side reference image.
class SubmitRenderPass : public CmdCommand
{
public:
				// Takes ownership of the command objects; they are deleted in the destructor.
				SubmitRenderPass	(const vector<RenderPassCommand*>& commands);
				~SubmitRenderPass	(void);
	const char*	getName				(void) const { return "SubmitRenderPass"; }

	void		logPrepare			(TestLog&, size_t) const;
	void		logSubmit			(TestLog&, size_t) const;

	void		prepare				(PrepareContext&);
	void		submit				(SubmitContext&);

	void		verify				(VerifyContext&, size_t);

private:
	const deInt32					m_targetWidth;			// Render target width (256, set in ctor).
	const deInt32					m_targetHeight;			// Render target height (256, set in ctor).
	vk::Move<vk::VkRenderPass>		m_renderPass;
	vk::Move<vk::VkDeviceMemory>	m_colorTargetMemory;	// Backing memory for m_colorTarget.
	de::MovePtr<vk::Allocation>		m_colorTargetMemory2;	// NOTE(review): never assigned in the visible code — possibly vestigial; verify.
	vk::Move<vk::VkImage>			m_colorTarget;
	vk::Move<vk::VkImageView>		m_colorTargetView;
	vk::Move<vk::VkFramebuffer>		m_framebuffer;
	vector<RenderPassCommand*>		m_commands;				// Owned; deleted in the destructor.
};
4287
// Store the command list (ownership transfers to this object) and fix the
// render target size at 256x256.
SubmitRenderPass::SubmitRenderPass (const vector<RenderPassCommand*>& commands)
	: m_targetWidth		(256)
	, m_targetHeight	(256)
	, m_commands		(commands)
{
}
4294
4295SubmitRenderPass::~SubmitRenderPass()
4296{
4297	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4298		delete m_commands[cmdNdx];
4299}
4300
4301void SubmitRenderPass::logPrepare (TestLog& log, size_t commandIndex) const
4302{
4303	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
4304	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
4305
4306	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4307	{
4308		RenderPassCommand& command = *m_commands[cmdNdx];
4309		command.logPrepare(log, cmdNdx);
4310	}
4311}
4312
4313void SubmitRenderPass::logSubmit (TestLog& log, size_t commandIndex) const
4314{
4315	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
4316	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
4317
4318	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4319	{
4320		RenderPassCommand& command = *m_commands[cmdNdx];
4321		command.logSubmit(log, cmdNdx);
4322	}
4323}
4324
// Create everything needed to run the child commands in a render pass:
// an RGBA8 color target image (with memory), its view, a single-subpass
// render pass, and a framebuffer. Finally delegate prepare() to each
// child command with a PrepareRenderPassContext.
void SubmitRenderPass::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice						device			= context.getContext().getDevice();
	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();

	const vk::VkAttachmentReference	colorAttachments[]	=
	{
		{ 0, vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL }
	};
	// Single graphics subpass with one color attachment, no inputs,
	// no resolve, no depth/stencil, no preserved attachments.
	const vk::VkSubpassDescription	subpass				=
	{
		0u,
		vk::VK_PIPELINE_BIND_POINT_GRAPHICS,

		0u,
		DE_NULL,

		DE_LENGTH_OF_ARRAY(colorAttachments),
		colorAttachments,
		DE_NULL,
		DE_NULL,
		0u,
		DE_NULL
	};
	// Color attachment is cleared on load, stored on exit, and left in
	// TRANSFER_SRC_OPTIMAL so verify() can copy it out without a further
	// layout transition.
	const vk::VkAttachmentDescription attachment =
	{
		0u,
		vk::VK_FORMAT_R8G8B8A8_UNORM,
		vk::VK_SAMPLE_COUNT_1_BIT,

		vk::VK_ATTACHMENT_LOAD_OP_CLEAR,
		vk::VK_ATTACHMENT_STORE_OP_STORE,

		vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE,
		vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,

		vk::VK_IMAGE_LAYOUT_UNDEFINED,
		vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL
	};
	{
		// Color target image; TRANSFER_SRC usage allows the readback in verify().
		const vk::VkImageCreateInfo createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,
			0u,

			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{ (deUint32)m_targetWidth, (deUint32)m_targetHeight, 1u },
			1u,
			1u,
			vk::VK_SAMPLE_COUNT_1_BIT,
			vk::VK_IMAGE_TILING_OPTIMAL,
			vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
			vk::VK_SHARING_MODE_EXCLUSIVE,
			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};

		m_colorTarget = vk::createImage(vkd, device, &createInfo);
	}

	m_colorTargetMemory = bindImageMemory(vki, vkd, physicalDevice, device, *m_colorTarget, 0);

	{
		// 2D color view over the whole image (single mip, single layer).
		const vk::VkImageViewCreateInfo createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
			DE_NULL,

			0u,
			*m_colorTarget,
			vk::VK_IMAGE_VIEW_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				vk::VK_COMPONENT_SWIZZLE_R,
				vk::VK_COMPONENT_SWIZZLE_G,
				vk::VK_COMPONENT_SWIZZLE_B,
				vk::VK_COMPONENT_SWIZZLE_A
			},
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0u,
				1u,
				0u,
				1u
			}
		};

		m_colorTargetView = vk::createImageView(vkd, device, &createInfo);
	}
	{
		// Render pass: one attachment, one subpass, no dependencies.
		const vk::VkRenderPassCreateInfo createInfo =
		{
			vk::VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
			DE_NULL,
			0u,

			1u,
			&attachment,

			1u,
			&subpass,

			0,
			DE_NULL
		};

		m_renderPass = vk::createRenderPass(vkd, device, &createInfo);
	}

	{
		const vk::VkImageView				imageViews[]	=
		{
			*m_colorTargetView
		};
		const vk::VkFramebufferCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
			DE_NULL,
			0u,

			*m_renderPass,
			DE_LENGTH_OF_ARRAY(imageViews),
			imageViews,
			(deUint32)m_targetWidth,
			(deUint32)m_targetHeight,
			1u
		};

		m_framebuffer = vk::createFramebuffer(vkd, device, &createInfo);
	}

	{
		// Let each child command create its own resources with access to
		// the render pass and framebuffer.
		PrepareRenderPassContext renderpassContext (context, *m_renderPass, *m_framebuffer, m_targetWidth, m_targetHeight);

		for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
		{
			RenderPassCommand& command = *m_commands[cmdNdx];
			command.prepare(renderpassContext);
		}
	}
}
4472
// Record the render pass: begin with a clear to opaque black over the
// full target, record each child command inline, then end the pass.
void SubmitRenderPass::submit (SubmitContext& context)
{
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer		commandBuffer	= context.getCommandBuffer();
	// Matches the clear color used for the reference target in verify().
	const vk::VkClearValue			clearValue		= vk::makeClearValueColorF32(0.0f, 0.0f, 0.0f, 1.0f);

	const vk::VkRenderPassBeginInfo	beginInfo		=
	{
		vk::VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
		DE_NULL,

		*m_renderPass,
		*m_framebuffer,

		{ { 0, 0 },  { (deUint32)m_targetWidth, (deUint32)m_targetHeight } },
		1u,
		&clearValue
	};

	vkd.cmdBeginRenderPass(commandBuffer, &beginInfo, vk::VK_SUBPASS_CONTENTS_INLINE);

	// Child commands record inline into the same subpass.
	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
	{
		RenderPassCommand& command = *m_commands[cmdNdx];

		command.submit(context);
	}

	vkd.cmdEndRenderPass(commandBuffer);
}
4503
// Build the reference target (clear + per-command updates), then read the
// rendered color target back through a host-visible buffer and compare it
// against the reference with a zero threshold.
void SubmitRenderPass::verify (VerifyContext& context, size_t commandIndex)
{
	TestLog&					log				(context.getLog());
	tcu::ResultCollector&		resultCollector	(context.getResultCollector());
	const string				sectionName		(de::toString(commandIndex) + ":" + getName());
	const tcu::ScopedLogSection	section			(log, sectionName, sectionName);
	VerifyRenderPassContext		verifyContext	(context, m_targetWidth, m_targetHeight);

	// Mirror the clear performed at vkCmdBeginRenderPass (opaque black).
	tcu::clear(verifyContext.getReferenceTarget().getAccess(), Vec4(0.0f, 0.0f, 0.0f, 1.0f));

	// Each child command updates the reference target to match its rendering.
	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
	{
		RenderPassCommand& command = *m_commands[cmdNdx];
		command.verify(verifyContext, cmdNdx);
	}

	{
		const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
		const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
		const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
		const vk::VkDevice						device			= context.getContext().getDevice();
		const vk::VkQueue						queue			= context.getContext().getQueue();
		const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
		const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
		const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_targetWidth * m_targetHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
		const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
		{
			// The render pass left the attachment in TRANSFER_SRC_OPTIMAL
			// (see the attachment description in prepare()); this barrier only
			// makes the color writes visible to the transfer read.
			const vk::VkImageMemoryBarrier		imageBarrier	=
			{
				vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
				DE_NULL,

				vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
				vk::VK_ACCESS_TRANSFER_READ_BIT,

				vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
				vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

				VK_QUEUE_FAMILY_IGNORED,
				VK_QUEUE_FAMILY_IGNORED,

				*m_colorTarget,
				{
					vk::VK_IMAGE_ASPECT_COLOR_BIT,
					0,	// Mip level
					1,	// Mip level count
					0,	// Layer
					1	// Layer count
				}
			};
			// Make the copy result visible to host reads after submission.
			const vk::VkBufferMemoryBarrier bufferBarrier =
			{
				vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
				DE_NULL,

				vk::VK_ACCESS_TRANSFER_WRITE_BIT,
				vk::VK_ACCESS_HOST_READ_BIT,

				VK_QUEUE_FAMILY_IGNORED,
				VK_QUEUE_FAMILY_IGNORED,
				*dstBuffer,
				0,
				VK_WHOLE_SIZE
			};
			// Copy the full render target, tightly packed, into the buffer.
			const vk::VkBufferImageCopy	region =
			{
				0,
				0, 0,
				{
					vk::VK_IMAGE_ASPECT_COLOR_BIT,
					0,	// mipLevel
					0,	// arrayLayer
					1	// layerCount
				},
				{ 0, 0, 0 },
				{
					(deUint32)m_targetWidth,
					(deUint32)m_targetHeight,
					1u
				}
			};

			vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
			vkd.cmdCopyImageToBuffer(*commandBuffer, *m_colorTarget, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
			vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
		}

		VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
		queueRun(vkd, queue, *commandBuffer);

		{
			void* const	ptr		= mapMemory(vkd, device, *memory, 4 * m_targetWidth * m_targetHeight);

			// Memory may be non-coherent; invalidate before the host reads it.
			vk::invalidateMappedMemoryRange(vkd, device, *memory, 0,  4 * m_targetWidth * m_targetHeight);

			{
				const deUint8* const			data		= (const deUint8*)ptr;
				const ConstPixelBufferAccess	resAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_targetWidth, m_targetHeight, 1, data);
				const ConstPixelBufferAccess&	refAccess	(verifyContext.getReferenceTarget().getAccess());

				if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
					resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
			}

			vkd.unmapMemory(device, *memory);
		}
	}
}
4613
// Objects created together by createPipelineWithResources(); the Move<>
// wrappers own and destroy the Vulkan handles.
struct PipelineResources
{
	vk::Move<vk::VkPipeline>			pipeline;				// Graphics pipeline.
	vk::Move<vk::VkDescriptorSetLayout>	descriptorSetLayout;	// Null handle when no bindings were supplied.
	vk::Move<vk::VkPipelineLayout>		pipelineLayout;
};
4620
// Create a graphics pipeline (and its descriptor set layout and pipeline
// layout) running the given vertex and fragment shaders. The viewport and
// scissor statically cover viewPortWidth x viewPortHeight, depth/stencil
// tests and blending are disabled, and all created objects are returned
// through 'resources'.
void createPipelineWithResources (const vk::DeviceInterface&							vkd,
								  const vk::VkDevice									device,
								  const vk::VkRenderPass								renderPass,
								  const deUint32										subpass,
								  const vk::VkShaderModule&								vertexShaderModule,
								  const vk::VkShaderModule&								fragmentShaderModule,
								  const deUint32										viewPortWidth,
								  const deUint32										viewPortHeight,
								  const vector<vk::VkVertexInputBindingDescription>&	vertexBindingDescriptions,
								  const vector<vk::VkVertexInputAttributeDescription>&	vertexAttributeDescriptions,
								  const vector<vk::VkDescriptorSetLayoutBinding>&		bindings,
								  const vk::VkPrimitiveTopology							topology,
								  deUint32												pushConstantRangeCount,
								  const vk::VkPushConstantRange*						pushConstantRanges,
								  PipelineResources&									resources)
{
	// Only create a descriptor set layout when the caller supplied bindings;
	// otherwise resources.descriptorSetLayout stays a null handle.
	if (!bindings.empty())
	{
		const vk::VkDescriptorSetLayoutCreateInfo createInfo =
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
			DE_NULL,

			0u,
			(deUint32)bindings.size(),
			// Note: inside this branch bindings is known non-empty, so the
			// ternary always takes the &bindings[0] side.
			bindings.empty() ? DE_NULL : &bindings[0]
		};

		resources.descriptorSetLayout = vk::createDescriptorSetLayout(vkd, device, &createInfo);
	}

	// Pipeline layout references the (possibly null) descriptor set layout
	// plus any caller-provided push constant ranges.
	{
		const vk::VkDescriptorSetLayout			descriptorSetLayout_	= *resources.descriptorSetLayout;
		const vk::VkPipelineLayoutCreateInfo	createInfo				=
		{
			vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
			DE_NULL,
			0,

			resources.descriptorSetLayout ? 1u : 0u,
			resources.descriptorSetLayout ? &descriptorSetLayout_ : DE_NULL,

			pushConstantRangeCount,
			pushConstantRanges
		};

		resources.pipelineLayout = vk::createPipelineLayout(vkd, device, &createInfo);
	}

	{
		// Vertex + fragment stages, both using the "main" entry point.
		const vk::VkPipelineShaderStageCreateInfo			shaderStages[]					=
		{
			{
				vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
				DE_NULL,
				0,
				vk::VK_SHADER_STAGE_VERTEX_BIT,
				vertexShaderModule,
				"main",
				DE_NULL
			},
			{
				vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
				DE_NULL,
				0,
				vk::VK_SHADER_STAGE_FRAGMENT_BIT,
				fragmentShaderModule,
				"main",
				DE_NULL
			}
		};
		// Depth test, depth write, depth bounds test and stencil test are all
		// disabled. NOTE(review): minDepthBounds -1.0 is outside the normal
		// [0,1] range, but the bounds test is disabled so the value is unused.
		const vk::VkPipelineDepthStencilStateCreateInfo		depthStencilState				=
		{
			vk::VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
			DE_NULL,
			0u,
			DE_FALSE,
			DE_FALSE,
			vk::VK_COMPARE_OP_ALWAYS,
			DE_FALSE,
			DE_FALSE,
			{
				vk::VK_STENCIL_OP_KEEP,
				vk::VK_STENCIL_OP_KEEP,
				vk::VK_STENCIL_OP_KEEP,
				vk::VK_COMPARE_OP_ALWAYS,
				0u,
				0u,
				0u,
			},
			{
				vk::VK_STENCIL_OP_KEEP,
				vk::VK_STENCIL_OP_KEEP,
				vk::VK_STENCIL_OP_KEEP,
				vk::VK_COMPARE_OP_ALWAYS,
				0u,
				0u,
				0u,
			},
			-1.0f,
			+1.0f
		};
		// Vertex input layout exactly as supplied by the caller (may be empty).
		const vk::VkPipelineVertexInputStateCreateInfo		vertexInputState				=
		{
			vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
			DE_NULL,
			0u,

			(deUint32)vertexBindingDescriptions.size(),
			vertexBindingDescriptions.empty() ? DE_NULL : &vertexBindingDescriptions[0],

			(deUint32)vertexAttributeDescriptions.size(),
			vertexAttributeDescriptions.empty() ? DE_NULL : &vertexAttributeDescriptions[0]
		};
		const vk::VkPipelineInputAssemblyStateCreateInfo	inputAssemblyState				=
		{
			vk::VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
			DE_NULL,
			0,
			topology,
			VK_FALSE
		};
		// Single static viewport/scissor covering the whole render target.
		const vk::VkViewport								viewports[]						=
		{
			{ 0.0f, 0.0f, (float)viewPortWidth, (float)viewPortHeight, 0.0f, 1.0f }
		};
		const vk::VkRect2D									scissors[]						=
		{
			{ { 0, 0 }, { (deUint32)viewPortWidth, (deUint32)viewPortHeight } }
		};
		const vk::VkPipelineViewportStateCreateInfo			viewportState					=
		{
			vk::VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
			DE_NULL,
			0,
			DE_LENGTH_OF_ARRAY(viewports),
			viewports,
			DE_LENGTH_OF_ARRAY(scissors),
			scissors
		};
		// Fill mode, no culling, no depth bias.
		// NOTE(review): depthClampEnable is VK_TRUE here, which formally
		// requires the depthClamp device feature — confirm callers only run
		// this on implementations where it is supported.
		const vk::VkPipelineRasterizationStateCreateInfo	rasterState						=
		{
			vk::VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
			DE_NULL,
			0,

			VK_TRUE,
			VK_FALSE,
			vk::VK_POLYGON_MODE_FILL,
			vk::VK_CULL_MODE_NONE,
			vk::VK_FRONT_FACE_COUNTER_CLOCKWISE,
			VK_FALSE,
			0.0f,
			0.0f,
			0.0f,
			1.0f
		};
		// Single-sample rendering with all sample-mask bits enabled.
		const vk::VkSampleMask								sampleMask						= ~0u;
		const vk::VkPipelineMultisampleStateCreateInfo		multisampleState				=
		{
			vk::VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
			DE_NULL,
			0,

			vk::VK_SAMPLE_COUNT_1_BIT,
			VK_FALSE,
			0.0f,
			&sampleMask,
			VK_FALSE,
			VK_FALSE
		};
		// Blending disabled; all color channels written.
		const vk::VkPipelineColorBlendAttachmentState		attachments[]					=
		{
			{
				VK_FALSE,
				vk::VK_BLEND_FACTOR_ONE,
				vk::VK_BLEND_FACTOR_ZERO,
				vk::VK_BLEND_OP_ADD,
				vk::VK_BLEND_FACTOR_ONE,
				vk::VK_BLEND_FACTOR_ZERO,
				vk::VK_BLEND_OP_ADD,
				(vk::VK_COLOR_COMPONENT_R_BIT|
				 vk::VK_COLOR_COMPONENT_G_BIT|
				 vk::VK_COLOR_COMPONENT_B_BIT|
				 vk::VK_COLOR_COMPONENT_A_BIT)
			}
		};
		const vk::VkPipelineColorBlendStateCreateInfo		colorBlendState					=
		{
			vk::VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
			DE_NULL,
			0,

			VK_FALSE,
			vk::VK_LOGIC_OP_COPY,
			DE_LENGTH_OF_ARRAY(attachments),
			attachments,
			{ 0.0f, 0.0f, 0.0f, 0.0f }
		};
		// No tessellation and no dynamic state (both pointers DE_NULL).
		const vk::VkGraphicsPipelineCreateInfo				createInfo						=
		{
			vk::VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
			DE_NULL,
			0u,

			DE_LENGTH_OF_ARRAY(shaderStages),
			shaderStages,

			&vertexInputState,
			&inputAssemblyState,
			DE_NULL,
			&viewportState,
			&rasterState,
			&multisampleState,
			&depthStencilState,
			&colorBlendState,
			DE_NULL,
			*resources.pipelineLayout,
			renderPass,
			subpass,
			0,
			0
		};

		resources.pipeline = vk::createGraphicsPipeline(vkd, device, 0, &createInfo);
	}
}
4848
// Render pass command that binds the tested buffer as a 16bit index buffer
// and issues an indexed point-list draw; verify() marks the pixels addressed
// by each byte pair white in the reference image.
class RenderIndexBuffer : public RenderPassCommand
{
public:
				RenderIndexBuffer	(void) {}
				~RenderIndexBuffer	(void) {}

	const char*	getName				(void) const { return "RenderIndexBuffer"; }
	void		logPrepare			(TestLog&, size_t) const;
	void		logSubmit			(TestLog&, size_t) const;
	void		prepare				(PrepareRenderPassContext&);
	void		submit				(SubmitContext& context);
	void		verify				(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;	// Pipeline and layouts created in prepare().
	vk::VkDeviceSize				m_bufferSize;	// Size of the tested buffer in bytes.
};
4866
4867void RenderIndexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4868{
4869	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as index buffer." << TestLog::EndMessage;
4870}
4871
4872void RenderIndexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4873{
4874	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as index buffer." << TestLog::EndMessage;
4875}
4876
4877void RenderIndexBuffer::prepare (PrepareRenderPassContext& context)
4878{
4879	const vk::DeviceInterface&				vkd						= context.getContext().getDeviceInterface();
4880	const vk::VkDevice						device					= context.getContext().getDevice();
4881	const vk::VkRenderPass					renderPass				= context.getRenderPass();
4882	const deUint32							subpass					= 0;
4883	const vk::Unique<vk::VkShaderModule>	vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("index-buffer.vert"), 0));
4884	const vk::Unique<vk::VkShaderModule>	fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4885
4886	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
4887								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
4888	m_bufferSize = context.getBufferSize();
4889}
4890
4891void RenderIndexBuffer::submit (SubmitContext& context)
4892{
4893	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
4894	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
4895
4896	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
4897	vkd.cmdBindIndexBuffer(commandBuffer, context.getBuffer(), 0, vk::VK_INDEX_TYPE_UINT16);
4898	vkd.cmdDrawIndexed(commandBuffer, (deUint32)(context.getBufferSize() / 2), 1, 0, 0, 0);
4899}
4900
4901void RenderIndexBuffer::verify (VerifyRenderPassContext& context, size_t)
4902{
4903	for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
4904	{
4905		const deUint8 x  = context.getReference().get(pos * 2);
4906		const deUint8 y  = context.getReference().get((pos * 2) + 1);
4907
4908		context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
4909	}
4910}
4911
// Render pass command that binds the tested buffer as a vertex buffer
// (two bytes per vertex, VK_FORMAT_R8G8_UNORM) and draws a point list;
// verify() marks the pixel addressed by each byte pair white.
class RenderVertexBuffer : public RenderPassCommand
{
public:
				RenderVertexBuffer	(void) {}
				~RenderVertexBuffer	(void) {}

	const char*	getName				(void) const { return "RenderVertexBuffer"; }
	void		logPrepare			(TestLog&, size_t) const;
	void		logSubmit			(TestLog&, size_t) const;
	void		prepare				(PrepareRenderPassContext&);
	void		submit				(SubmitContext& context);
	void		verify				(VerifyRenderPassContext&, size_t);

private:
	PipelineResources	m_resources;	// Pipeline and layouts created in prepare().
	vk::VkDeviceSize	m_bufferSize;	// Size of the tested buffer in bytes.
};
4929
4930void RenderVertexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4931{
4932	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as vertex buffer." << TestLog::EndMessage;
4933}
4934
4935void RenderVertexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4936{
4937	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as vertex buffer." << TestLog::EndMessage;
4938}
4939
4940void RenderVertexBuffer::prepare (PrepareRenderPassContext& context)
4941{
4942	const vk::DeviceInterface&						vkd						= context.getContext().getDeviceInterface();
4943	const vk::VkDevice								device					= context.getContext().getDevice();
4944	const vk::VkRenderPass							renderPass				= context.getRenderPass();
4945	const deUint32									subpass					= 0;
4946	const vk::Unique<vk::VkShaderModule>			vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("vertex-buffer.vert"), 0));
4947	const vk::Unique<vk::VkShaderModule>			fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4948
4949	vector<vk::VkVertexInputAttributeDescription>	vertexAttributeDescriptions;
4950	vector<vk::VkVertexInputBindingDescription>		vertexBindingDescriptions;
4951
4952	{
4953		const vk::VkVertexInputBindingDescription vertexBindingDescription =
4954			{
4955				0,
4956				2,
4957				vk::VK_VERTEX_INPUT_RATE_VERTEX
4958			};
4959
4960		vertexBindingDescriptions.push_back(vertexBindingDescription);
4961	}
4962	{
4963		const vk::VkVertexInputAttributeDescription vertexAttributeDescription =
4964		{
4965			0,
4966			0,
4967			vk::VK_FORMAT_R8G8_UNORM,
4968			0
4969		};
4970
4971		vertexAttributeDescriptions.push_back(vertexAttributeDescription);
4972	}
4973	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
4974								vertexBindingDescriptions, vertexAttributeDescriptions, vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
4975
4976	m_bufferSize = context.getBufferSize();
4977}
4978
4979void RenderVertexBuffer::submit (SubmitContext& context)
4980{
4981	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
4982	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
4983	const vk::VkDeviceSize		offset			= 0;
4984	const vk::VkBuffer			buffer			= context.getBuffer();
4985
4986	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
4987	vkd.cmdBindVertexBuffers(commandBuffer, 0, 1, &buffer, &offset);
4988	vkd.cmdDraw(commandBuffer, (deUint32)(context.getBufferSize() / 2), 1, 0, 0);
4989}
4990
4991void RenderVertexBuffer::verify (VerifyRenderPassContext& context, size_t)
4992{
4993	for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
4994	{
4995		const deUint8 x  = context.getReference().get(pos * 2);
4996		const deUint8 y  = context.getReference().get((pos * 2) + 1);
4997
4998		context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
4999	}
5000}
5001
// Render pass command that exposes the tested buffer to the vertex shader as
// uniform buffers, split into MAX_UNIFORM_BUFFER_SIZE chunks with one
// descriptor set per chunk, and issues one draw per chunk.
class RenderVertexUniformBuffer : public RenderPassCommand
{
public:
									RenderVertexUniformBuffer	(void) {}
									~RenderVertexUniformBuffer	(void);

	const char*						getName						(void) const { return "RenderVertexUniformBuffer"; }
	void							logPrepare					(TestLog&, size_t) const;
	void							logSubmit					(TestLog&, size_t) const;
	void							prepare						(PrepareRenderPassContext&);
	void							submit						(SubmitContext& context);
	void							verify						(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;		// Pipeline and layouts created in prepare().
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vector<vk::VkDescriptorSet>		m_descriptorSets;	// Raw handles, one per chunk; reclaimed with the pool.

	vk::VkDeviceSize				m_bufferSize;		// Size of the tested buffer in bytes.
};
5022
RenderVertexUniformBuffer::~RenderVertexUniformBuffer (void)
{
	// Nothing to do: the vk::Move<> members clean up after themselves and the
	// descriptor sets are reclaimed when m_descriptorPool is destroyed.
}
5026
5027void RenderVertexUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5028{
5029	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
5030}
5031
5032void RenderVertexUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5033{
5034	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
5035}
5036
// Create the pipeline with a single vertex-stage uniform-buffer binding, then
// allocate one descriptor set per MAX_UNIFORM_BUFFER_SIZE chunk of the tested
// buffer and point each set at its chunk.
void RenderVertexUniformBuffer::prepare (PrepareRenderPassContext& context)
{
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	m_bufferSize = context.getBufferSize();

	// Binding 0: one uniform buffer visible to the vertex stage.
	{
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
			1,
			vk::VK_SHADER_STAGE_VERTEX_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

	// Pool sized for one descriptor set per MAX_UNIFORM_BUFFER_SIZE chunk.
	{
		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
			descriptorCount
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			descriptorCount,
			1u,
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
		m_descriptorSets.resize(descriptorCount);
	}

	// Allocate a set per chunk and write its buffer range; handles are
	// disowned and reclaimed when the pool is destroyed.
	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

		{
			// Range covers a full chunk, except the last chunk which only
			// covers the remaining tail of the buffer.
			const vk::VkDescriptorBufferInfo		bufferInfo	=
			{
				context.getBuffer(),
				(vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
				m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
					? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
					: (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
			};
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				m_descriptorSets[descriptorSetNdx],
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
				DE_NULL,
				&bufferInfo,
				DE_NULL,
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
5129
5130void RenderVertexUniformBuffer::submit (SubmitContext& context)
5131{
5132	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5133	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5134
5135	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5136
5137	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5138	{
5139		const size_t	size	= (size_t)(m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5140								? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5141								: (size_t)MAX_UNIFORM_BUFFER_SIZE);
5142		const deUint32	count	= (deUint32)(size / 2);
5143
5144		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5145		vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5146	}
5147}
5148
5149void RenderVertexUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
5150{
5151	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5152	{
5153		const size_t	offset	= descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
5154		const size_t	size	= (size_t)(m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5155								? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5156								: (size_t)MAX_UNIFORM_BUFFER_SIZE);
5157		const size_t	count	= size / 2;
5158
5159		for (size_t pos = 0; pos < count; pos++)
5160		{
5161			const deUint8 x  = context.getReference().get(offset + pos * 2);
5162			const deUint8 y  = context.getReference().get(offset + (pos * 2) + 1);
5163
5164			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5165		}
5166	}
5167}
5168
// Render pass command that exposes the tested buffer to the vertex shader as
// uniform texel buffers (R16_UINT views), split into chunks of at most
// maxTexelBufferElements texels, with one descriptor set and buffer view per
// chunk.
class RenderVertexUniformTexelBuffer : public RenderPassCommand
{
public:
				RenderVertexUniformTexelBuffer	(void) {}
				~RenderVertexUniformTexelBuffer	(void);

	const char*	getName							(void) const { return "RenderVertexUniformTexelBuffer"; }
	void		logPrepare						(TestLog&, size_t) const;
	void		logSubmit						(TestLog&, size_t) const;
	void		prepare							(PrepareRenderPassContext&);
	void		submit							(SubmitContext& context);
	void		verify							(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;			// Pipeline and layouts created in prepare().
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vector<vk::VkDescriptorSet>		m_descriptorSets;		// Raw handles; reclaimed with the pool.
	vector<vk::VkBufferView>		m_bufferViews;			// Raw handles; destroyed in the destructor.

	const vk::DeviceInterface*		m_vkd;					// Cached in prepare() so the destructor can destroy the views.
	vk::VkDevice					m_device;				// Cached in prepare() for the same reason.
	vk::VkDeviceSize				m_bufferSize;			// Size of the tested buffer in bytes.
	deUint32						m_maxUniformTexelCount;	// Device limit maxTexelBufferElements.
};
5193
5194RenderVertexUniformTexelBuffer::~RenderVertexUniformTexelBuffer (void)
5195{
5196	for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5197	{
5198		if (!!m_bufferViews[bufferViewNdx])
5199		{
5200			m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5201			m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
5202		}
5203	}
5204}
5205
5206void RenderVertexUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5207{
5208	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
5209}
5210
5211void RenderVertexUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5212{
5213	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
5214}
5215
// Create the pipeline with a single vertex-stage uniform-texel-buffer
// binding, then split the tested buffer into chunks of at most
// maxTexelBufferElements 16bit texels, creating one R16_UINT buffer view and
// one descriptor set per chunk.
void RenderVertexUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
{
	const vk::InstanceInterface&				vki						= context.getContext().getInstanceInterface();
	const vk::VkPhysicalDevice					physicalDevice			= context.getContext().getPhysicalDevice();
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Cache device and interface for the destructor (buffer-view cleanup).
	m_device				= device;
	m_vkd					= &vkd;
	m_bufferSize			= context.getBufferSize();
	m_maxUniformTexelCount	= vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;

	// Binding 0: one uniform texel buffer visible to the vertex stage.
	{
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
			1,
			vk::VK_SHADER_STAGE_VERTEX_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

	// Pool sized for one descriptor set per chunk; each texel is 2 bytes, so
	// a chunk covers at most m_maxUniformTexelCount * 2 bytes.
	{
		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 2));
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
			descriptorCount
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			descriptorCount,
			1u,
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
		m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
		m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
	}

	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
	{
		// Number of 16bit texels in this chunk; the last chunk only covers the
		// remaining tail of the buffer.
		const deUint32							count			= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
																? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
																: m_maxUniformTexelCount * 2) / 2;
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

		// R16_UINT view over this chunk of the tested buffer.
		{
			const vk::VkBufferViewCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
				DE_NULL,
				0u,

				context.getBuffer(),
				vk::VK_FORMAT_R16_UINT,
				descriptorSetNdx * m_maxUniformTexelCount * 2,
				count * 2
			};

			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
		}

		// Point the chunk's descriptor set at its buffer view.
		{
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				m_descriptorSets[descriptorSetNdx],
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
				DE_NULL,
				DE_NULL,
				&m_bufferViews[descriptorSetNdx]
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
5325
5326void RenderVertexUniformTexelBuffer::submit (SubmitContext& context)
5327{
5328	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5329	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5330
5331	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5332
5333	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5334	{
5335		const deUint32 count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5336								? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5337								: m_maxUniformTexelCount * 2) / 2;
5338
5339		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5340		vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5341	}
5342}
5343
5344void RenderVertexUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5345{
5346	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5347	{
5348		const size_t	offset	= descriptorSetNdx * m_maxUniformTexelCount * 2;
5349		const deUint32	count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5350								? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5351								: m_maxUniformTexelCount * 2) / 2;
5352
5353		for (size_t pos = 0; pos < (size_t)count; pos++)
5354		{
5355			const deUint8 x  = context.getReference().get(offset + pos * 2);
5356			const deUint8 y  = context.getReference().get(offset + (pos * 2) + 1);
5357
5358			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5359		}
5360	}
5361}
5362
// Render pass command that exposes the tested buffer to the vertex shader as
// storage buffers, split into MAX_STORAGE_BUFFER_SIZE chunks with one
// descriptor set per chunk, and issues one draw per chunk.
class RenderVertexStorageBuffer : public RenderPassCommand
{
public:
				RenderVertexStorageBuffer	(void) {}
				~RenderVertexStorageBuffer	(void);

	const char*	getName						(void) const { return "RenderVertexStorageBuffer"; }
	void		logPrepare					(TestLog&, size_t) const;
	void		logSubmit					(TestLog&, size_t) const;
	void		prepare						(PrepareRenderPassContext&);
	void		submit						(SubmitContext& context);
	void		verify						(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;		// Pipeline and layouts created in prepare().
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vector<vk::VkDescriptorSet>		m_descriptorSets;	// Raw handles, one per chunk; reclaimed with the pool.

	vk::VkDeviceSize				m_bufferSize;		// Size of the tested buffer in bytes.
};
5383
RenderVertexStorageBuffer::~RenderVertexStorageBuffer (void)
{
	// Nothing to do: the vk::Move<> members clean up after themselves and the
	// descriptor sets are reclaimed when m_descriptorPool is destroyed.
}
5387
5388void RenderVertexStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5389{
5390	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
5391}
5392
5393void RenderVertexStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5394{
5395	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
5396}
5397
// Create the pipeline with a single vertex-stage storage-buffer binding, then
// allocate one descriptor set per MAX_STORAGE_BUFFER_SIZE chunk of the tested
// buffer and point each set at its chunk.
void RenderVertexStorageBuffer::prepare (PrepareRenderPassContext& context)
{
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	m_bufferSize = context.getBufferSize();

	// Binding 0: one storage buffer visible to the vertex stage.
	{
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
			1,
			vk::VK_SHADER_STAGE_VERTEX_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

	// Pool sized for one descriptor set per MAX_STORAGE_BUFFER_SIZE chunk.
	{
		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE));
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
			descriptorCount
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			descriptorCount,
			1u,
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
		m_descriptorSets.resize(descriptorCount);
	}

	// Allocate a set per chunk and write its buffer range; handles are
	// disowned and reclaimed when the pool is destroyed.
	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

		{
			// Range covers a full chunk, except the last chunk which only
			// covers the remaining tail of the buffer.
			const vk::VkDescriptorBufferInfo		bufferInfo	=
			{
				context.getBuffer(),
				descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE,
				de::min(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE,  (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE)
			};
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				m_descriptorSets[descriptorSetNdx],
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				DE_NULL,
				&bufferInfo,
				DE_NULL,
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
5488
5489void RenderVertexStorageBuffer::submit (SubmitContext& context)
5490{
5491	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5492	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5493
5494	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5495
5496	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5497	{
5498		const size_t size	= m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5499							? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5500							: (size_t)(MAX_STORAGE_BUFFER_SIZE);
5501
5502		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5503		vkd.cmdDraw(commandBuffer, (deUint32)(size / 2), 1, 0, 0);
5504	}
5505}
5506
5507void RenderVertexStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
5508{
5509	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5510	{
5511		const size_t offset	= descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE;
5512		const size_t size	= m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5513							? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5514							: (size_t)(MAX_STORAGE_BUFFER_SIZE);
5515
5516		for (size_t pos = 0; pos < size / 2; pos++)
5517		{
5518			const deUint8 x  = context.getReference().get(offset + pos * 2);
5519			const deUint8 y  = context.getReference().get(offset + (pos * 2) + 1);
5520
5521			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5522		}
5523	}
5524}
5525
// Render-pass command that draws using the test buffer bound as an R32_UINT
// storage texel buffer. Because the buffer may hold more texels than the
// implementation's maxTexelBufferElements limit, it is split across several
// buffer views, each bound through its own descriptor set.
class RenderVertexStorageTexelBuffer : public RenderPassCommand
{
public:
				RenderVertexStorageTexelBuffer	(void) {}
				~RenderVertexStorageTexelBuffer	(void);

	const char*	getName							(void) const { return "RenderVertexStorageTexelBuffer"; }
	void		logPrepare						(TestLog&, size_t) const;
	void		logSubmit						(TestLog&, size_t) const;
	void		prepare							(PrepareRenderPassContext&);
	void		submit							(SubmitContext& context);
	void		verify							(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vector<vk::VkDescriptorSet>		m_descriptorSets;	// Raw handles; freed when the pool is destroyed
	vector<vk::VkBufferView>		m_bufferViews;		// Raw handles; destroyed explicitly in the destructor

	const vk::DeviceInterface*		m_vkd;				// Cached in prepare() so the destructor can free the views
	vk::VkDevice					m_device;			// Cached in prepare() for the same reason
	vk::VkDeviceSize				m_bufferSize;
	deUint32						m_maxStorageTexelCount;	// limits.maxTexelBufferElements of the physical device
};
5550
5551RenderVertexStorageTexelBuffer::~RenderVertexStorageTexelBuffer (void)
5552{
5553	for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5554	{
5555		if (!!m_bufferViews[bufferViewNdx])
5556		{
5557			m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5558			m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
5559		}
5560	}
5561}
5562
5563void RenderVertexStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5564{
5565	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
5566}
5567
5568void RenderVertexStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5569{
5570	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
5571}
5572
// Build the graphics pipeline and descriptor resources for rendering the
// test buffer as a storage texel buffer.
//
// The buffer is split into chunks of at most m_maxStorageTexelCount R32_UINT
// texels (m_maxStorageTexelCount * 4 bytes). Each chunk gets its own
// VkBufferView and its own descriptor set; submit() draws once per chunk.
void RenderVertexStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
{
	const vk::InstanceInterface&				vki						= context.getContext().getInstanceInterface();
	const vk::VkPhysicalDevice					physicalDevice			= context.getContext().getPhysicalDevice();
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Cache device/interface so the destructor can destroy the buffer views.
	m_device				= device;
	m_vkd					= &vkd;
	m_bufferSize			= context.getBufferSize();
	m_maxStorageTexelCount	= vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;

	// Single storage texel buffer binding visible to the vertex stage.
	{
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
			1,
			vk::VK_SHADER_STAGE_VERTEX_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

	// One descriptor set per chunk of m_maxStorageTexelCount * 4 bytes.
	{
		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
			descriptorCount
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			descriptorCount,	// maxSets
			1u,					// poolSizeCount
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
		m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
		m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
	}

	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		// disown(): the raw handle is stored; the pool owns its lifetime.
		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

		// Create an R32_UINT view over this chunk; the last view may be
		// smaller than m_maxStorageTexelCount * 4 bytes.
		{
			const vk::VkBufferViewCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
				DE_NULL,
				0u,

				context.getBuffer(),
				vk::VK_FORMAT_R32_UINT,
				descriptorSetNdx * m_maxStorageTexelCount * 4,	// offset (bytes)
				(deUint32)de::min<vk::VkDeviceSize>(m_maxStorageTexelCount * 4, m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4)	// range (bytes)
			};

			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
		}

		// Point the descriptor set at the chunk's buffer view.
		{
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				m_descriptorSets[descriptorSetNdx],
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
				DE_NULL,	// pImageInfo
				DE_NULL,	// pBufferInfo
				&m_bufferViews[descriptorSetNdx]
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
5679
5680void RenderVertexStorageTexelBuffer::submit (SubmitContext& context)
5681{
5682	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5683	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5684
5685	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5686
5687	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5688	{
5689		const deUint32 count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5690								? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5691								: m_maxStorageTexelCount * 4) / 2;
5692
5693		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5694		vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5695	}
5696}
5697
5698void RenderVertexStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5699{
5700	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5701	{
5702		const size_t	offset	= descriptorSetNdx * m_maxStorageTexelCount * 4;
5703		const deUint32	count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5704								? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5705								: m_maxStorageTexelCount * 4) / 2;
5706
5707		DE_ASSERT(context.getReference().getSize() <= 4 * m_maxStorageTexelCount * m_descriptorSets.size());
5708		DE_ASSERT(context.getReference().getSize() > offset);
5709		DE_ASSERT(offset + count * 2 <= context.getReference().getSize());
5710
5711		for (size_t pos = 0; pos < (size_t)count; pos++)
5712		{
5713			const deUint8 x = context.getReference().get(offset + pos * 2);
5714			const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5715
5716			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5717		}
5718	}
5719}
5720
// Render-pass command that draws using the test image bound as a storage
// image: each texel is read in the vertex shader and its components are
// used as point coordinates.
class RenderVertexStorageImage : public RenderPassCommand
{
public:
				RenderVertexStorageImage	(void) {}
				~RenderVertexStorageImage	(void);

	const char*	getName						(void) const { return "RenderVertexStorageImage"; }
	void		logPrepare					(TestLog&, size_t) const;
	void		logSubmit					(TestLog&, size_t) const;
	void		prepare						(PrepareRenderPassContext&);
	void		submit						(SubmitContext& context);
	void		verify						(VerifyRenderPassContext&, size_t);

private:
	// All Vulkan objects are vk::Move<> wrappers, so no manual cleanup is
	// needed in the destructor.
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
	vk::Move<vk::VkImageView>		m_imageView;
};
5740
// All members are vk::Move<> handles; they release themselves.
RenderVertexStorageImage::~RenderVertexStorageImage (void)
{
}
5744
5745void RenderVertexStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
5746{
5747	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
5748}
5749
5750void RenderVertexStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
5751{
5752	log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
5753}
5754
// Build the graphics pipeline and descriptor resources for rendering with
// the test image bound as a storage image: one descriptor set containing a
// single VK_DESCRIPTOR_TYPE_STORAGE_IMAGE binding visible to the vertex
// stage, backed by a 2D RGBA8 view of the whole image.
void RenderVertexStorageImage::prepare (PrepareRenderPassContext& context)
{
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Single storage image binding at binding 0, vertex stage only.
	{
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
			1,
			vk::VK_SHADER_STAGE_VERTEX_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

	// Pool for exactly one storage image descriptor set.
	{
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
			1
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			1u,	// maxSets
			1u,	// poolSizeCount
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
	}

	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);

		// 2D RGBA8 view of the full image (single mip level, single layer).
		{
			const vk::VkImageViewCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
				DE_NULL,
				0u,

				context.getImage(),
				vk::VK_IMAGE_VIEW_TYPE_2D,
				vk::VK_FORMAT_R8G8B8A8_UNORM,
				vk::makeComponentMappingRGBA(),
				{
					vk::VK_IMAGE_ASPECT_COLOR_BIT,
					0u,	// baseMipLevel
					1u,	// levelCount
					0u,	// baseArrayLayer
					1u	// layerCount
				}
			};

			m_imageView = vk::createImageView(vkd, device, &createInfo);
		}

		// Write the image view into the descriptor set. A storage image
		// needs no sampler, hence the null sampler handle.
		{
			const vk::VkDescriptorImageInfo			imageInfo	=
			{
				0,	// sampler (unused for storage images)
				*m_imageView,
				context.getImageLayout()
			};
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				*m_descriptorSet,
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
				&imageInfo,
				DE_NULL,	// pBufferInfo
				DE_NULL,	// pTexelBufferView
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
5863
5864void RenderVertexStorageImage::submit (SubmitContext& context)
5865{
5866	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5867	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5868
5869	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5870
5871	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
5872	vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
5873}
5874
5875void RenderVertexStorageImage::verify (VerifyRenderPassContext& context, size_t)
5876{
5877	for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
5878	{
5879		const tcu::IVec3		size	= context.getReferenceImage().getAccess().getSize();
5880		const tcu::UVec4		pixel	= context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
5881
5882		if (pos % 2 == 0)
5883			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
5884		else
5885			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
5886	}
5887}
5888
// Render-pass command that draws using the test image bound as a combined
// image sampler: texels are sampled in the vertex shader and their
// components used as point coordinates.
class RenderVertexSampledImage : public RenderPassCommand
{
public:
				RenderVertexSampledImage	(void) {}
				~RenderVertexSampledImage	(void);

	const char*	getName						(void) const { return "RenderVertexSampledImage"; }
	void		logPrepare					(TestLog&, size_t) const;
	void		logSubmit					(TestLog&, size_t) const;
	void		prepare						(PrepareRenderPassContext&);
	void		submit						(SubmitContext& context);
	void		verify						(VerifyRenderPassContext&, size_t);

private:
	// All Vulkan objects are vk::Move<> wrappers, so no manual cleanup is
	// needed in the destructor.
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
	vk::Move<vk::VkImageView>		m_imageView;
	vk::Move<vk::VkSampler>			m_sampler;
};
5909
// All members are vk::Move<> handles; they release themselves.
RenderVertexSampledImage::~RenderVertexSampledImage (void)
{
}
5913
5914void RenderVertexSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
5915{
5916	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render sampled image." << TestLog::EndMessage;
5917}
5918
5919void RenderVertexSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
5920{
5921	log << TestLog::Message << commandIndex << ":" << getName() << " Render using sampled image." << TestLog::EndMessage;
5922}
5923
// Build the graphics pipeline and descriptor resources for rendering with
// the test image bound as a combined image sampler: one descriptor set with
// a single VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER binding (vertex stage),
// backed by a 2D RGBA8 view of the whole image and a nearest-filtering,
// clamp-to-edge sampler.
void RenderVertexSampledImage::prepare (PrepareRenderPassContext& context)
{
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Single combined image sampler binding at binding 0, vertex stage only.
	{
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
			1,
			vk::VK_SHADER_STAGE_VERTEX_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

	// Pool for exactly one combined image sampler descriptor set.
	{
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
			1
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			1u,	// maxSets
			1u,	// poolSizeCount
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
	}

	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);

		// 2D RGBA8 view of the full image (single mip level, single layer).
		{
			const vk::VkImageViewCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
				DE_NULL,
				0u,

				context.getImage(),
				vk::VK_IMAGE_VIEW_TYPE_2D,
				vk::VK_FORMAT_R8G8B8A8_UNORM,
				vk::makeComponentMappingRGBA(),
				{
					vk::VK_IMAGE_ASPECT_COLOR_BIT,
					0u,	// baseMipLevel
					1u,	// levelCount
					0u,	// baseArrayLayer
					1u	// layerCount
				}
			};

			m_imageView = vk::createImageView(vkd, device, &createInfo);
		}

		// Nearest-filtering sampler so texels are read back exactly.
		{
			const vk::VkSamplerCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
				DE_NULL,
				0u,

				vk::VK_FILTER_NEAREST,	// magFilter
				vk::VK_FILTER_NEAREST,	// minFilter

				vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
				0.0f,					// mipLodBias
				VK_FALSE,				// anisotropyEnable
				1.0f,					// maxAnisotropy
				VK_FALSE,				// compareEnable
				vk::VK_COMPARE_OP_ALWAYS,
				0.0f,					// minLod
				0.0f,					// maxLod
				vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
				VK_FALSE				// unnormalizedCoordinates
			};

			m_sampler = vk::createSampler(vkd, device, &createInfo);
		}

		// Write the sampler + image view into the descriptor set.
		{
			const vk::VkDescriptorImageInfo			imageInfo	=
			{
				*m_sampler,
				*m_imageView,
				context.getImageLayout()
			};
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				*m_descriptorSet,
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
				&imageInfo,
				DE_NULL,	// pBufferInfo
				DE_NULL,	// pTexelBufferView
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
6060
6061void RenderVertexSampledImage::submit (SubmitContext& context)
6062{
6063	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
6064	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
6065
6066	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6067
6068	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
6069	vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
6070}
6071
6072void RenderVertexSampledImage::verify (VerifyRenderPassContext& context, size_t)
6073{
6074	for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
6075	{
6076		const tcu::IVec3	size	= context.getReferenceImage().getAccess().getSize();
6077		const tcu::UVec4	pixel	= context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
6078
6079		if (pos % 2 == 0)
6080			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
6081		else
6082			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
6083	}
6084}
6085
// Render-pass command that reads the test buffer as uniform buffers in the
// fragment shader. The buffer is split into MAX_UNIFORM_BUFFER_SIZE chunks,
// one descriptor set per chunk, and a full-screen quad is drawn per chunk.
class RenderFragmentUniformBuffer : public RenderPassCommand
{
public:
									RenderFragmentUniformBuffer		(void) {}
									~RenderFragmentUniformBuffer	(void);

	const char*						getName							(void) const { return "RenderFragmentUniformBuffer"; }
	void							logPrepare						(TestLog&, size_t) const;
	void							logSubmit						(TestLog&, size_t) const;
	void							prepare							(PrepareRenderPassContext&);
	void							submit							(SubmitContext& context);
	void							verify							(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vector<vk::VkDescriptorSet>		m_descriptorSets;	// Raw handles; freed when the pool is destroyed

	vk::VkDeviceSize				m_bufferSize;		// Clamped to MAX_SIZE in prepare()
	size_t							m_targetWidth;
	size_t							m_targetHeight;
};
6108
// Descriptor sets are owned by m_descriptorPool; nothing to free manually.
RenderFragmentUniformBuffer::~RenderFragmentUniformBuffer (void)
{
}
6112
6113void RenderFragmentUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6114{
6115	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
6116}
6117
6118void RenderFragmentUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6119{
6120	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
6121}
6122
// Build the graphics pipeline and descriptor resources for reading the test
// buffer as uniform buffers in the fragment shader.
//
// The buffer (clamped to MAX_SIZE) is split into chunks of at most
// MAX_UNIFORM_BUFFER_SIZE bytes, one descriptor set per chunk. An 8-byte
// push constant range carries { callId, valuesPerPixel } to the fragment
// shader for each per-chunk draw.
void RenderFragmentUniformBuffer::prepare (PrepareRenderPassContext& context)
{
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Only up to MAX_SIZE bytes of the buffer are verified through this path.
	m_bufferSize	= de::min(context.getBufferSize(), (vk::VkDeviceSize)MAX_SIZE);
	m_targetWidth	= context.getTargetWidth();
	m_targetHeight	= context.getTargetHeight();

	// Single uniform buffer binding at binding 0, fragment stage only.
	{
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
			1,
			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}
	// 8 bytes: two deUint32 push constants (callId, valuesPerPixel).
	const vk::VkPushConstantRange pushConstantRange =
	{
		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
		0u,
		8u
	};

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);

	// One descriptor set per MAX_UNIFORM_BUFFER_SIZE chunk.
	{
		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
			descriptorCount
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			descriptorCount,	// maxSets
			1u,					// poolSizeCount
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
		m_descriptorSets.resize(descriptorCount);
	}

	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		// disown(): the raw handle is stored; the pool owns its lifetime.
		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

		// Bind this chunk of the buffer; the last chunk may be shorter than
		// MAX_UNIFORM_BUFFER_SIZE.
		{
			const vk::VkDescriptorBufferInfo		bufferInfo	=
			{
				context.getBuffer(),
				(vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
				m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
					? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
					: (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
			};
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				m_descriptorSets[descriptorSetNdx],
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
				DE_NULL,	// pImageInfo
				&bufferInfo,
				DE_NULL,	// pTexelBufferView
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
6223
6224void RenderFragmentUniformBuffer::submit (SubmitContext& context)
6225{
6226	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
6227	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
6228
6229	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6230
6231	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6232	{
6233		const struct
6234		{
6235			const deUint32	callId;
6236			const deUint32	valuesPerPixel;
6237		} callParams =
6238		{
6239			(deUint32)descriptorSetNdx,
6240			(deUint32)divRoundUp<size_t>(m_descriptorSets.size() * (MAX_UNIFORM_BUFFER_SIZE / 4), m_targetWidth * m_targetHeight)
6241		};
6242
6243		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6244		vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6245		vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6246	}
6247}
6248
// Recompute the expected framebuffer contents on the CPU. For each pixel,
// replay every draw call (descriptor set) that wrote to it: the fragment
// shader chains 'valuesPerPixel' dependent 32-bit lookups through the bound
// uniform chunk, and the final value is written as an RGBA8 color. Later
// draws overwrite earlier ones, mirroring the GPU's draw order.
void RenderFragmentUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
{
	const deUint32	valuesPerPixel	= (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * (MAX_UNIFORM_BUFFER_SIZE / 4), m_targetWidth * m_targetHeight);
	const size_t	arraySize		= MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4);	// uvec4 elements per chunk
	const size_t	arrayIntSize	= arraySize * 4;									// deUint32 elements per chunk

	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
	{
		// First draw call whose value range reaches this pixel.
		// NOTE(review): 256u appears to be the render target width used to
		// linearize (x, y) into a pixel index — confirm against the shader.
		const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (arrayIntSize / valuesPerPixel), m_descriptorSets.size() - 1);

		for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
		{
			const size_t	offset	= descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;	// byte offset of this chunk in the reference data
			const deUint32	callId	= (deUint32)descriptorSetNdx;

			// Starting lookup index for this pixel in this draw call.
			const deUint32	id		= callId * ((deUint32)arrayIntSize / valuesPerPixel) + (deUint32)y * 256u + (deUint32)x;

			// Pixels before this call's range keep their previous value.
			if (y * 256u + x < callId * (arrayIntSize / valuesPerPixel))
				continue;
			else
			{
				deUint32 value = id;

				// Chain 'valuesPerPixel' dependent lookups: each iteration
				// reads the little-endian 32-bit word selected by the
				// previous value (modulo the chunk's word count).
				for (deUint32 i = 0; i < valuesPerPixel; i++)
				{
					value	= ((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 0))
							| (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 1)) << 8u)
							| (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 2)) << 16u)
							| (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 3)) << 24u);

				}
				// Unpack the final value into RGBA8 and write it.
				const UVec4	vec	((value >>  0u) & 0xFFu,
								 (value >>  8u) & 0xFFu,
								 (value >> 16u) & 0xFFu,
								 (value >> 24u) & 0xFFu);

				context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
			}
		}
	}
}
6291
// Render-pass command that reads the test buffer as a single storage buffer
// in the fragment shader while drawing a full-screen quad.
class RenderFragmentStorageBuffer : public RenderPassCommand
{
public:
									RenderFragmentStorageBuffer		(void) {}
									~RenderFragmentStorageBuffer	(void);

	const char*						getName							(void) const { return "RenderFragmentStorageBuffer"; }
	void							logPrepare						(TestLog&, size_t) const;
	void							logSubmit						(TestLog&, size_t) const;
	void							prepare							(PrepareRenderPassContext&);
	void							submit							(SubmitContext& context);
	void							verify							(VerifyRenderPassContext&, size_t);

private:
	// All Vulkan objects are vk::Move<> wrappers; no manual cleanup needed.
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;

	vk::VkDeviceSize				m_bufferSize;
	size_t							m_targetWidth;
	size_t							m_targetHeight;
};
6314
// All members are vk::Move<> handles; they release themselves.
RenderFragmentStorageBuffer::~RenderFragmentStorageBuffer (void)
{
}
6318
6319void RenderFragmentStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6320{
6321	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline to render buffer as storage buffer." << TestLog::EndMessage;
6322}
6323
6324void RenderFragmentStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6325{
6326	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
6327}
6328
// Build the graphics pipeline plus the descriptor pool/set that expose the
// whole test buffer to the fragment shader as a storage buffer.
void RenderFragmentStorageBuffer::prepare (PrepareRenderPassContext& context)
{
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Remember buffer/target dimensions for submit() and verify().
	m_bufferSize	= context.getBufferSize();
	m_targetWidth	= context.getTargetWidth();
	m_targetHeight	= context.getTargetHeight();

	{
		// Single storage-buffer binding at binding point 0, fragment stage only.
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
			1,
			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}
	// 12 bytes of fragment-stage push constants; NOTE(review): submit() pushes
	// sizeof(callParams) == 8 bytes (two deUint32), which fits inside this range.
	const vk::VkPushConstantRange pushConstantRange =
	{
		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
		0u,
		12u
	};

	// Full-screen quad pipeline with no vertex inputs; positions come from the shader.
	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);

	{
		// Pool sized for exactly one storage-buffer descriptor set.
		const deUint32							descriptorCount	= 1;
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
			descriptorCount
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			descriptorCount,
			1u,
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
	}

	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);

		{
			// Point the descriptor at the entire test buffer.
			const vk::VkDescriptorBufferInfo	bufferInfo	=
			{
				context.getBuffer(),
				0u,
				m_bufferSize
			};
			const vk::VkWriteDescriptorSet		write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				m_descriptorSet.get(),
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				DE_NULL,
				&bufferInfo,
				DE_NULL,
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
6425
// Record the draw: bind pipeline and descriptor set, push the shader call
// parameters, and draw the full-screen quad (6 vertices, 2 triangles).
void RenderFragmentStorageBuffer::submit (SubmitContext& context)
{
	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();

	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);

	// Push-constant layout must match storage-buffer.frag's expectations:
	// how many 32-bit values each pixel chases, and the buffer size in bytes.
	const struct
	{
		const deUint32	valuesPerPixel;
		const deUint32	bufferSize;
	} callParams =
	{
		(deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight),
		(deUint32)m_bufferSize
	};

	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
	vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
	vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
}
6447
// CPU mirror of the fragment shader: starting from a per-pixel id, repeatedly
// load a 32-bit word from the reference buffer at (value mod wordCount) and use
// the loaded value as the next index; the final value becomes the pixel color.
void RenderFragmentStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
{
	// Must match the valuesPerPixel computation in submit().
	const deUint32	valuesPerPixel	= (deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight);

	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
	{
		// Per-pixel seed; 256u presumably equals the render-target width — the
		// shader derives its ids the same way. TODO confirm against the shader.
		const deUint32	id		= (deUint32)y * 256u + (deUint32)x;

		deUint32 value = id;

		for (deUint32 i = 0; i < valuesPerPixel; i++)
		{
			// Assemble a little-endian 32-bit word from four reference bytes.
			value	= (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 0)) << 0u)
					| (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 1)) << 8u)
					| (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 2)) << 16u)
					| (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 3)) << 24u);

		}
		// Unpack the final word into RGBA bytes and store as normalized color.
		const UVec4	vec	((value >>  0u) & 0xFFu,
						 (value >>  8u) & 0xFFu,
						 (value >> 16u) & 0xFFu,
						 (value >> 24u) & 0xFFu);

		context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
	}
}
6475
// Render pass command that reads the test buffer through one or more uniform
// texel buffer views (split because of the maxTexelBufferElements limit) and
// draws once per descriptor set. verify() mirrors the shader on the CPU.
class RenderFragmentUniformTexelBuffer : public RenderPassCommand
{
public:
									RenderFragmentUniformTexelBuffer	(void) {}
									~RenderFragmentUniformTexelBuffer	(void);

	const char*						getName								(void) const { return "RenderFragmentUniformTexelBuffer"; }
	void							logPrepare							(TestLog&, size_t) const;
	void							logSubmit							(TestLog&, size_t) const;
	void							prepare								(PrepareRenderPassContext&);
	void							submit								(SubmitContext& context);
	void							verify								(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	// Raw handles (not Move<>): the sets are disowned from the pool and the
	// views are destroyed manually in the destructor.
	vector<vk::VkDescriptorSet>		m_descriptorSets;
	vector<vk::VkBufferView>		m_bufferViews;

	// Kept so the destructor can destroy the buffer views.
	const vk::DeviceInterface*		m_vkd;
	vk::VkDevice					m_device;
	vk::VkDeviceSize				m_bufferSize;
	deUint32						m_maxUniformTexelCount;
	size_t							m_targetWidth;
	size_t							m_targetHeight;
};
6502
// Destroy the manually-created buffer views; descriptor sets and pool are
// released through their own handles.
RenderFragmentUniformTexelBuffer::~RenderFragmentUniformTexelBuffer (void)
{
	for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
	{
		if (!!m_bufferViews[bufferViewNdx])
		{
			m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
			m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
		}
	}
}
6514
6515void RenderFragmentUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6516{
6517	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
6518}
6519
6520void RenderFragmentUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6521{
6522	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
6523}
6524
// Create the pipeline, then split the test buffer into chunks of at most
// maxTexelBufferElements R32_UINT texels, creating one buffer view and one
// descriptor set per chunk.
void RenderFragmentUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
{
	const vk::InstanceInterface&				vki						= context.getContext().getInstanceInterface();
	const vk::VkPhysicalDevice					physicalDevice			= context.getContext().getPhysicalDevice();
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Cache device handles for the destructor and limits/sizes for submit()/verify().
	m_device				= device;
	m_vkd					= &vkd;
	m_bufferSize			= context.getBufferSize();
	m_maxUniformTexelCount	= vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
	m_targetWidth			= context.getTargetWidth();
	m_targetHeight			= context.getTargetHeight();

	{
		// One uniform texel buffer binding at binding 0, fragment stage only.
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
			1,
			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}
	// 12 bytes of push constants, matching the three deUint32s in submit()'s callParams.
	const vk::VkPushConstantRange pushConstantRange =
	{
		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
		0u,
		12u
	};

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);

	{
		// One descriptor set per maxTexelBufferElements-sized chunk of the buffer.
		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 4));
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
			descriptorCount
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			descriptorCount,
			1u,
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
		m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
		m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
	}

	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
	{
		// Number of 32-bit texels in this chunk; the last chunk may be smaller.
		const deUint32							count			= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
																? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
																: m_maxUniformTexelCount * 4) / 4;
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		// disown(): ownership transfers to this object; freed via the pool's
		// FREE_DESCRIPTOR_SET_BIT when the pool is destroyed.
		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

		{
			// R32_UINT view covering just this chunk of the buffer.
			const vk::VkBufferViewCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
				DE_NULL,
				0u,

				context.getBuffer(),
				vk::VK_FORMAT_R32_UINT,
				descriptorSetNdx * m_maxUniformTexelCount * 4,
				count * 4
			};

			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
		}

		{
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				m_descriptorSets[descriptorSetNdx],
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
				DE_NULL,
				DE_NULL,
				&m_bufferViews[descriptorSetNdx]
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
6642
// Record one full-screen draw per descriptor set (one per buffer chunk),
// passing the chunk index and access parameters via push constants.
void RenderFragmentUniformTexelBuffer::submit (SubmitContext& context)
{
	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();

	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);

	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
	{
		// Layout must match uniform-texel-buffer.frag's push-constant block
		// (12 bytes, matching the range declared in prepare()).
		const struct
		{
			const deUint32	callId;
			const deUint32	valuesPerPixel;
			const deUint32	maxUniformTexelCount;
		} callParams =
		{
			(deUint32)descriptorSetNdx,
			(deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight),
			m_maxUniformTexelCount
		};

		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
		vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
		vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
	}
}
6669
// CPU mirror of the per-chunk draws: for each pixel, replay every draw call
// that touches it, chasing values through the chunk's texels; the value from
// the last applicable draw determines the expected pixel color.
void RenderFragmentUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
{
	// Must match the valuesPerPixel computation in submit().
	const deUint32	valuesPerPixel	= (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight);

	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
	{
		// First draw call whose pixel range includes this pixel; 256u presumably
		// equals the target width (same id scheme as the shader) — TODO confirm.
		const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxUniformTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);

		for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
		{
			// Byte offset of this chunk within the buffer.
			const size_t	offset	= descriptorSetNdx * m_maxUniformTexelCount * 4;
			const deUint32	callId	= (deUint32)descriptorSetNdx;

			const deUint32	id		= (deUint32)y * 256u + (deUint32)x;
			// Texel count of this chunk (last chunk may be short) — must match prepare().
			const deUint32	count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
									? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
									: m_maxUniformTexelCount * 4) / 4;

			// Skip draws that do not cover this pixel.
			if (y * 256u + x < callId * (m_maxUniformTexelCount / valuesPerPixel))
				continue;
			else
			{
				deUint32 value = id;

				for (deUint32 i = 0; i < valuesPerPixel; i++)
				{
					// Assemble a little-endian 32-bit word from four reference bytes.
					value	= ((deUint32)context.getReference().get( offset + (value % count) * 4 + 0))
							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);

				}
				// Unpack final word into RGBA and store as normalized color.
				const UVec4	vec	((value >>  0u) & 0xFFu,
								 (value >>  8u) & 0xFFu,
								 (value >> 16u) & 0xFFu,
								 (value >> 24u) & 0xFFu);

				context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
			}
		}
	}
}
6713
// Render pass command that reads the test buffer through one or more storage
// texel buffer views (chunked by maxTexelBufferElements) with one draw per
// descriptor set. Structure parallels RenderFragmentUniformTexelBuffer.
class RenderFragmentStorageTexelBuffer : public RenderPassCommand
{
public:
									RenderFragmentStorageTexelBuffer	(void) {}
									~RenderFragmentStorageTexelBuffer	(void);

	const char*						getName								(void) const { return "RenderFragmentStorageTexelBuffer"; }
	void							logPrepare							(TestLog&, size_t) const;
	void							logSubmit							(TestLog&, size_t) const;
	void							prepare								(PrepareRenderPassContext&);
	void							submit								(SubmitContext& context);
	void							verify								(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	// Raw handles: sets are disowned; views are destroyed in the destructor.
	vector<vk::VkDescriptorSet>		m_descriptorSets;
	vector<vk::VkBufferView>		m_bufferViews;

	// Kept so the destructor can destroy the buffer views.
	const vk::DeviceInterface*		m_vkd;
	vk::VkDevice					m_device;
	vk::VkDeviceSize				m_bufferSize;
	deUint32						m_maxStorageTexelCount;
	size_t							m_targetWidth;
	size_t							m_targetHeight;
};
6740
// Destroy the manually-created buffer views; other handles clean up via Move<>.
RenderFragmentStorageTexelBuffer::~RenderFragmentStorageTexelBuffer (void)
{
	for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
	{
		if (!!m_bufferViews[bufferViewNdx])
		{
			m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
			m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
		}
	}
}
6752
6753void RenderFragmentStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6754{
6755	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
6756}
6757
6758void RenderFragmentStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6759{
6760	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
6761}
6762
// Create the pipeline, then split the test buffer into chunks of at most
// maxTexelBufferElements R32_UINT texels, creating one buffer view and one
// descriptor set per chunk (storage texel buffer variant).
void RenderFragmentStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
{
	const vk::InstanceInterface&				vki						= context.getContext().getInstanceInterface();
	const vk::VkPhysicalDevice					physicalDevice			= context.getContext().getPhysicalDevice();
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Cache device handles for the destructor and limits/sizes for submit()/verify().
	m_device				= device;
	m_vkd					= &vkd;
	m_bufferSize			= context.getBufferSize();
	m_maxStorageTexelCount	= vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
	m_targetWidth			= context.getTargetWidth();
	m_targetHeight			= context.getTargetHeight();

	{
		// One storage texel buffer binding at binding 0, fragment stage only.
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
			1,
			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}
	// 16 bytes of push constants, matching the four deUint32s in submit()'s callParams.
	const vk::VkPushConstantRange pushConstantRange =
	{
		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
		0u,
		16u
	};

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);

	{
		// One descriptor set per maxTexelBufferElements-sized chunk of the buffer.
		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
			descriptorCount
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			descriptorCount,
			1u,
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
		m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
		m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
	}

	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
	{
		// Number of 32-bit texels in this chunk; the last chunk may be smaller.
		const deUint32							count			= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
																? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
																: m_maxStorageTexelCount * 4) / 4;
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		// disown(): ownership transfers to this object; freed when the pool dies.
		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

		{
			// R32_UINT view covering just this chunk of the buffer.
			const vk::VkBufferViewCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
				DE_NULL,
				0u,

				context.getBuffer(),
				vk::VK_FORMAT_R32_UINT,
				descriptorSetNdx * m_maxStorageTexelCount * 4,
				count * 4
			};

			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
		}

		{
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				m_descriptorSets[descriptorSetNdx],
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
				DE_NULL,
				DE_NULL,
				&m_bufferViews[descriptorSetNdx]
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
6880
// Record one full-screen draw per descriptor set (one per buffer chunk),
// passing chunk index, access parameters and the chunk's texel count via
// push constants.
void RenderFragmentStorageTexelBuffer::submit (SubmitContext& context)
{
	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();

	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);

	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
	{
		// Layout must match storage-texel-buffer.frag's push-constant block
		// (16 bytes, matching the range declared in prepare()).
		const struct
		{
			const deUint32	callId;
			const deUint32	valuesPerPixel;
			const deUint32	maxStorageTexelCount;
			const deUint32	width;
		} callParams =
		{
			(deUint32)descriptorSetNdx,
			(deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight),
			m_maxStorageTexelCount,
			// Texel count of this chunk (last chunk may be short).
			(deUint32)(m_bufferSize < (descriptorSetNdx + 1u) * m_maxStorageTexelCount * 4u
								? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4u
								: m_maxStorageTexelCount * 4u) / 4u
		};

		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
		vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
		vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
	}
}
6911
// CPU mirror of the per-chunk draws: for each pixel, replay every draw call
// that covers it, chasing values through the chunk's texels; the last
// applicable draw's final value determines the expected pixel color.
void RenderFragmentStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
{
	// Must match the valuesPerPixel computation in submit().
	const deUint32	valuesPerPixel	= (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight);

	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
	{
		// First draw call whose pixel range includes this pixel; 256u presumably
		// equals the target width (same id scheme as the shader) — TODO confirm.
		const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxStorageTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);

		for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
		{
			// Byte offset of this chunk within the buffer.
			const size_t	offset	= descriptorSetNdx * m_maxStorageTexelCount * 4;
			const deUint32	callId	= (deUint32)descriptorSetNdx;

			const deUint32	id		= (deUint32)y * 256u + (deUint32)x;
			// Texel count of this chunk (last chunk may be short) — must match prepare().
			const deUint32	count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
									? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
									: m_maxStorageTexelCount * 4) / 4;

			// Skip draws that do not cover this pixel.
			if (y * 256u + x < callId * (m_maxStorageTexelCount / valuesPerPixel))
				continue;
			else
			{
				deUint32 value = id;

				for (deUint32 i = 0; i < valuesPerPixel; i++)
				{
					// Assemble a little-endian 32-bit word from four reference bytes.
					value	= ((deUint32)context.getReference().get( offset + (value % count) * 4 + 0))
							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);

				}
				// Unpack final word into RGBA and store as normalized color.
				const UVec4	vec	((value >>  0u) & 0xFFu,
								 (value >>  8u) & 0xFFu,
								 (value >> 16u) & 0xFFu,
								 (value >> 24u) & 0xFFu);

				context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
			}
		}
	}
}
6955
// Render pass command that draws a full-screen quad whose fragment shader
// reads the test image through a storage-image binding; verify() reproduces
// the shader's pixel chasing on the CPU reference image.
class RenderFragmentStorageImage : public RenderPassCommand
{
public:
									RenderFragmentStorageImage	(void) {}
									~RenderFragmentStorageImage	(void);

	const char*						getName						(void) const { return "RenderFragmentStorageImage"; }
	void							logPrepare					(TestLog&, size_t) const;
	void							logSubmit					(TestLog&, size_t) const;
	void							prepare						(PrepareRenderPassContext&);
	void							submit						(SubmitContext& context);
	void							verify						(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
	vk::Move<vk::VkImageView>		m_imageView;
};
6975
// Nothing to release explicitly: all Vulkan handles are held in Move<> wrappers.
RenderFragmentStorageImage::~RenderFragmentStorageImage (void)
{
}
6979
// Log what prepare() will do for this command.
void RenderFragmentStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
}
6984
// Log what submit() will record into the command buffer.
void RenderFragmentStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
}
6989
// Create the pipeline, a 2D R8G8B8A8_UNORM view of the test image, and the
// descriptor pool/set binding that view as a storage image.
void RenderFragmentStorageImage::prepare (PrepareRenderPassContext& context)
{
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	{
		// Single storage-image binding at binding 0, fragment stage only.
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
			1,
			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}

	// No push constants (0u, DE_NULL), unlike the buffer-based variants.
	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);

	{
		// Pool sized for exactly one storage-image descriptor set.
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
			1
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			1u,
			1u,
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
	}

	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);

		{
			// 2D view of the test image: single mip level, single array layer.
			const vk::VkImageViewCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
				DE_NULL,
				0u,

				context.getImage(),
				vk::VK_IMAGE_VIEW_TYPE_2D,
				vk::VK_FORMAT_R8G8B8A8_UNORM,
				vk::makeComponentMappingRGBA(),
				{
					vk::VK_IMAGE_ASPECT_COLOR_BIT,
					0u,
					1u,
					0u,
					1u
				}
			};

			m_imageView = vk::createImageView(vkd, device, &createInfo);
		}

		{
			// Sampler is null: storage images are accessed without a sampler.
			const vk::VkDescriptorImageInfo			imageInfo	=
			{
				0,
				*m_imageView,
				context.getImageLayout()
			};
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				*m_descriptorSet,
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
				&imageInfo,
				DE_NULL,
				DE_NULL,
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
7098
// Record the draw: bind pipeline and descriptor set, then draw the
// full-screen quad (6 vertices, 2 triangles). No push constants are used.
void RenderFragmentStorageImage::submit (SubmitContext& context)
{
	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();

	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);

	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
	vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
}
7109
// CPU mirror of storage-image.frag: starting from the pixel coordinates,
// repeatedly derive a new image position from the current RGBA value (xor'd
// with its high components) and sample the reference image there; the final
// value becomes the expected pixel color.
void RenderFragmentStorageImage::verify (VerifyRenderPassContext& context, size_t)
{
	const UVec2		size			= UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
	// Scale iteration count with image area relative to the 256x256 target.
	const deUint32	valuesPerPixel	= de::max<deUint32>(1u, (size.x() * size.y()) / (256u * 256u));

	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
	{
		UVec4	value	= UVec4(x, y, 0u, 0u);

		for (deUint32 i = 0; i < valuesPerPixel; i++)
		{
			// Next sample position derived from the current value; z/w select a
			// 256-texel block, x/y (xor'd) the offset within it. Wraps via modulo.
			const UVec2	pos			= UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
			const Vec4	floatValue	= context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());

			// Convert the normalized color back to 0..255 integer channels.
			value = UVec4((deUint32)(floatValue.x() * 255.0f),
						  (deUint32)(floatValue.y() * 255.0f),
						  (deUint32)(floatValue.z() * 255.0f),
						  (deUint32)(floatValue.w() * 255.0f));

		}
		context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
	}
}
7134
// Render pass command that reads the test image through a combined image
// sampler (sampled image) from the fragment shader. Mirrors
// RenderFragmentStorageImage but adds a sampler object.
class RenderFragmentSampledImage : public RenderPassCommand
{
public:
				RenderFragmentSampledImage	(void) {}
				~RenderFragmentSampledImage	(void);

	const char*	getName						(void) const { return "RenderFragmentSampledImage"; }
	void		logPrepare					(TestLog&, size_t) const;
	void		logSubmit					(TestLog&, size_t) const;
	void		prepare						(PrepareRenderPassContext&);
	void		submit						(SubmitContext& context);
	void		verify						(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
	vk::Move<vk::VkImageView>		m_imageView;
	vk::Move<vk::VkSampler>			m_sampler;
};
7155
7156RenderFragmentSampledImage::~RenderFragmentSampledImage (void)
7157{
7158}
7159
7160void RenderFragmentSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
7161{
7162	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
7163}
7164
7165void RenderFragmentSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
7166{
7167	log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
7168}
7169
// Builds everything needed to sample the image under test from a fragment
// shader: graphics pipeline, descriptor pool/set, image view, sampler, and
// the descriptor update tying them together.
void RenderFragmentSampledImage::prepare (PrepareRenderPassContext& context)
{
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Single combined image sampler at binding 0, visible to the fragment stage.
	{
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
			1,
			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);

	// Descriptor pool with room for exactly one combined-image-sampler set.
	{
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
			1
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			1u,
			1u,
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
	}

	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);

		// 2D RGBA8 view over mip 0 / layer 0 of the image under test.
		{
			const vk::VkImageViewCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
				DE_NULL,
				0u,

				context.getImage(),
				vk::VK_IMAGE_VIEW_TYPE_2D,
				vk::VK_FORMAT_R8G8B8A8_UNORM,
				vk::makeComponentMappingRGBA(),
				{
					vk::VK_IMAGE_ASPECT_COLOR_BIT,
					0u,
					1u,
					0u,
					1u
				}
			};

			m_imageView = vk::createImageView(vkd, device, &createInfo);
		}

		// Nearest-filtering, clamp-to-edge, non-anisotropic sampler.
		{
			const vk::VkSamplerCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
				DE_NULL,
				0u,

				vk::VK_FILTER_NEAREST,
				vk::VK_FILTER_NEAREST,

				vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
				0.0f,
				VK_FALSE,
				1.0f,
				VK_FALSE,
				vk::VK_COMPARE_OP_ALWAYS,
				0.0f,
				0.0f,
				vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
				VK_FALSE
			};

			m_sampler = vk::createSampler(vkd, device, &createInfo);
		}

		// Point the descriptor at the view/sampler, using the image layout
		// reported by the context.
		{
			const vk::VkDescriptorImageInfo			imageInfo	=
			{
				*m_sampler,
				*m_imageView,
				context.getImageLayout()
			};
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				*m_descriptorSet,
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
				&imageInfo,
				DE_NULL,
				DE_NULL,
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
7306
7307void RenderFragmentSampledImage::submit (SubmitContext& context)
7308{
7309	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
7310	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
7311
7312	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
7313
7314	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
7315	vkd.cmdDraw(commandBuffer, 6u, 1u, 0u, 0u);
7316}
7317
7318void RenderFragmentSampledImage::verify (VerifyRenderPassContext& context, size_t)
7319{
7320	const UVec2		size			= UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
7321	const deUint32	valuesPerPixel	= de::max<deUint32>(1u, (size.x() * size.y()) / (256u * 256u));
7322
7323	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
7324	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
7325	{
7326		UVec4	value	= UVec4(x, y, 0u, 0u);
7327
7328		for (deUint32 i = 0; i < valuesPerPixel; i++)
7329		{
7330			const UVec2	pos			= UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
7331			const Vec4	floatValue	= context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());
7332
7333			value = UVec4((deUint32)(floatValue.x() * 255.0f),
7334						  (deUint32)(floatValue.y() * 255.0f),
7335						  (deUint32)(floatValue.z() * 255.0f),
7336						  (deUint32)(floatValue.w() * 255.0f));
7337
7338		}
7339
7340		context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
7341	}
7342}
7343
// All operations the randomized test plan can perform on the memory object.
// Grouped by where they are legal: host-side ops, command-buffer ops, and
// render-pass ops (see enum Stage below).
enum Op
{
	// Host-side memory mapping operations
	OP_MAP,
	OP_UNMAP,

	OP_MAP_FLUSH,
	OP_MAP_INVALIDATE,

	OP_MAP_READ,
	OP_MAP_WRITE,
	OP_MAP_MODIFY,

	// Host-side buffer object lifetime operations
	OP_BUFFER_CREATE,
	OP_BUFFER_DESTROY,
	OP_BUFFER_BINDMEMORY,

	OP_QUEUE_WAIT_FOR_IDLE,
	OP_DEVICE_WAIT_FOR_IDLE,

	OP_COMMAND_BUFFER_BEGIN,
	OP_COMMAND_BUFFER_END,

	// Buffer transfer operations
	OP_BUFFER_FILL,
	OP_BUFFER_UPDATE,

	OP_BUFFER_COPY_TO_BUFFER,
	OP_BUFFER_COPY_FROM_BUFFER,

	OP_BUFFER_COPY_TO_IMAGE,
	OP_BUFFER_COPY_FROM_IMAGE,

	// Image object lifetime and layout operations
	OP_IMAGE_CREATE,
	OP_IMAGE_DESTROY,
	OP_IMAGE_BINDMEMORY,

	OP_IMAGE_TRANSITION_LAYOUT,

	// Image transfer operations
	OP_IMAGE_COPY_TO_BUFFER,
	OP_IMAGE_COPY_FROM_BUFFER,

	OP_IMAGE_COPY_TO_IMAGE,
	OP_IMAGE_COPY_FROM_IMAGE,

	OP_IMAGE_BLIT_TO_IMAGE,
	OP_IMAGE_BLIT_FROM_IMAGE,

	OP_IMAGE_RESOLVE,

	// Pipeline barriers at different scopes
	OP_PIPELINE_BARRIER_GLOBAL,
	OP_PIPELINE_BARRIER_BUFFER,
	OP_PIPELINE_BARRIER_IMAGE,

	// Renderpass operations
	OP_RENDERPASS_BEGIN,
	OP_RENDERPASS_END,

	// Commands inside render pass
	OP_RENDER_VERTEX_BUFFER,
	OP_RENDER_INDEX_BUFFER,

	OP_RENDER_VERTEX_UNIFORM_BUFFER,
	OP_RENDER_FRAGMENT_UNIFORM_BUFFER,

	OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER,
	OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER,

	OP_RENDER_VERTEX_STORAGE_BUFFER,
	OP_RENDER_FRAGMENT_STORAGE_BUFFER,

	OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER,
	OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER,

	OP_RENDER_VERTEX_STORAGE_IMAGE,
	OP_RENDER_FRAGMENT_STORAGE_IMAGE,

	OP_RENDER_VERTEX_SAMPLED_IMAGE,
	OP_RENDER_FRAGMENT_SAMPLED_IMAGE,
};
7423
// Where the test plan currently is; constrains which Ops are available.
enum Stage
{
	STAGE_HOST,				// Outside any command buffer; host ops only
	STAGE_COMMAND_BUFFER,	// Recording a command buffer, outside render pass

	STAGE_RENDER_PASS		// Recording inside a render pass instance
};
7431
7432vk::VkAccessFlags getWriteAccessFlags (void)
7433{
7434	return vk::VK_ACCESS_SHADER_WRITE_BIT
7435		| vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
7436		| vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
7437		| vk::VK_ACCESS_TRANSFER_WRITE_BIT
7438		| vk::VK_ACCESS_HOST_WRITE_BIT
7439		| vk::VK_ACCESS_MEMORY_WRITE_BIT;
7440}
7441
7442bool isWriteAccess (vk::VkAccessFlagBits access)
7443{
7444	return (getWriteAccessFlags() & access) != 0;
7445}
7446
// Model of memory availability/visibility across pipeline stages, used to
// decide which accesses are valid without a barrier and what a full barrier
// would need to cover. Tracks, per destination stage: incomplete operations,
// invisible operations, pending layout transitions, and per-source-stage
// unavailable writes.
class CacheState
{
public:
									CacheState				(vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses);

	// True if 'access' from 'stage' is currently safe without a barrier.
	bool							isValid					(vk::VkPipelineStageFlagBits	stage,
															 vk::VkAccessFlagBits			access) const;

	// Record the side effects of performing 'access' from 'stage'.
	void							perform					(vk::VkPipelineStageFlagBits	stage,
															 vk::VkAccessFlagBits			access);

	void							submitCommandBuffer		(void);
	void							waitForIdle				(void);

	// Compute a barrier that makes everything complete, available and visible.
	void							getFullBarrier			(vk::VkPipelineStageFlags&	srcStages,
															 vk::VkAccessFlags&			srcAccesses,
															 vk::VkPipelineStageFlags&	dstStages,
															 vk::VkAccessFlags&			dstAccesses) const;

	// Apply the effects of a pipeline barrier to the tracked state.
	void							barrier					(vk::VkPipelineStageFlags	srcStages,
															 vk::VkAccessFlags			srcAccesses,
															 vk::VkPipelineStageFlags	dstStages,
															 vk::VkAccessFlags			dstAccesses);

	// Apply the effects of a barrier that also performs a layout transition.
	void							imageLayoutBarrier		(vk::VkPipelineStageFlags	srcStages,
															 vk::VkAccessFlags			srcAccesses,
															 vk::VkPipelineStageFlags	dstStages,
															 vk::VkAccessFlags			dstAccesses);

	// Debug-only validation that a layout-transition barrier is legal here.
	void							checkImageLayoutBarrier	(vk::VkPipelineStageFlags	srcStages,
															 vk::VkAccessFlags			srcAccesses,
															 vk::VkPipelineStageFlags	dstStages,
															 vk::VkAccessFlags			dstAccesses);

	// Everything is clean and there is no need for barriers
	bool							isClean					(void) const;

	vk::VkPipelineStageFlags		getAllowedStages		(void) const { return m_allowedStages; }
	vk::VkAccessFlags				getAllowedAcceses		(void) const { return m_allowedAccesses; }
private:
	// Limit which stages and accesses are used by the CacheState tracker
	const vk::VkPipelineStageFlags	m_allowedStages;
	const vk::VkAccessFlags			m_allowedAccesses;

	// [dstStage][srcStage] = srcAccesses
	// In stage dstStage write srcAccesses from srcStage are not yet available
	vk::VkAccessFlags				m_unavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST];
	// Latest pipeline transition is not available in stage
	bool							m_unavailableLayoutTransition[PIPELINESTAGE_LAST];
	// [dstStage] = dstAccesses
	// In stage dstStage ops with dstAccesses are not yet visible
	vk::VkAccessFlags				m_invisibleOperations[PIPELINESTAGE_LAST];

	// [dstStage] = srcStage
	// Memory operation in srcStage have not completed before dstStage
	vk::VkPipelineStageFlags		m_incompleteOperations[PIPELINESTAGE_LAST];
};
7504
7505CacheState::CacheState (vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses)
7506	: m_allowedStages	(allowedStages)
7507	, m_allowedAccesses	(allowedAccesses)
7508{
7509	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7510	{
7511		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7512
7513		if ((dstStage_ & m_allowedStages) == 0)
7514			continue;
7515
7516		// All operations are initially visible
7517		m_invisibleOperations[dstStage] = 0;
7518
7519		// There are no incomplete read operations initially
7520		m_incompleteOperations[dstStage] = 0;
7521
7522		// There are no incomplete layout transitions
7523		m_unavailableLayoutTransition[dstStage] = false;
7524
7525		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7526		{
7527			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7528
7529			if ((srcStage_ & m_allowedStages) == 0)
7530				continue;
7531
7532			// There are no write operations that are not yet available
7533			// initially.
7534			m_unavailableWriteOperations[dstStage][srcStage] = 0;
7535		}
7536	}
7537}
7538
7539bool CacheState::isValid (vk::VkPipelineStageFlagBits	stage,
7540						  vk::VkAccessFlagBits			access) const
7541{
7542	DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7543	DE_ASSERT((stage & (~m_allowedStages)) == 0);
7544
7545	const PipelineStage	dstStage	= pipelineStageFlagToPipelineStage(stage);
7546
7547	// Previous operations are not visible to access on stage
7548	if (m_unavailableLayoutTransition[dstStage] || (m_invisibleOperations[dstStage] & access) != 0)
7549		return false;
7550
7551	if (isWriteAccess(access))
7552	{
7553		// Memory operations from other stages have not completed before
7554		// dstStage
7555		if (m_incompleteOperations[dstStage] != 0)
7556			return false;
7557	}
7558
7559	return true;
7560}
7561
7562void CacheState::perform (vk::VkPipelineStageFlagBits	stage,
7563						  vk::VkAccessFlagBits			access)
7564{
7565	DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7566	DE_ASSERT((stage & (~m_allowedStages)) == 0);
7567
7568	const PipelineStage srcStage = pipelineStageFlagToPipelineStage(stage);
7569
7570	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7571	{
7572		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7573
7574		if ((dstStage_ & m_allowedStages) == 0)
7575			continue;
7576
7577		// Mark stage as incomplete for all stages
7578		m_incompleteOperations[dstStage] |= stage;
7579
7580		if (isWriteAccess(access))
7581		{
7582			// Mark all accesses from all stages invisible
7583			m_invisibleOperations[dstStage] |= m_allowedAccesses;
7584
7585			// Mark write access from srcStage unavailable to all stages
7586			m_unavailableWriteOperations[dstStage][srcStage] |= access;
7587		}
7588	}
7589}
7590
7591void CacheState::submitCommandBuffer (void)
7592{
7593	// Flush all host writes and reads
7594	barrier(m_allowedStages & vk::VK_PIPELINE_STAGE_HOST_BIT,
7595			m_allowedAccesses & (vk::VK_ACCESS_HOST_READ_BIT | vk::VK_ACCESS_HOST_WRITE_BIT),
7596			m_allowedStages,
7597			m_allowedAccesses);
7598}
7599
7600void CacheState::waitForIdle (void)
7601{
7602	// Make all writes available
7603	barrier(m_allowedStages,
7604			m_allowedAccesses & getWriteAccessFlags(),
7605			m_allowedStages,
7606			0);
7607
7608	// Make all writes visible on device side
7609	barrier(m_allowedStages,
7610			0,
7611			m_allowedStages & (~vk::VK_PIPELINE_STAGE_HOST_BIT),
7612			m_allowedAccesses);
7613}
7614
// Computes the most conservative barrier that would bring the tracked state
// back to "clean": all operations complete, all writes available and all
// accesses visible in every allowed stage.
void CacheState::getFullBarrier (vk::VkPipelineStageFlags&	srcStages,
								 vk::VkAccessFlags&			srcAccesses,
								 vk::VkPipelineStageFlags&	dstStages,
								 vk::VkAccessFlags&			dstAccesses) const
{
	srcStages	= 0;
	srcAccesses	= 0;
	dstStages	= 0;
	dstAccesses	= 0;

	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
	{
		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);

		if ((dstStage_ & m_allowedStages) == 0)
			continue;

		// Make sure all previous operation are complete in all stages
		if (m_incompleteOperations[dstStage])
		{
			dstStages |= dstStage_;
			srcStages |= m_incompleteOperations[dstStage];
		}

		// Make sure all operations are visible in dstStage
		if (m_invisibleOperations[dstStage])
		{
			dstStages |= dstStage_;
			dstAccesses |= m_invisibleOperations[dstStage];
		}

		// Make sure all write operations from all stages are available
		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
		{
			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

			if ((srcStage_ & m_allowedStages) == 0)
				continue;

			if (m_unavailableWriteOperations[dstStage][srcStage])
			{
				dstStages |= dstStage_;
				// NOTE(review): srcStages is widened with dstStage_ here, not
				// srcStage_ — confirm this matches the intended availability
				// model before changing it.
				srcStages |= dstStage_;
				srcAccesses |= m_unavailableWriteOperations[dstStage][srcStage];
			}

			if (m_unavailableLayoutTransition[dstStage] && !m_unavailableLayoutTransition[srcStage])
			{
				// Add dependency between srcStage and dstStage if layout transition has not completed in dstStage,
				// but has completed in srcStage.
				dstStages |= dstStage_;
				srcStages |= dstStage_;
			}
		}
	}

	// The computed barrier must stay inside the allowed stage/access sets.
	DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
	DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
	DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
	DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
}
7676
// Debug-only sanity checks that a layout-transition barrier is legal given
// the tracked state. A layout transition requires that all prior work has
// completed before srcStages and that at least one stage has all writes
// available. No-op in release builds.
void CacheState::checkImageLayoutBarrier (vk::VkPipelineStageFlags	srcStages,
										 vk::VkAccessFlags			srcAccesses,
										 vk::VkPipelineStageFlags	dstStages,
										 vk::VkAccessFlags			dstAccesses)
{
	DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
	DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
	DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
	DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);

	// Parameters are only inspected in debug builds below.
	DE_UNREF(srcStages);
	DE_UNREF(srcAccesses);

	DE_UNREF(dstStages);
	DE_UNREF(dstAccesses);

#if defined(DE_DEBUG)
	// Check that all stages have completed before srcStages or are in srcStages.
	{
		vk::VkPipelineStageFlags completedStages = srcStages;

		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
		{
			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

			if ((srcStage_ & srcStages) == 0)
				continue;

			// A stage counts as completed if it is not pending in any srcStage.
			completedStages |= (~m_incompleteOperations[srcStage]);
		}

		DE_ASSERT((completedStages & m_allowedStages) == m_allowedStages);
	}

	// Check that any write is available at least in one stage. Since all stages are complete even single flush is enough.
	if ((getWriteAccessFlags() & m_allowedAccesses) != 0 && (srcAccesses & getWriteAccessFlags()) == 0)
	{
		bool anyWriteAvailable = false;

		for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
		{
			const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);

			if ((dstStage_ & m_allowedStages) == 0)
				continue;

			for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
			{
				const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

				if ((srcStage_ & m_allowedStages) == 0)
					continue;

				// Any pair with less than the full write mask outstanding
				// means some write is already available.
				if (m_unavailableWriteOperations[dstStage][srcStage] != (getWriteAccessFlags() & m_allowedAccesses))
				{
					anyWriteAvailable = true;
					break;
				}
			}
		}

		DE_ASSERT(anyWriteAvailable);
	}
#endif
}
7742
7743void CacheState::imageLayoutBarrier (vk::VkPipelineStageFlags	srcStages,
7744									 vk::VkAccessFlags			srcAccesses,
7745									 vk::VkPipelineStageFlags	dstStages,
7746									 vk::VkAccessFlags			dstAccesses)
7747{
7748	checkImageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
7749
7750	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7751	{
7752		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7753
7754		if ((dstStage_ & m_allowedStages) == 0)
7755			continue;
7756
7757		// All stages are incomplete after the barrier except each dstStage in it self.
7758		m_incompleteOperations[dstStage] = m_allowedStages & (~dstStage_);
7759
7760		// All memory operations are invisible unless they are listed in dstAccess
7761		m_invisibleOperations[dstStage] = m_allowedAccesses & (~dstAccesses);
7762
7763		// Layout transition is unavailable in stage unless it was listed in dstStages
7764		m_unavailableLayoutTransition[dstStage]= (dstStage_ & dstStages) == 0;
7765
7766		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7767		{
7768			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7769
7770			if ((srcStage_ & m_allowedStages) == 0)
7771				continue;
7772
7773			// All write operations are available after layout transition
7774			m_unavailableWriteOperations[dstStage][srcStage] = 0;
7775		}
7776	}
7777}
7778
// Applies the effects of a regular (non-layout-transition) pipeline barrier.
// First propagates transitive guarantees (what srcStages already saw, the
// dstStages now see too), then applies the barrier itself: completion of
// srcStages, availability of srcAccesses, and — once all writes are available
// in a stage — visibility of dstAccesses.
void CacheState::barrier (vk::VkPipelineStageFlags	srcStages,
						  vk::VkAccessFlags			srcAccesses,
						  vk::VkPipelineStageFlags	dstStages,
						  vk::VkAccessFlags			dstAccesses)
{
	DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
	DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
	DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
	DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);

	// Transitivity
	{
		// Snapshot the pre-barrier state so the propagation below reads
		// consistent values even as entries are updated.
		vk::VkPipelineStageFlags		oldIncompleteOperations[PIPELINESTAGE_LAST];
		vk::VkAccessFlags				oldUnavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST];
		bool							oldUnavailableLayoutTransition[PIPELINESTAGE_LAST];

		deMemcpy(oldIncompleteOperations, m_incompleteOperations, sizeof(oldIncompleteOperations));
		deMemcpy(oldUnavailableWriteOperations, m_unavailableWriteOperations, sizeof(oldUnavailableWriteOperations));
		deMemcpy(oldUnavailableLayoutTransition, m_unavailableLayoutTransition, sizeof(oldUnavailableLayoutTransition));

		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
		{
			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

			if ((srcStage_ & srcStages) == 0)
				continue;

			for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
			{
				const PipelineStage	dstStage			= pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);

				if ((dstStage_ & dstStages) == 0)
					continue;

				// Stages that have completed before srcStage have also completed before dstStage
				m_incompleteOperations[dstStage] &= oldIncompleteOperations[srcStage];

				// Image layout transition in srcStage are now available in dstStage
				m_unavailableLayoutTransition[dstStage] &= oldUnavailableLayoutTransition[srcStage];

				for (vk::VkPipelineStageFlags sharedStage_ = 1; sharedStage_ <= m_allowedStages; sharedStage_ <<= 1)
				{
					const PipelineStage	sharedStage			= pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)sharedStage_);

					if ((sharedStage_ & m_allowedStages) == 0)
						continue;

					// Writes that are available in srcStage are also available in dstStage
					m_unavailableWriteOperations[dstStage][sharedStage] &= oldUnavailableWriteOperations[srcStage][sharedStage];
				}
			}
		}
	}

	// Barrier
	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
	{
		const PipelineStage	dstStage			= pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
		bool				allWritesAvailable	= true;

		if ((dstStage_ & dstStages) == 0)
			continue;

		// Operations in srcStages have completed before any stage in dstStages
		m_incompleteOperations[dstStage] &= ~srcStages;

		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
		{
			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

			if ((srcStage_ & m_allowedStages) == 0)
				continue;

			// Make srcAccesses from srcStage available in dstStage
			if ((srcStage_ & srcStages) != 0)
				m_unavailableWriteOperations[dstStage][srcStage] &= ~srcAccesses;

			if (m_unavailableWriteOperations[dstStage][srcStage] != 0)
				allWritesAvailable = false;
		}

		// If all writes are available in dstStage make dstAccesses also visible
		if (allWritesAvailable)
			m_invisibleOperations[dstStage] &= ~dstAccesses;
	}
}
7865
7866bool CacheState::isClean (void) const
7867{
7868	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7869	{
7870		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7871
7872		if ((dstStage_ & m_allowedStages) == 0)
7873			continue;
7874
7875		// Some operations are not visible to some stages
7876		if (m_invisibleOperations[dstStage] != 0)
7877			return false;
7878
7879		// There are operation that have not completed yet
7880		if (m_incompleteOperations[dstStage] != 0)
7881			return false;
7882
7883		// Layout transition has not completed yet
7884		if (m_unavailableLayoutTransition[dstStage])
7885			return false;
7886
7887		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7888		{
7889			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7890
7891			if ((srcStage_ & m_allowedStages) == 0)
7892				continue;
7893
7894			// Some write operations are not available yet
7895			if (m_unavailableWriteOperations[dstStage][srcStage] != 0)
7896				return false;
7897		}
7898	}
7899
7900	return true;
7901}
7902
7903bool layoutSupportedByUsage (Usage usage, vk::VkImageLayout layout)
7904{
7905	switch (layout)
7906	{
7907		case vk::VK_IMAGE_LAYOUT_GENERAL:
7908			return true;
7909
7910		case vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
7911			return (usage & USAGE_COLOR_ATTACHMENT) != 0;
7912
7913		case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
7914			return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7915
7916		case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
7917			return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7918
7919		case vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
7920			// \todo [2016-03-09 mika] Should include input attachment
7921			return (usage & USAGE_SAMPLED_IMAGE) != 0;
7922
7923		case vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
7924			return (usage & USAGE_TRANSFER_SRC) != 0;
7925
7926		case vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
7927			return (usage & USAGE_TRANSFER_DST) != 0;
7928
7929		case vk::VK_IMAGE_LAYOUT_PREINITIALIZED:
7930			return true;
7931
7932		default:
7933			DE_FATAL("Unknown layout");
7934			return false;
7935	}
7936}
7937
7938size_t getNumberOfSupportedLayouts (Usage usage)
7939{
7940	const vk::VkImageLayout layouts[] =
7941	{
7942		vk::VK_IMAGE_LAYOUT_GENERAL,
7943		vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7944		vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7945		vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7946		vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7947		vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7948		vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7949	};
7950	size_t supportedLayoutCount = 0;
7951
7952	for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7953	{
7954		const vk::VkImageLayout layout = layouts[layoutNdx];
7955
7956		if (layoutSupportedByUsage(usage, layout))
7957			supportedLayoutCount++;
7958	}
7959
7960	return supportedLayoutCount;
7961}
7962
7963vk::VkImageLayout getRandomNextLayout (de::Random&			rng,
7964									   Usage				usage,
7965									   vk::VkImageLayout	previousLayout)
7966{
7967	const vk::VkImageLayout	layouts[] =
7968	{
7969		vk::VK_IMAGE_LAYOUT_GENERAL,
7970		vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7971		vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7972		vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7973		vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7974		vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7975		vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7976	};
7977	const size_t			supportedLayoutCount = getNumberOfSupportedLayouts(usage);
7978
7979	DE_ASSERT(supportedLayoutCount > 0);
7980
7981	size_t nextLayoutNdx = ((size_t)rng.getUint64()) % (previousLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
7982														? supportedLayoutCount
7983														: supportedLayoutCount - 1);
7984
7985	for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7986	{
7987		const vk::VkImageLayout layout = layouts[layoutNdx];
7988
7989		if (layoutSupportedByUsage(usage, layout) && layout != previousLayout)
7990		{
7991			if (nextLayoutNdx == 0)
7992				return layout;
7993			else
7994				nextLayoutNdx--;
7995		}
7996	}
7997
7998	DE_FATAL("Unreachable");
7999	return vk::VK_IMAGE_LAYOUT_UNDEFINED;
8000}
8001
// Complete model of the test's current state: where we are (host, command
// buffer, render pass), the cache/visibility model, and the lifetime flags of
// the buffer/image objects under test. Drives which Ops are legal next.
struct State
{
	State (Usage usage, deUint32 seed)
		: stage					(STAGE_HOST)
		, cache					(usageToStageFlags(usage), usageToAccessFlags(usage))
		, rng					(seed)
		, mapped				(false)
		, hostInvalidated		(true)
		, hostFlushed			(true)
		, memoryDefined			(false)
		, hasBuffer				(false)
		, hasBoundBufferMemory	(false)
		, hasImage				(false)
		, hasBoundImageMemory	(false)
		, imageLayout			(vk::VK_IMAGE_LAYOUT_UNDEFINED)
		, imageDefined			(false)
		, queueIdle				(true)
		, deviceIdle			(true)
		, commandBufferIsEmpty	(true)
		, renderPassIsEmpty		(true)
	{
	}

	Stage				stage;					// Current recording stage
	CacheState			cache;					// Availability/visibility tracking
	de::Random			rng;					// Per-test deterministic RNG

	bool				mapped;					// Memory is currently host-mapped
	bool				hostInvalidated;		// Host caches invalidated since last device write
	bool				hostFlushed;			// Host writes flushed to device
	bool				memoryDefined;			// Memory contents have defined values

	bool				hasBuffer;				// Buffer object exists
	bool				hasBoundBufferMemory;	// ...and has memory bound

	bool				hasImage;				// Image object exists
	bool				hasBoundImageMemory;	// ...and has memory bound
	vk::VkImageLayout	imageLayout;			// Current image layout (UNDEFINED before first transition)
	bool				imageDefined;			// Image contents have defined values

	bool				queueIdle;				// No work pending on the queue
	bool				deviceIdle;				// No work pending on the device

	bool				commandBufferIsEmpty;	// Nothing recorded yet in current command buffer
	bool				renderPassIsEmpty;		// Nothing recorded yet in current render pass
};
8048
// Collect into 'ops' every operation that is legal to perform next given the
// simulated 'state', the memory 'usage' flags and whether the configuration
// supports buffers and/or images.
//
// The conditions mirror Vulkan validity rules as tracked by the State/cache
// model, e.g.:
//  - host reads/writes require the mapping to be invalidated and the queue idle,
//  - transfer commands require the matching USAGE_TRANSFER_* bit and a cache
//    state where the transfer stage access is valid,
//  - render-pass reads require defined memory and, for images, a compatible
//    layout.
//
// Parameters:
//  state           - current simulated state of memory/object/cache.
//  supportsBuffers - whether buffer objects may be created at all.
//  supportsImages  - whether image objects may be created at all.
//  usage           - allowed usage flags for the memory under test.
//  ops             - output vector; legal ops are appended (not cleared).
void getAvailableOps (const State& state, bool supportsBuffers, bool supportsImages, Usage usage, vector<Op>& ops)
{
	if (state.stage == STAGE_HOST)
	{
		if (usage & (USAGE_HOST_READ | USAGE_HOST_WRITE))
		{
			// Host memory operations
			if (state.mapped)
			{
				ops.push_back(OP_UNMAP);

				// Avoid flush and finish if they are not needed
				if (!state.hostFlushed)
					ops.push_back(OP_MAP_FLUSH);

				// Invalidate only when there is something device-written to make
				// visible and any pending host accesses are already valid.
				if (!state.hostInvalidated
					&& state.queueIdle
					&& ((usage & USAGE_HOST_READ) == 0
						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
					&& ((usage & USAGE_HOST_WRITE) == 0
						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)))
				{
					ops.push_back(OP_MAP_INVALIDATE);
				}

				// Read-modify-write: requires both host accesses to be valid and
				// defined memory contents to read back.
				if (usage & USAGE_HOST_READ
					&& usage & USAGE_HOST_WRITE
					&& state.memoryDefined
					&& state.hostInvalidated
					&& state.queueIdle
					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)
					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
				{
					ops.push_back(OP_MAP_MODIFY);
				}

				// Pure host read: contents must be defined and visible to the host.
				if (usage & USAGE_HOST_READ
					&& state.memoryDefined
					&& state.hostInvalidated
					&& state.queueIdle
					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
				{
					ops.push_back(OP_MAP_READ);
				}

				// Pure host write: does not require defined contents.
				if (usage & USAGE_HOST_WRITE
					&& state.hostInvalidated
					&& state.queueIdle
					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT))
				{
					ops.push_back(OP_MAP_WRITE);
				}
			}
			else
				ops.push_back(OP_MAP);
		}

		if (state.hasBoundBufferMemory && state.queueIdle)
		{
			// \note Destroy only buffers after they have been bound
			ops.push_back(OP_BUFFER_DESTROY);
		}
		else
		{
			if (state.hasBuffer)
			{
				if (!state.hasBoundBufferMemory)
					ops.push_back(OP_BUFFER_BINDMEMORY);
			}
			else if (!state.hasImage && supportsBuffers)	// Avoid creating buffer if there is already image
				ops.push_back(OP_BUFFER_CREATE);
		}

		if (state.hasBoundImageMemory && state.queueIdle)
		{
			// \note Destroy only image after they have been bound
			ops.push_back(OP_IMAGE_DESTROY);
		}
		else
		{
			if (state.hasImage)
			{
				if (!state.hasBoundImageMemory)
					ops.push_back(OP_IMAGE_BINDMEMORY);
			}
			else if (!state.hasBuffer && supportsImages)	// Avoid creating image if there is already buffer
				ops.push_back(OP_IMAGE_CREATE);
		}

		// Host writes must be flushed before GPU commands and there must be
		// buffer or image for GPU commands
		if (state.hostFlushed
			&& (state.memoryDefined || supportsDeviceBufferWrites(usage) || state.imageDefined || supportsDeviceImageWrites(usage))
			&& (state.hasBoundBufferMemory || state.hasBoundImageMemory) // Avoid command buffers if there is no object to use
			&& (usageToStageFlags(usage) & (~vk::VK_PIPELINE_STAGE_HOST_BIT)) != 0) // Don't start command buffer if there are no ways to use memory from gpu
		{
			ops.push_back(OP_COMMAND_BUFFER_BEGIN);
		}

		if (!state.deviceIdle)
			ops.push_back(OP_DEVICE_WAIT_FOR_IDLE);

		if (!state.queueIdle)
			ops.push_back(OP_QUEUE_WAIT_FOR_IDLE);
	}
	else if (state.stage == STAGE_COMMAND_BUFFER)
	{
		// Pipeline barriers are only meaningful while there are dirty
		// (unflushed/unmade-visible) accesses in the cache model.
		if (!state.cache.isClean())
		{
			ops.push_back(OP_PIPELINE_BARRIER_GLOBAL);

			if (state.hasImage)
				ops.push_back(OP_PIPELINE_BARRIER_IMAGE);

			if (state.hasBuffer)
				ops.push_back(OP_PIPELINE_BARRIER_BUFFER);
		}

		if (state.hasBoundBufferMemory)
		{
			// Buffer writes require transfer-dst usage and a valid transfer-write state.
			if (usage & USAGE_TRANSFER_DST
				&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
			{
				ops.push_back(OP_BUFFER_FILL);
				ops.push_back(OP_BUFFER_UPDATE);
				ops.push_back(OP_BUFFER_COPY_FROM_BUFFER);
				ops.push_back(OP_BUFFER_COPY_FROM_IMAGE);
			}

			// Buffer reads additionally require the contents to be defined.
			if (usage & USAGE_TRANSFER_SRC
				&& state.memoryDefined
				&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
			{
				ops.push_back(OP_BUFFER_COPY_TO_BUFFER);
				ops.push_back(OP_BUFFER_COPY_TO_IMAGE);
			}
		}

		// Allow layout transition only when it can change something: either the
		// image is still undefined or more than one layout is usable.
		if (state.hasBoundImageMemory
			&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
				|| getNumberOfSupportedLayouts(usage) > 1))
		{
			ops.push_back(OP_IMAGE_TRANSITION_LAYOUT);

			{
				// Image writes require GENERAL or TRANSFER_DST_OPTIMAL layout.
				if (usage & USAGE_TRANSFER_DST
					&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
						|| state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
				{
					ops.push_back(OP_IMAGE_COPY_FROM_BUFFER);
					ops.push_back(OP_IMAGE_COPY_FROM_IMAGE);
					ops.push_back(OP_IMAGE_BLIT_FROM_IMAGE);
				}

				// Image reads require defined contents and GENERAL or
				// TRANSFER_SRC_OPTIMAL layout.
				if (usage & USAGE_TRANSFER_SRC
					&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
						|| state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
					&& state.imageDefined
					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
				{
					ops.push_back(OP_IMAGE_COPY_TO_BUFFER);
					ops.push_back(OP_IMAGE_COPY_TO_IMAGE);
					ops.push_back(OP_IMAGE_BLIT_TO_IMAGE);
				}
			}
		}

		// A render pass may begin only if at least one render-pass-time read of
		// the buffer or the image would be valid in the current cache state.
		// \todo [2016-03-09 mika] Add other usages?
		if ((state.memoryDefined
				&& state.hasBoundBufferMemory
				&& (((usage & USAGE_VERTEX_BUFFER)
					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
				|| ((usage & USAGE_INDEX_BUFFER)
					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
				|| ((usage & USAGE_UNIFORM_BUFFER)
					&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
				|| ((usage & USAGE_UNIFORM_TEXEL_BUFFER)
					&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
				|| ((usage & USAGE_STORAGE_BUFFER)
					&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
				|| ((usage & USAGE_STORAGE_TEXEL_BUFFER)
					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))
			|| (state.imageDefined
				&& state.hasBoundImageMemory
				&& (((usage & USAGE_STORAGE_IMAGE)
						&& state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
						&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
							|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
					|| ((usage & USAGE_SAMPLED_IMAGE)
						&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
							|| state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
						&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
							|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))))
		{
			ops.push_back(OP_RENDERPASS_BEGIN);
		}

		// \note This depends on previous operations and has to be always the
		// last command buffer operation check
		if (ops.empty() || !state.commandBufferIsEmpty)
			ops.push_back(OP_COMMAND_BUFFER_END);
	}
	else if (state.stage == STAGE_RENDER_PASS)
	{
		// Each render-pass read requires defined contents, bound memory, a
		// valid cache state for the reading stage/access and (for images) a
		// compatible layout.
		if ((usage & USAGE_VERTEX_BUFFER) != 0
			&& state.memoryDefined
			&& state.hasBoundBufferMemory
			&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
		{
			ops.push_back(OP_RENDER_VERTEX_BUFFER);
		}

		if ((usage & USAGE_INDEX_BUFFER) != 0
			&& state.memoryDefined
			&& state.hasBoundBufferMemory
			&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
		{
			ops.push_back(OP_RENDER_INDEX_BUFFER);
		}

		if ((usage & USAGE_UNIFORM_BUFFER) != 0
			&& state.memoryDefined
			&& state.hasBoundBufferMemory)
		{
			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
				ops.push_back(OP_RENDER_VERTEX_UNIFORM_BUFFER);

			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
				ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_BUFFER);
		}

		if ((usage & USAGE_UNIFORM_TEXEL_BUFFER) != 0
			&& state.memoryDefined
			&& state.hasBoundBufferMemory)
		{
			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
				ops.push_back(OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER);

			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
				ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER);
		}

		if ((usage & USAGE_STORAGE_BUFFER) != 0
			&& state.memoryDefined
			&& state.hasBoundBufferMemory)
		{
			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
				ops.push_back(OP_RENDER_VERTEX_STORAGE_BUFFER);

			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
				ops.push_back(OP_RENDER_FRAGMENT_STORAGE_BUFFER);
		}

		if ((usage & USAGE_STORAGE_TEXEL_BUFFER) != 0
			&& state.memoryDefined
			&& state.hasBoundBufferMemory)
		{
			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
				ops.push_back(OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER);

			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
				ops.push_back(OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER);
		}

		// Storage images may only be accessed in GENERAL layout.
		if ((usage & USAGE_STORAGE_IMAGE) != 0
			&& state.imageDefined
			&& state.hasBoundImageMemory
			&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL))
		{
			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
				ops.push_back(OP_RENDER_VERTEX_STORAGE_IMAGE);

			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
				ops.push_back(OP_RENDER_FRAGMENT_STORAGE_IMAGE);
		}

		// Sampled images additionally allow SHADER_READ_ONLY_OPTIMAL layout.
		if ((usage & USAGE_SAMPLED_IMAGE) != 0
			&& state.imageDefined
			&& state.hasBoundImageMemory
			&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
				|| state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))
		{
			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
				ops.push_back(OP_RENDER_VERTEX_SAMPLED_IMAGE);

			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
				ops.push_back(OP_RENDER_FRAGMENT_SAMPLED_IMAGE);
		}

		if (!state.renderPassIsEmpty)
			ops.push_back(OP_RENDERPASS_END);
	}
	else
		DE_FATAL("Unknown stage");
}
8348
8349void removeIllegalAccessFlags (vk::VkAccessFlags& accessflags, vk::VkPipelineStageFlags stageflags)
8350{
8351	if (!(stageflags & vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT))
8352		accessflags &= ~vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
8353
8354	if (!(stageflags & vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT))
8355		accessflags &= ~vk::VK_ACCESS_INDEX_READ_BIT;
8356
8357	if (!(stageflags & vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT))
8358		accessflags &= ~vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
8359
8360	if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
8361						vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8362						vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
8363						vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8364						vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
8365						vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8366		accessflags &= ~vk::VK_ACCESS_UNIFORM_READ_BIT;
8367
8368	if (!(stageflags & vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT))
8369		accessflags &= ~vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
8370
8371	if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
8372						vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8373						vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
8374						vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8375						vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
8376						vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8377		accessflags &= ~vk::VK_ACCESS_SHADER_READ_BIT;
8378
8379	if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
8380						vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8381						vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
8382						vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8383						vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
8384						vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8385		accessflags &= ~vk::VK_ACCESS_SHADER_WRITE_BIT;
8386
8387	if (!(stageflags & vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT))
8388		accessflags &= ~vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
8389
8390	if (!(stageflags & vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT))
8391		accessflags &= ~vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
8392
8393	if (!(stageflags & (vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
8394						vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT)))
8395		accessflags &= ~vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
8396
8397	if (!(stageflags & (vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
8398						vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT)))
8399		accessflags &= ~vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
8400
8401	if (!(stageflags & vk::VK_PIPELINE_STAGE_TRANSFER_BIT))
8402		accessflags &= ~vk::VK_ACCESS_TRANSFER_READ_BIT;
8403
8404	if (!(stageflags & vk::VK_PIPELINE_STAGE_TRANSFER_BIT))
8405		accessflags &= ~vk::VK_ACCESS_TRANSFER_WRITE_BIT;
8406
8407	if (!(stageflags & vk::VK_PIPELINE_STAGE_HOST_BIT))
8408		accessflags &= ~vk::VK_ACCESS_HOST_READ_BIT;
8409
8410	if (!(stageflags & vk::VK_PIPELINE_STAGE_HOST_BIT))
8411		accessflags &= ~vk::VK_ACCESS_HOST_WRITE_BIT;
8412}
8413
// Apply the state transition for operation 'op' to the simulated 'state'.
//
// This is the model-side counterpart of the command objects created by
// createHostCommand()/createCmdCommand(): the rng draws performed here for an
// op match the draws the corresponding create*Command() case performs for the
// same op, keeping the two random sequences in lock-step.
//
// Parameters:
//  state  - simulated state to mutate.
//  memory - memory under test; its property flags decide whether host
//           flush/invalidate tracking is needed (non-coherent memory only).
//  op     - operation being applied.
//  usage  - allowed usage flags (used when picking random image layouts).
void applyOp (State& state, const Memory& memory, Op op, Usage usage)
{
	switch (op)
	{
		case OP_MAP:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(!state.mapped);
			state.mapped = true;
			break;

		case OP_UNMAP:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(state.mapped);
			state.mapped = false;
			break;

		case OP_MAP_FLUSH:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(!state.hostFlushed);
			state.hostFlushed = true;
			break;

		case OP_MAP_INVALIDATE:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(!state.hostInvalidated);
			state.hostInvalidated = true;
			break;

		case OP_MAP_READ:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(state.hostInvalidated);
			// Matching rng draw with createHostCommand() for this op.
			state.rng.getUint32();
			break;

		case OP_MAP_WRITE:
			DE_ASSERT(state.stage == STAGE_HOST);
			// Non-coherent memory needs an explicit flush after a host write.
			if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
				state.hostFlushed = false;

			// A raw host write defines the buffer view of memory and
			// invalidates any image interpretation of it.
			state.memoryDefined = true;
			state.imageDefined = false;
			state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
			state.rng.getUint32();
			break;

		case OP_MAP_MODIFY:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(state.hostInvalidated);

			if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
				state.hostFlushed = false;

			state.rng.getUint32();
			break;

		case OP_BUFFER_CREATE:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(!state.hasBuffer);

			state.hasBuffer = true;
			break;

		case OP_BUFFER_DESTROY:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(state.hasBuffer);
			DE_ASSERT(state.hasBoundBufferMemory);

			state.hasBuffer = false;
			state.hasBoundBufferMemory = false;
			break;

		case OP_BUFFER_BINDMEMORY:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(state.hasBuffer);
			DE_ASSERT(!state.hasBoundBufferMemory);

			state.hasBoundBufferMemory = true;
			break;

		case OP_IMAGE_CREATE:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(!state.hasImage);
			DE_ASSERT(!state.hasBuffer);

			state.hasImage = true;
			break;

		case OP_IMAGE_DESTROY:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(state.hasImage);
			DE_ASSERT(state.hasBoundImageMemory);

			state.hasImage = false;
			state.hasBoundImageMemory = false;
			state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
			state.imageDefined = false;
			break;

		case OP_IMAGE_BINDMEMORY:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(state.hasImage);
			DE_ASSERT(!state.hasBoundImageMemory);

			state.hasBoundImageMemory = true;
			break;

		case OP_IMAGE_TRANSITION_LAYOUT:
		{
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
			DE_ASSERT(state.hasImage);
			DE_ASSERT(state.hasBoundImageMemory);

			// Mirrors the rng draws in createCmdCommand() for this op:
			// mostly keep the current layout as source, occasionally start
			// from UNDEFINED (which discards the contents).
			// \todo [2016-03-09 mika] Support linear tiling and predefined data
			const vk::VkImageLayout		srcLayout	= state.rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
			const vk::VkImageLayout		dstLayout	= getRandomNextLayout(state.rng, usage, srcLayout);

			vk::VkPipelineStageFlags	dirtySrcStages;
			vk::VkAccessFlags			dirtySrcAccesses;
			vk::VkPipelineStageFlags	dirtyDstStages;
			vk::VkAccessFlags			dirtyDstAccesses;

			vk::VkPipelineStageFlags	srcStages;
			vk::VkAccessFlags			srcAccesses;
			vk::VkPipelineStageFlags	dstStages;
			vk::VkAccessFlags			dstAccesses;

			state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);

			// Try masking some random bits
			srcStages	= dirtySrcStages;
			srcAccesses	= dirtySrcAccesses;

			dstStages	= state.cache.getAllowedStages() & state.rng.getUint32();
			dstAccesses	= state.cache.getAllowedAcceses() & state.rng.getUint32();

			// If there are no bits in dst stage mask use all stages
			dstStages	= dstStages ? dstStages : state.cache.getAllowedStages();

			if (!srcStages)
				srcStages = dstStages;

			removeIllegalAccessFlags(dstAccesses, dstStages);
			removeIllegalAccessFlags(srcAccesses, srcStages);

			// Transitioning from UNDEFINED discards the image contents.
			if (srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED)
				state.imageDefined = false;

			state.commandBufferIsEmpty = false;
			state.imageLayout = dstLayout;
			// The raw-memory (buffer) interpretation is no longer valid.
			state.memoryDefined = false;
			state.cache.imageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
			break;
		}

		case OP_QUEUE_WAIT_FOR_IDLE:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(!state.queueIdle);

			state.queueIdle = true;

			state.cache.waitForIdle();
			break;

		case OP_DEVICE_WAIT_FOR_IDLE:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(!state.deviceIdle);

			// Device idle implies queue idle as well.
			state.queueIdle = true;
			state.deviceIdle = true;

			state.cache.waitForIdle();
			break;

		case OP_COMMAND_BUFFER_BEGIN:
			DE_ASSERT(state.stage == STAGE_HOST);
			state.stage = STAGE_COMMAND_BUFFER;
			state.commandBufferIsEmpty = true;
			// Makes host writes visible to command buffer
			state.cache.submitCommandBuffer();
			break;

		case OP_COMMAND_BUFFER_END:
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
			state.stage = STAGE_HOST;
			// Submitting work means the queue/device are no longer idle.
			state.queueIdle = false;
			state.deviceIdle = false;
			break;

		case OP_BUFFER_COPY_FROM_BUFFER:
		case OP_BUFFER_COPY_FROM_IMAGE:
		case OP_BUFFER_UPDATE:
		case OP_BUFFER_FILL:
			// Matching rng draw with createCmdCommand() (seed for the data).
			state.rng.getUint32();
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);

			// Device writes to non-coherent memory require a host invalidate
			// before the host may read them.
			if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
				state.hostInvalidated = false;

			state.commandBufferIsEmpty = false;
			// Buffer write: defines memory, invalidates the image interpretation.
			state.memoryDefined = true;
			state.imageDefined = false;
			state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
			state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
			break;

		case OP_BUFFER_COPY_TO_BUFFER:
		case OP_BUFFER_COPY_TO_IMAGE:
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);

			state.commandBufferIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
			break;

		case OP_IMAGE_BLIT_FROM_IMAGE:
			// Matching rng draw with createCmdCommand() (blit scale choice).
			state.rng.getBool();
			// Fall through
		case OP_IMAGE_COPY_FROM_BUFFER:
		case OP_IMAGE_COPY_FROM_IMAGE:
			state.rng.getUint32();
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);

			if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
				state.hostInvalidated = false;

			state.commandBufferIsEmpty = false;
			// Image write: defines the image, invalidates the raw-memory view.
			state.memoryDefined = false;
			state.imageDefined = true;
			state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
			break;

		case OP_IMAGE_BLIT_TO_IMAGE:
			// Matching rng draw with createCmdCommand() (blit scale choice).
			state.rng.getBool();
			// Fall through
		case OP_IMAGE_COPY_TO_BUFFER:
		case OP_IMAGE_COPY_TO_IMAGE:
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);

			state.commandBufferIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
			break;

		case OP_PIPELINE_BARRIER_GLOBAL:
		case OP_PIPELINE_BARRIER_BUFFER:
		case OP_PIPELINE_BARRIER_IMAGE:
		{
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);

			vk::VkPipelineStageFlags	dirtySrcStages;
			vk::VkAccessFlags			dirtySrcAccesses;
			vk::VkPipelineStageFlags	dirtyDstStages;
			vk::VkAccessFlags			dirtyDstAccesses;

			vk::VkPipelineStageFlags	srcStages;
			vk::VkAccessFlags			srcAccesses;
			vk::VkPipelineStageFlags	dstStages;
			vk::VkAccessFlags			dstAccesses;

			state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);

			// Try masking some random bits
			srcStages	= dirtySrcStages & state.rng.getUint32();
			srcAccesses	= dirtySrcAccesses & state.rng.getUint32();

			dstStages	= dirtyDstStages & state.rng.getUint32();
			dstAccesses	= dirtyDstAccesses & state.rng.getUint32();

			// If there are no bits in stage mask use the original dirty stages
			srcStages	= srcStages ? srcStages : dirtySrcStages;
			dstStages	= dstStages ? dstStages : dirtyDstStages;

			if (!srcStages)
				srcStages = dstStages;

			removeIllegalAccessFlags(dstAccesses, dstStages);
			removeIllegalAccessFlags(srcAccesses, srcStages);

			state.commandBufferIsEmpty = false;
			state.cache.barrier(srcStages, srcAccesses, dstStages, dstAccesses);
			break;
		}

		case OP_RENDERPASS_BEGIN:
		{
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);

			state.renderPassIsEmpty	= true;
			state.stage				= STAGE_RENDER_PASS;
			break;
		}

		case OP_RENDERPASS_END:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty	= true;
			state.stage				= STAGE_COMMAND_BUFFER;
			break;
		}

		// Render-pass reads only mark the render pass non-empty and record the
		// cache access they perform; they do not change memory contents.
		case OP_RENDER_VERTEX_BUFFER:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT);
			break;
		}

		case OP_RENDER_INDEX_BUFFER:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT);
			break;
		}

		case OP_RENDER_VERTEX_UNIFORM_BUFFER:
		case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
			break;
		}

		case OP_RENDER_FRAGMENT_UNIFORM_BUFFER:
		case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
			break;
		}

		case OP_RENDER_VERTEX_STORAGE_BUFFER:
		case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
			break;
		}

		case OP_RENDER_FRAGMENT_STORAGE_BUFFER:
		case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
			break;
		}

		case OP_RENDER_FRAGMENT_STORAGE_IMAGE:
		case OP_RENDER_FRAGMENT_SAMPLED_IMAGE:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
			break;
		}

		case OP_RENDER_VERTEX_STORAGE_IMAGE:
		case OP_RENDER_VERTEX_SAMPLED_IMAGE:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
			break;
		}

		default:
			DE_FATAL("Unknown op");
	}
}
8795
8796de::MovePtr<Command> createHostCommand (Op					op,
8797										de::Random&			rng,
8798										Usage				usage,
8799										vk::VkSharingMode	sharing)
8800{
8801	switch (op)
8802	{
8803		case OP_MAP:					return de::MovePtr<Command>(new Map());
8804		case OP_UNMAP:					return de::MovePtr<Command>(new UnMap());
8805
8806		case OP_MAP_FLUSH:				return de::MovePtr<Command>(new Flush());
8807		case OP_MAP_INVALIDATE:			return de::MovePtr<Command>(new Invalidate());
8808
8809		case OP_MAP_READ:				return de::MovePtr<Command>(new HostMemoryAccess(true, false, rng.getUint32()));
8810		case OP_MAP_WRITE:				return de::MovePtr<Command>(new HostMemoryAccess(false, true, rng.getUint32()));
8811		case OP_MAP_MODIFY:				return de::MovePtr<Command>(new HostMemoryAccess(true, true, rng.getUint32()));
8812
8813		case OP_BUFFER_CREATE:			return de::MovePtr<Command>(new CreateBuffer(usageToBufferUsageFlags(usage), sharing));
8814		case OP_BUFFER_DESTROY:			return de::MovePtr<Command>(new DestroyBuffer());
8815		case OP_BUFFER_BINDMEMORY:		return de::MovePtr<Command>(new BindBufferMemory());
8816
8817		case OP_IMAGE_CREATE:			return de::MovePtr<Command>(new CreateImage(usageToImageUsageFlags(usage), sharing));
8818		case OP_IMAGE_DESTROY:			return de::MovePtr<Command>(new DestroyImage());
8819		case OP_IMAGE_BINDMEMORY:		return de::MovePtr<Command>(new BindImageMemory());
8820
8821		case OP_QUEUE_WAIT_FOR_IDLE:	return de::MovePtr<Command>(new QueueWaitIdle());
8822		case OP_DEVICE_WAIT_FOR_IDLE:	return de::MovePtr<Command>(new DeviceWaitIdle());
8823
8824		default:
8825			DE_FATAL("Unknown op");
8826			return de::MovePtr<Command>(DE_NULL);
8827	}
8828}
8829
// Create the command-buffer-stage CmdCommand object corresponding to 'op'.
//
// The rng draws made here for an op match the draws applyOp() makes for the
// same op, so the simulated state and the created command stay in lock-step.
//
// Parameters:
//  rng   - random source, shared with the state simulation.
//  state - current simulated state (image layout, cache contents, ...).
//  op    - operation to create a command for.
//  usage - allowed usage flags (used when picking random image layouts).
de::MovePtr<CmdCommand> createCmdCommand (de::Random&	rng,
										  const State&	state,
										  Op			op,
										  Usage			usage)
{
	switch (op)
	{
		case OP_BUFFER_FILL:					return de::MovePtr<CmdCommand>(new FillBuffer(rng.getUint32()));
		case OP_BUFFER_UPDATE:					return de::MovePtr<CmdCommand>(new UpdateBuffer(rng.getUint32()));
		case OP_BUFFER_COPY_TO_BUFFER:			return de::MovePtr<CmdCommand>(new BufferCopyToBuffer());
		case OP_BUFFER_COPY_FROM_BUFFER:		return de::MovePtr<CmdCommand>(new BufferCopyFromBuffer(rng.getUint32()));

		case OP_BUFFER_COPY_TO_IMAGE:			return de::MovePtr<CmdCommand>(new BufferCopyToImage());
		case OP_BUFFER_COPY_FROM_IMAGE:			return de::MovePtr<CmdCommand>(new BufferCopyFromImage(rng.getUint32()));

		case OP_IMAGE_TRANSITION_LAYOUT:
		{
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
			DE_ASSERT(state.hasImage);
			DE_ASSERT(state.hasBoundImageMemory);

			// Mostly keep the current layout as source; occasionally use
			// UNDEFINED, which allows the implementation to discard contents.
			const vk::VkImageLayout		srcLayout	= rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
			const vk::VkImageLayout		dstLayout	= getRandomNextLayout(rng, usage, srcLayout);

			vk::VkPipelineStageFlags	dirtySrcStages;
			vk::VkAccessFlags			dirtySrcAccesses;
			vk::VkPipelineStageFlags	dirtyDstStages;
			vk::VkAccessFlags			dirtyDstAccesses;

			vk::VkPipelineStageFlags	srcStages;
			vk::VkAccessFlags			srcAccesses;
			vk::VkPipelineStageFlags	dstStages;
			vk::VkAccessFlags			dstAccesses;

			state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);

			// Try masking some random bits
			srcStages	= dirtySrcStages;
			srcAccesses	= dirtySrcAccesses;

			dstStages	= state.cache.getAllowedStages() & rng.getUint32();
			dstAccesses	= state.cache.getAllowedAcceses() & rng.getUint32();

			// If there are no bits in dst stage mask use all stages
			dstStages	= dstStages ? dstStages : state.cache.getAllowedStages();

			if (!srcStages)
				srcStages = dstStages;

			// Stage masks may not carry access bits their stages cannot perform.
			removeIllegalAccessFlags(dstAccesses, dstStages);
			removeIllegalAccessFlags(srcAccesses, srcStages);

			return de::MovePtr<CmdCommand>(new ImageTransition(srcStages, srcAccesses, dstStages, dstAccesses, srcLayout, dstLayout));
		}

		case OP_IMAGE_COPY_TO_BUFFER:			return de::MovePtr<CmdCommand>(new ImageCopyToBuffer(state.imageLayout));
		case OP_IMAGE_COPY_FROM_BUFFER:			return de::MovePtr<CmdCommand>(new ImageCopyFromBuffer(rng.getUint32(), state.imageLayout));
		case OP_IMAGE_COPY_TO_IMAGE:			return de::MovePtr<CmdCommand>(new ImageCopyToImage(state.imageLayout));
		case OP_IMAGE_COPY_FROM_IMAGE:			return de::MovePtr<CmdCommand>(new ImageCopyFromImage(rng.getUint32(), state.imageLayout));
		case OP_IMAGE_BLIT_TO_IMAGE:
		{
			// Randomly blit at 1:1 or 2x scale (matching applyOp()'s getBool()).
			const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
			return de::MovePtr<CmdCommand>(new ImageBlitToImage(scale, state.imageLayout));
		}

		case OP_IMAGE_BLIT_FROM_IMAGE:
		{
			const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
			return de::MovePtr<CmdCommand>(new ImageBlitFromImage(rng.getUint32(), scale, state.imageLayout));
		}

		case OP_PIPELINE_BARRIER_GLOBAL:
		case OP_PIPELINE_BARRIER_BUFFER:
		case OP_PIPELINE_BARRIER_IMAGE:
		{
			vk::VkPipelineStageFlags	dirtySrcStages;
			vk::VkAccessFlags			dirtySrcAccesses;
			vk::VkPipelineStageFlags	dirtyDstStages;
			vk::VkAccessFlags			dirtyDstAccesses;

			vk::VkPipelineStageFlags	srcStages;
			vk::VkAccessFlags			srcAccesses;
			vk::VkPipelineStageFlags	dstStages;
			vk::VkAccessFlags			dstAccesses;

			state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);

			// Try masking some random bits
			srcStages	= dirtySrcStages & rng.getUint32();
			srcAccesses	= dirtySrcAccesses & rng.getUint32();

			dstStages	= dirtyDstStages & rng.getUint32();
			dstAccesses	= dirtyDstAccesses & rng.getUint32();

			// If there are no bits in stage mask use the original dirty stages
			srcStages	= srcStages ? srcStages : dirtySrcStages;
			dstStages	= dstStages ? dstStages : dirtyDstStages;

			if (!srcStages)
				srcStages = dstStages;

			removeIllegalAccessFlags(dstAccesses, dstStages);
			removeIllegalAccessFlags(srcAccesses, srcStages);

			PipelineBarrier::Type type;

			if (op == OP_PIPELINE_BARRIER_IMAGE)
				type = PipelineBarrier::TYPE_IMAGE;
			else if (op == OP_PIPELINE_BARRIER_BUFFER)
				type = PipelineBarrier::TYPE_BUFFER;
			else if (op == OP_PIPELINE_BARRIER_GLOBAL)
				type = PipelineBarrier::TYPE_GLOBAL;
			else
			{
				type = PipelineBarrier::TYPE_LAST;
				DE_FATAL("Unknown op");
			}

			// Image barriers need the current layout; other types do not.
			if (type == PipelineBarrier::TYPE_IMAGE)
				return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::just(state.imageLayout)));
			else
				return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::nothing<vk::VkImageLayout>()));
		}

		default:
			DE_FATAL("Unknown op");
			return de::MovePtr<CmdCommand>(DE_NULL);
	}
}
8959
8960de::MovePtr<RenderPassCommand> createRenderPassCommand (de::Random&,
8961														const State&,
8962														Op				op)
8963{
8964	switch (op)
8965	{
8966		case OP_RENDER_VERTEX_BUFFER:					return de::MovePtr<RenderPassCommand>(new RenderVertexBuffer());
8967		case OP_RENDER_INDEX_BUFFER:					return de::MovePtr<RenderPassCommand>(new RenderIndexBuffer());
8968
8969		case OP_RENDER_VERTEX_UNIFORM_BUFFER:			return de::MovePtr<RenderPassCommand>(new RenderVertexUniformBuffer());
8970		case OP_RENDER_FRAGMENT_UNIFORM_BUFFER:			return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformBuffer());
8971
8972		case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER:		return de::MovePtr<RenderPassCommand>(new RenderVertexUniformTexelBuffer());
8973		case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER:	return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformTexelBuffer());
8974
8975		case OP_RENDER_VERTEX_STORAGE_BUFFER:			return de::MovePtr<RenderPassCommand>(new RenderVertexStorageBuffer());
8976		case OP_RENDER_FRAGMENT_STORAGE_BUFFER:			return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageBuffer());
8977
8978		case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER:		return de::MovePtr<RenderPassCommand>(new RenderVertexStorageTexelBuffer());
8979		case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER:	return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageTexelBuffer());
8980
8981		case OP_RENDER_VERTEX_STORAGE_IMAGE:			return de::MovePtr<RenderPassCommand>(new RenderVertexStorageImage());
8982		case OP_RENDER_FRAGMENT_STORAGE_IMAGE:			return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageImage());
8983
8984		case OP_RENDER_VERTEX_SAMPLED_IMAGE:			return de::MovePtr<RenderPassCommand>(new RenderVertexSampledImage());
8985		case OP_RENDER_FRAGMENT_SAMPLED_IMAGE:			return de::MovePtr<RenderPassCommand>(new RenderFragmentSampledImage());
8986
8987		default:
8988			DE_FATAL("Unknown op");
8989			return de::MovePtr<RenderPassCommand>(DE_NULL);
8990	}
8991}
8992
8993de::MovePtr<CmdCommand> createRenderPassCommands (const Memory&	memory,
8994												  de::Random&	nextOpRng,
8995												  State&		state,
8996												  Usage			usage,
8997												  size_t&		opNdx,
8998												  size_t		opCount)
8999{
9000	vector<RenderPassCommand*>	commands;
9001
9002	try
9003	{
9004		for (; opNdx < opCount; opNdx++)
9005		{
9006			vector<Op>	ops;
9007
9008			getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9009
9010			DE_ASSERT(!ops.empty());
9011
9012			{
9013				const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9014
9015				if (op == OP_RENDERPASS_END)
9016				{
9017					break;
9018				}
9019				else
9020				{
9021					de::Random	rng	(state.rng);
9022
9023					commands.push_back(createRenderPassCommand(rng, state, op).release());
9024					applyOp(state, memory, op, usage);
9025
9026					DE_ASSERT(state.rng == rng);
9027				}
9028			}
9029		}
9030
9031		applyOp(state, memory, OP_RENDERPASS_END, usage);
9032		return de::MovePtr<CmdCommand>(new SubmitRenderPass(commands));
9033	}
9034	catch (...)
9035	{
9036		for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9037			delete commands[commandNdx];
9038
9039		throw;
9040	}
9041}
9042
9043de::MovePtr<Command> createCmdCommands (const Memory&	memory,
9044										de::Random&		nextOpRng,
9045										State&			state,
9046										Usage			usage,
9047										size_t&			opNdx,
9048										size_t			opCount)
9049{
9050	vector<CmdCommand*>	commands;
9051
9052	try
9053	{
9054		for (; opNdx < opCount; opNdx++)
9055		{
9056			vector<Op>	ops;
9057
9058			getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9059
9060			DE_ASSERT(!ops.empty());
9061
9062			{
9063				const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9064
9065				if (op == OP_COMMAND_BUFFER_END)
9066				{
9067					break;
9068				}
9069				else
9070				{
9071					// \note Command needs to known the state before the operation
9072					if (op == OP_RENDERPASS_BEGIN)
9073					{
9074						applyOp(state, memory, op, usage);
9075						commands.push_back(createRenderPassCommands(memory, nextOpRng, state, usage, opNdx, opCount).release());
9076					}
9077					else
9078					{
9079						de::Random	rng	(state.rng);
9080
9081						commands.push_back(createCmdCommand(rng, state, op, usage).release());
9082						applyOp(state, memory, op, usage);
9083
9084						DE_ASSERT(state.rng == rng);
9085					}
9086
9087				}
9088			}
9089		}
9090
9091		applyOp(state, memory, OP_COMMAND_BUFFER_END, usage);
9092		return de::MovePtr<Command>(new SubmitCommandBuffer(commands));
9093	}
9094	catch (...)
9095	{
9096		for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9097			delete commands[commandNdx];
9098
9099		throw;
9100	}
9101}
9102
9103void createCommands (vector<Command*>&	commands,
9104					 deUint32			seed,
9105					 const Memory&		memory,
9106					 Usage				usage,
9107					 vk::VkSharingMode	sharingMode,
9108					 size_t				opCount)
9109{
9110	State			state		(usage, seed);
9111	// Used to select next operation only
9112	de::Random		nextOpRng	(seed ^ 12930809);
9113
9114	commands.reserve(opCount);
9115
9116	for (size_t opNdx = 0; opNdx < opCount; opNdx++)
9117	{
9118		vector<Op>	ops;
9119
9120		getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9121
9122		DE_ASSERT(!ops.empty());
9123
9124		{
9125			const Op	op	= nextOpRng.choose<Op>(ops.begin(), ops.end());
9126
9127			if (op == OP_COMMAND_BUFFER_BEGIN)
9128			{
9129				applyOp(state, memory, op, usage);
9130				commands.push_back(createCmdCommands(memory, nextOpRng, state, usage, opNdx, opCount).release());
9131			}
9132			else
9133			{
9134				de::Random	rng	(state.rng);
9135
9136				commands.push_back(createHostCommand(op, rng, usage, sharingMode).release());
9137				applyOp(state, memory, op, usage);
9138
9139				// Make sure that random generator is in sync
9140				DE_ASSERT(state.rng == rng);
9141			}
9142		}
9143	}
9144
9145	// Clean up resources
9146	if (state.hasBuffer && state.hasImage)
9147	{
9148		if (!state.queueIdle)
9149			commands.push_back(new QueueWaitIdle());
9150
9151		if (state.hasBuffer)
9152			commands.push_back(new DestroyBuffer());
9153
9154		if (state.hasImage)
9155			commands.push_back(new DestroyImage());
9156	}
9157}
9158
// Test instance that iterates over every memory type reported by the device,
// running several randomized command-stream iterations per type. Execution is
// split into small stages (create/prepare/execute/verify) driven one at a
// time from iterate() via the m_stage member function pointer.
class MemoryTestInstance : public TestInstance
{
public:

	// A stage returns true while more iterate() calls are needed, false when
	// the test is complete.
	typedef bool(MemoryTestInstance::*StageFunc)(void);

												MemoryTestInstance				(::vkt::Context& context, const TestConfig& config);
												~MemoryTestInstance				(void);

	tcu::TestStatus								iterate							(void);

private:
	const TestConfig							m_config;
	const size_t								m_iterationCount;
	const size_t								m_opCount;
	const vk::VkPhysicalDeviceMemoryProperties	m_memoryProperties;
	// Index of the memory type currently under test; advanced by nextMemoryType().
	deUint32									m_memoryTypeNdx;
	// Iteration counter within the current memory type; reset by nextMemoryType().
	size_t										m_iteration;
	// Next stage to run from iterate(); DE_NULL once all memory types are done.
	StageFunc									m_stage;
	tcu::ResultCollector						m_resultCollector;

	// Owned command objects for the current iteration; freed in resetResources().
	vector<Command*>							m_commands;
	MovePtr<Memory>								m_memory;
	MovePtr<Context>							m_renderContext;
	MovePtr<PrepareContext>						m_prepareContext;

	// Advance to the next iteration of the current memory type, or to the
	// next memory type when the iteration count is exhausted.
	bool										nextIteration					(void);
	bool										nextMemoryType					(void);

	// Stage implementations (see StageFunc).
	bool										createCommandsAndAllocateMemory	(void);
	bool										prepare							(void);
	bool										execute							(void);
	bool										verify							(void);
	void										resetResources					(void);
};
9194
9195void MemoryTestInstance::resetResources (void)
9196{
9197	const vk::DeviceInterface&	vkd		= m_context.getDeviceInterface();
9198	const vk::VkDevice			device	= m_context.getDevice();
9199
9200	VK_CHECK(vkd.deviceWaitIdle(device));
9201
9202	for (size_t commandNdx = 0; commandNdx < m_commands.size(); commandNdx++)
9203	{
9204		delete m_commands[commandNdx];
9205		m_commands[commandNdx] = DE_NULL;
9206	}
9207
9208	m_commands.clear();
9209	m_prepareContext.clear();
9210	m_memory.clear();
9211}
9212
9213bool MemoryTestInstance::nextIteration (void)
9214{
9215	m_iteration++;
9216
9217	if (m_iteration < m_iterationCount)
9218	{
9219		resetResources();
9220		m_stage = &MemoryTestInstance::createCommandsAndAllocateMemory;
9221		return true;
9222	}
9223	else
9224		return nextMemoryType();
9225}
9226
9227bool MemoryTestInstance::nextMemoryType (void)
9228{
9229	resetResources();
9230
9231	DE_ASSERT(m_commands.empty());
9232
9233	m_memoryTypeNdx++;
9234
9235	if (m_memoryTypeNdx < m_memoryProperties.memoryTypeCount)
9236	{
9237		m_iteration	= 0;
9238		m_stage		= &MemoryTestInstance::createCommandsAndAllocateMemory;
9239
9240		return true;
9241	}
9242	else
9243	{
9244		m_stage = DE_NULL;
9245		return false;
9246	}
9247}
9248
// Constructor: logs the test configuration and the device's memory
// heaps/types, then builds the render context used by all stages. The first
// stage is createCommandsAndAllocateMemory for memory type 0, iteration 0.
MemoryTestInstance::MemoryTestInstance (::vkt::Context& context, const TestConfig& config)
	: TestInstance			(context)
	, m_config				(config)
	, m_iterationCount		(5)
	, m_opCount				(50)
	, m_memoryProperties	(vk::getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
	, m_memoryTypeNdx		(0)
	, m_iteration			(0)
	, m_stage				(&MemoryTestInstance::createCommandsAndAllocateMemory)
	, m_resultCollector		(context.getTestContext().getLog())

	, m_memory				(DE_NULL)
{
	TestLog&	log	= context.getTestContext().getLog();
	{
		// Log the test parameters for easier triage of failures.
		const tcu::ScopedLogSection section (log, "TestCaseInfo", "Test Case Info");

		log << TestLog::Message << "Buffer size: " << config.size << TestLog::EndMessage;
		log << TestLog::Message << "Sharing: " << config.sharing << TestLog::EndMessage;
		log << TestLog::Message << "Access: " << config.usage << TestLog::EndMessage;
	}

	{
		// Log all memory heaps and memory types reported by the device; the
		// test will iterate over every memory type listed here.
		const tcu::ScopedLogSection section (log, "MemoryProperties", "Memory Properties");

		for (deUint32 heapNdx = 0; heapNdx < m_memoryProperties.memoryHeapCount; heapNdx++)
		{
			const tcu::ScopedLogSection heapSection (log, "Heap" + de::toString(heapNdx), "Heap " + de::toString(heapNdx));

			log << TestLog::Message << "Size: " << m_memoryProperties.memoryHeaps[heapNdx].size << TestLog::EndMessage;
			log << TestLog::Message << "Flags: " << m_memoryProperties.memoryHeaps[heapNdx].flags << TestLog::EndMessage;
		}

		for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < m_memoryProperties.memoryTypeCount; memoryTypeNdx++)
		{
			const tcu::ScopedLogSection memoryTypeSection (log, "MemoryType" + de::toString(memoryTypeNdx), "Memory type " + de::toString(memoryTypeNdx));

			log << TestLog::Message << "Properties: " << m_memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags << TestLog::EndMessage;
			log << TestLog::Message << "Heap: " << m_memoryProperties.memoryTypes[memoryTypeNdx].heapIndex << TestLog::EndMessage;
		}
	}

	{
		// Build the render context around the universal queue; it is shared
		// across all memory types and iterations.
		const vk::InstanceInterface&			vki					= context.getInstanceInterface();
		const vk::VkPhysicalDevice				physicalDevice		= context.getPhysicalDevice();
		const vk::DeviceInterface&				vkd					= context.getDeviceInterface();
		const vk::VkDevice						device				= context.getDevice();
		const vk::VkQueue						queue				= context.getUniversalQueue();
		const deUint32							queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
		vector<pair<deUint32, vk::VkQueue> >	queues;

		queues.push_back(std::make_pair(queueFamilyIndex, queue));

		m_renderContext = MovePtr<Context>(new Context(vki, vkd, physicalDevice, device, queue, queueFamilyIndex, queues, context.getBinaryCollection()));
	}
}
9305
MemoryTestInstance::~MemoryTestInstance (void)
{
	// Waits for the device to go idle and frees commands, prepare context and
	// memory in case the test ended mid-iteration.
	resetResources();
}
9310
// Stage 1: for the current memory type, determine the maximum buffer/image
// sizes it supports, allocate the Memory object and generate the randomized
// command stream. Skips (via nextMemoryType) memory types that cannot support
// any of the requested usages; records a failure and moves on if command
// creation throws. On success, advances to the prepare stage.
bool MemoryTestInstance::createCommandsAndAllocateMemory (void)
{
	const vk::VkDevice							device				= m_context.getDevice();
	TestLog&									log					= m_context.getTestContext().getLog();
	const vk::InstanceInterface&				vki					= m_context.getInstanceInterface();
	const vk::VkPhysicalDevice					physicalDevice		= m_context.getPhysicalDevice();
	const vk::DeviceInterface&					vkd					= m_context.getDeviceInterface();
	const vk::VkPhysicalDeviceMemoryProperties	memoryProperties	= vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
	const tcu::ScopedLogSection					section				(log, "MemoryType" + de::toString(m_memoryTypeNdx) + "CreateCommands" + de::toString(m_iteration),
																		  "Memory type " + de::toString(m_memoryTypeNdx) + " create commands iteration " + de::toString(m_iteration));
	const vector<deUint32>&						queues				= m_renderContext->getQueueFamilies();

	DE_ASSERT(m_commands.empty());

	// Host access requires a host-visible memory type; skip others outright.
	if (m_config.usage & (USAGE_HOST_READ | USAGE_HOST_WRITE)
		&& !(memoryProperties.memoryTypes[m_memoryTypeNdx].propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
	{
		log << TestLog::Message << "Memory type not supported" << TestLog::EndMessage;

		return nextMemoryType();
	}
	else
	{
		try
		{
			const vk::VkBufferUsageFlags	bufferUsage		= usageToBufferUsageFlags(m_config.usage);
			const vk::VkImageUsageFlags		imageUsage		= usageToImageUsageFlags(m_config.usage);
			// Probe the largest buffer / RGBA8 image this memory type can back;
			// zero when the corresponding usage is not requested.
			const vk::VkDeviceSize			maxBufferSize	= bufferUsage != 0
															? roundBufferSizeToWxHx4(findMaxBufferSize(vkd, device, bufferUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx))
															: 0;
			const IVec2						maxImageSize	= imageUsage != 0
															? findMaxRGBA8ImageSize(vkd, device, imageUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx)
															: IVec2(0, 0);

			log << TestLog::Message << "Max buffer size: " << maxBufferSize << TestLog::EndMessage;
			log << TestLog::Message << "Max RGBA8 image size: " << maxImageSize << TestLog::EndMessage;

			// Skip tests if there are no supported operations
			if (maxBufferSize == 0
				&& maxImageSize[0] == 0
				&& (m_config.usage & (USAGE_HOST_READ|USAGE_HOST_WRITE)) == 0)
			{
				log << TestLog::Message << "Skipping memory type. None of the usages are supported." << TestLog::EndMessage;

				return nextMemoryType();
			}
			else
			{
				// Seed depends on both memory type and iteration so every
				// (type, iteration) pair gets a distinct command stream.
				const deUint32	seed	= 2830980989u ^ deUint32Hash((deUint32)(m_iteration) * m_memoryProperties.memoryTypeCount +  m_memoryTypeNdx);

				m_memory	= MovePtr<Memory>(new Memory(vki, vkd, physicalDevice, device, m_config.size, m_memoryTypeNdx, maxBufferSize, maxImageSize[0], maxImageSize[1]));

				log << TestLog::Message << "Create commands" << TestLog::EndMessage;
				createCommands(m_commands, seed, *m_memory, m_config.usage, m_config.sharing, m_opCount);

				m_stage = &MemoryTestInstance::prepare;
				return true;
			}
		}
		catch (const tcu::TestError& e)
		{
			// Record the failure and continue with the next memory type rather
			// than aborting the whole test case.
			m_resultCollector.fail("Failed, got exception: " + string(e.getMessage()));
			return nextMemoryType();
		}
	}
}
9377
9378bool MemoryTestInstance::prepare (void)
9379{
9380	TestLog&					log		= m_context.getTestContext().getLog();
9381	const tcu::ScopedLogSection	section	(log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Prepare" + de::toString(m_iteration),
9382											  "Memory type " + de::toString(m_memoryTypeNdx) + " prepare iteration" + de::toString(m_iteration));
9383
9384	m_prepareContext = MovePtr<PrepareContext>(new PrepareContext(*m_renderContext, *m_memory));
9385
9386	DE_ASSERT(!m_commands.empty());
9387
9388	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9389	{
9390		Command& command = *m_commands[cmdNdx];
9391
9392		try
9393		{
9394			command.prepare(*m_prepareContext);
9395		}
9396		catch (const tcu::TestError& e)
9397		{
9398			m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to prepare, got exception: " + string(e.getMessage()));
9399			return nextMemoryType();
9400		}
9401	}
9402
9403	m_stage = &MemoryTestInstance::execute;
9404	return true;
9405}
9406
9407bool MemoryTestInstance::execute (void)
9408{
9409	TestLog&					log				= m_context.getTestContext().getLog();
9410	const tcu::ScopedLogSection	section			(log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Execute" + de::toString(m_iteration),
9411													  "Memory type " + de::toString(m_memoryTypeNdx) + " execute iteration " + de::toString(m_iteration));
9412	ExecuteContext				executeContext	(*m_renderContext);
9413	const vk::VkDevice			device			= m_context.getDevice();
9414	const vk::DeviceInterface&	vkd				= m_context.getDeviceInterface();
9415
9416	DE_ASSERT(!m_commands.empty());
9417
9418	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9419	{
9420		Command& command = *m_commands[cmdNdx];
9421
9422		try
9423		{
9424			command.execute(executeContext);
9425		}
9426		catch (const tcu::TestError& e)
9427		{
9428			m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to execute, got exception: " + string(e.getMessage()));
9429			return nextIteration();
9430		}
9431	}
9432
9433	VK_CHECK(vkd.deviceWaitIdle(device));
9434
9435	m_stage = &MemoryTestInstance::verify;
9436	return true;
9437}
9438
9439bool MemoryTestInstance::verify (void)
9440{
9441	DE_ASSERT(!m_commands.empty());
9442
9443	TestLog&					log				= m_context.getTestContext().getLog();
9444	const tcu::ScopedLogSection	section			(log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Verify" + de::toString(m_iteration),
9445													  "Memory type " + de::toString(m_memoryTypeNdx) + " verify iteration " + de::toString(m_iteration));
9446	VerifyContext				verifyContext	(log, m_resultCollector, *m_renderContext, m_config.size);
9447
9448	log << TestLog::Message << "Begin verify" << TestLog::EndMessage;
9449
9450	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9451	{
9452		Command& command = *m_commands[cmdNdx];
9453
9454		try
9455		{
9456			command.verify(verifyContext, cmdNdx);
9457		}
9458		catch (const tcu::TestError& e)
9459		{
9460			m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to verify, got exception: " + string(e.getMessage()));
9461			return nextIteration();
9462		}
9463	}
9464
9465	return nextIteration();
9466}
9467
9468tcu::TestStatus MemoryTestInstance::iterate (void)
9469{
9470	if ((this->*m_stage)())
9471		return tcu::TestStatus::incomplete();
9472	else
9473		return tcu::TestStatus(m_resultCollector.getResult(), m_resultCollector.getMessage());
9474}
9475
9476struct AddPrograms
9477{
9478	void init (vk::SourceCollections& sources, TestConfig config) const
9479	{
9480		// Vertex buffer rendering
9481		if (config.usage & USAGE_VERTEX_BUFFER)
9482		{
9483			const char* const vertexShader =
9484				"#version 310 es\n"
9485				"layout(location = 0) in highp vec2 a_position;\n"
9486				"void main (void) {\n"
9487				"\tgl_PointSize = 1.0;\n"
9488				"\tgl_Position = vec4(1.998 * a_position - vec2(0.999), 0.0, 1.0);\n"
9489				"}\n";
9490
9491			sources.glslSources.add("vertex-buffer.vert")
9492				<< glu::VertexSource(vertexShader);
9493		}
9494
9495		// Index buffer rendering
9496		if (config.usage & USAGE_INDEX_BUFFER)
9497		{
9498			const char* const vertexShader =
9499				"#version 310 es\n"
9500				"precision highp float;\n"
9501				"void main (void) {\n"
9502				"\tgl_PointSize = 1.0;\n"
9503				"\thighp vec2 pos = vec2(gl_VertexIndex % 256, gl_VertexIndex / 256) / vec2(255.0);\n"
9504				"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9505				"}\n";
9506
9507			sources.glslSources.add("index-buffer.vert")
9508				<< glu::VertexSource(vertexShader);
9509		}
9510
9511		if (config.usage & USAGE_UNIFORM_BUFFER)
9512		{
9513			{
9514				std::ostringstream vertexShader;
9515
9516				vertexShader <<
9517					"#version 310 es\n"
9518					"precision highp float;\n"
9519					"layout(set=0, binding=0) uniform Block\n"
9520					"{\n"
9521					"\thighp uvec4 values[" << de::toString<size_t>(MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4)) << "];\n"
9522					"} block;\n"
9523					"void main (void) {\n"
9524					"\tgl_PointSize = 1.0;\n"
9525					"\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9526					"\thighp uint val;\n"
9527					"\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9528					"\t\tval = vecVal.x;\n"
9529					"\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9530					"\t\tval = vecVal.y;\n"
9531					"\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9532					"\t\tval = vecVal.z;\n"
9533					"\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9534					"\t\tval = vecVal.w;\n"
9535					"\tif ((gl_VertexIndex % 2) == 0)\n"
9536					"\t\tval = val & 0xFFFFu;\n"
9537					"\telse\n"
9538					"\t\tval = val >> 16u;\n"
9539					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9540					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9541					"}\n";
9542
9543				sources.glslSources.add("uniform-buffer.vert")
9544					<< glu::VertexSource(vertexShader.str());
9545			}
9546
9547			{
9548				const size_t		arraySize		= MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4);
9549				const size_t		arrayIntSize	= arraySize * 4;
9550				std::ostringstream	fragmentShader;
9551
9552				fragmentShader <<
9553					"#version 310 es\n"
9554					"precision highp float;\n"
9555					"precision highp int;\n"
9556					"layout(location = 0) out highp vec4 o_color;\n"
9557					"layout(set=0, binding=0) uniform Block\n"
9558					"{\n"
9559					"\thighp uvec4 values[" << arraySize << "];\n"
9560					"} block;\n"
9561					"layout(push_constant) uniform PushC\n"
9562					"{\n"
9563					"\tuint callId;\n"
9564					"\tuint valuesPerPixel;\n"
9565					"} pushC;\n"
9566					"void main (void) {\n"
9567					"\thighp uint id = pushC.callId * (" << arrayIntSize << "u / pushC.valuesPerPixel) + uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9568					"\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (" << arrayIntSize  << "u / pushC.valuesPerPixel))\n"
9569					"\t\tdiscard;\n"
9570					"\thighp uint value = id;\n"
9571					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9572					"\t{\n"
9573					"\t\thighp uvec4 vecVal = block.values[(value / 4u) % " << arraySize << "u];\n"
9574					"\t\tif ((value % 4u) == 0u)\n"
9575					"\t\t\tvalue = vecVal.x;\n"
9576					"\t\telse if ((value % 4u) == 1u)\n"
9577					"\t\t\tvalue = vecVal.y;\n"
9578					"\t\telse if ((value % 4u) == 2u)\n"
9579					"\t\t\tvalue = vecVal.z;\n"
9580					"\t\telse if ((value % 4u) == 3u)\n"
9581					"\t\t\tvalue = vecVal.w;\n"
9582					"\t}\n"
9583					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9584					"\to_color = vec4(valueOut) / vec4(255.0);\n"
9585					"}\n";
9586
9587				sources.glslSources.add("uniform-buffer.frag")
9588					<< glu::FragmentSource(fragmentShader.str());
9589			}
9590		}
9591
9592		if (config.usage & USAGE_STORAGE_BUFFER)
9593		{
9594			{
9595				// Vertex storage buffer rendering
9596				const char* const vertexShader =
9597					"#version 310 es\n"
9598					"precision highp float;\n"
9599					"layout(set=0, binding=0) buffer Block\n"
9600					"{\n"
9601					"\thighp uvec4 values[];\n"
9602					"} block;\n"
9603					"void main (void) {\n"
9604					"\tgl_PointSize = 1.0;\n"
9605					"\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9606					"\thighp uint val;\n"
9607					"\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9608					"\t\tval = vecVal.x;\n"
9609					"\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9610					"\t\tval = vecVal.y;\n"
9611					"\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9612					"\t\tval = vecVal.z;\n"
9613					"\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9614					"\t\tval = vecVal.w;\n"
9615					"\tif ((gl_VertexIndex % 2) == 0)\n"
9616					"\t\tval = val & 0xFFFFu;\n"
9617					"\telse\n"
9618					"\t\tval = val >> 16u;\n"
9619					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9620					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9621					"}\n";
9622
9623				sources.glslSources.add("storage-buffer.vert")
9624					<< glu::VertexSource(vertexShader);
9625			}
9626
9627			{
9628				std::ostringstream	fragmentShader;
9629
9630				fragmentShader <<
9631					"#version 310 es\n"
9632					"precision highp float;\n"
9633					"precision highp int;\n"
9634					"layout(location = 0) out highp vec4 o_color;\n"
9635					"layout(set=0, binding=0) buffer Block\n"
9636					"{\n"
9637					"\thighp uvec4 values[];\n"
9638					"} block;\n"
9639					"layout(push_constant) uniform PushC\n"
9640					"{\n"
9641					"\tuint valuesPerPixel;\n"
9642					"\tuint bufferSize;\n"
9643					"} pushC;\n"
9644					"void main (void) {\n"
9645					"\thighp uint arrayIntSize = pushC.bufferSize / 4u;\n"
9646					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9647					"\thighp uint value = id;\n"
9648					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9649					"\t{\n"
9650					"\t\thighp uvec4 vecVal = block.values[(value / 4u) % (arrayIntSize / 4u)];\n"
9651					"\t\tif ((value % 4u) == 0u)\n"
9652					"\t\t\tvalue = vecVal.x;\n"
9653					"\t\telse if ((value % 4u) == 1u)\n"
9654					"\t\t\tvalue = vecVal.y;\n"
9655					"\t\telse if ((value % 4u) == 2u)\n"
9656					"\t\t\tvalue = vecVal.z;\n"
9657					"\t\telse if ((value % 4u) == 3u)\n"
9658					"\t\t\tvalue = vecVal.w;\n"
9659					"\t}\n"
9660					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9661					"\to_color = vec4(valueOut) / vec4(255.0);\n"
9662					"}\n";
9663
9664				sources.glslSources.add("storage-buffer.frag")
9665					<< glu::FragmentSource(fragmentShader.str());
9666			}
9667		}
9668
9669		if (config.usage & USAGE_UNIFORM_TEXEL_BUFFER)
9670		{
9671			{
9672				// Vertex uniform texel buffer rendering
9673				const char* const vertexShader =
9674					"#version 310 es\n"
9675					"#extension GL_EXT_texture_buffer : require\n"
9676					"precision highp float;\n"
9677					"layout(set=0, binding=0) uniform highp usamplerBuffer u_sampler;\n"
9678					"void main (void) {\n"
9679					"\tgl_PointSize = 1.0;\n"
9680					"\thighp uint val = texelFetch(u_sampler, gl_VertexIndex).x;\n"
9681					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9682					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9683					"}\n";
9684
9685				sources.glslSources.add("uniform-texel-buffer.vert")
9686					<< glu::VertexSource(vertexShader);
9687			}
9688
9689			{
9690				// Fragment uniform texel buffer rendering
9691				const char* const fragmentShader =
9692					"#version 310 es\n"
9693					"#extension GL_EXT_texture_buffer : require\n"
9694					"precision highp float;\n"
9695					"precision highp int;\n"
9696					"layout(set=0, binding=0) uniform highp usamplerBuffer u_sampler;\n"
9697					"layout(location = 0) out highp vec4 o_color;\n"
9698					"layout(push_constant) uniform PushC\n"
9699					"{\n"
9700					"\tuint callId;\n"
9701					"\tuint valuesPerPixel;\n"
9702					"\tuint maxTexelCount;\n"
9703					"} pushC;\n"
9704					"void main (void) {\n"
9705					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9706					"\thighp uint value = id;\n"
9707					"\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
9708					"\t\tdiscard;\n"
9709					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9710					"\t{\n"
9711					"\t\tvalue = texelFetch(u_sampler, int(value % uint(textureSize(u_sampler)))).x;\n"
9712					"\t}\n"
9713					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9714					"\to_color = vec4(valueOut) / vec4(255.0);\n"
9715					"}\n";
9716
9717				sources.glslSources.add("uniform-texel-buffer.frag")
9718					<< glu::FragmentSource(fragmentShader);
9719			}
9720		}
9721
9722		if (config.usage & USAGE_STORAGE_TEXEL_BUFFER)
9723		{
9724			{
9725				// Vertex storage texel buffer rendering
9726				const char* const vertexShader =
9727					"#version 450\n"
9728					"#extension GL_EXT_texture_buffer : require\n"
9729					"precision highp float;\n"
9730					"layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
9731					"out gl_PerVertex {\n"
9732					"\tvec4 gl_Position;\n"
9733					"\tfloat gl_PointSize;\n"
9734					"};\n"
9735					"void main (void) {\n"
9736					"\tgl_PointSize = 1.0;\n"
9737					"\thighp uint val = imageLoad(u_sampler, gl_VertexIndex / 2).x;\n"
9738					"\tif (gl_VertexIndex % 2 == 0)\n"
9739					"\t\tval = val & 0xFFFFu;\n"
9740					"\telse\n"
9741					"\t\tval = val >> 16;\n"
9742					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9743					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9744					"}\n";
9745
9746				sources.glslSources.add("storage-texel-buffer.vert")
9747					<< glu::VertexSource(vertexShader);
9748			}
9749			{
9750				// Fragment storage texel buffer rendering
9751				const char* const fragmentShader =
9752					"#version 310 es\n"
9753					"#extension GL_EXT_texture_buffer : require\n"
9754					"precision highp float;\n"
9755					"precision highp int;\n"
9756					"layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
9757					"layout(location = 0) out highp vec4 o_color;\n"
9758					"layout(push_constant) uniform PushC\n"
9759					"{\n"
9760					"\tuint callId;\n"
9761					"\tuint valuesPerPixel;\n"
9762					"\tuint maxTexelCount;\n"
9763					"\tuint width;\n"
9764					"} pushC;\n"
9765					"void main (void) {\n"
9766					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9767					"\thighp uint value = id;\n"
9768					"\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
9769					"\t\tdiscard;\n"
9770					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9771					"\t{\n"
9772					"\t\tvalue = imageLoad(u_sampler, int(value % pushC.width)).x;\n"
9773					"\t}\n"
9774					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9775					"\to_color = vec4(valueOut) / vec4(255.0);\n"
9776					"}\n";
9777
9778				sources.glslSources.add("storage-texel-buffer.frag")
9779					<< glu::FragmentSource(fragmentShader);
9780			}
9781		}
9782
9783		if (config.usage & USAGE_STORAGE_IMAGE)
9784		{
9785			{
9786				// Vertex storage image
9787				const char* const vertexShader =
9788					"#version 450\n"
9789					"precision highp float;\n"
9790					"layout(set=0, binding=0, rgba8) uniform image2D u_image;\n"
9791					"out gl_PerVertex {\n"
9792					"\tvec4 gl_Position;\n"
9793					"\tfloat gl_PointSize;\n"
9794					"};\n"
9795					"void main (void) {\n"
9796					"\tgl_PointSize = 1.0;\n"
9797					"\thighp vec4 val = imageLoad(u_image, ivec2((gl_VertexIndex / 2) / imageSize(u_image).x, (gl_VertexIndex / 2) % imageSize(u_image).x));\n"
9798					"\thighp vec2 pos;\n"
9799					"\tif (gl_VertexIndex % 2 == 0)\n"
9800					"\t\tpos = val.xy;\n"
9801					"\telse\n"
9802					"\t\tpos = val.zw;\n"
9803					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9804					"}\n";
9805
9806				sources.glslSources.add("storage-image.vert")
9807					<< glu::VertexSource(vertexShader);
9808			}
9809			{
9810				// Fragment storage image
9811				const char* const fragmentShader =
9812					"#version 450\n"
9813					"#extension GL_EXT_texture_buffer : require\n"
9814					"precision highp float;\n"
9815					"layout(set=0, binding=0, rgba8) uniform image2D u_image;\n"
9816					"layout(location = 0) out highp vec4 o_color;\n"
9817					"void main (void) {\n"
9818					"\thighp uvec2 size = uvec2(imageSize(u_image).x, imageSize(u_image).y);\n"
9819					"\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
9820					"\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
9821					"\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
9822					"\t{\n"
9823					"\t\thighp vec4 floatValue = imageLoad(u_image, ivec2(int((value.z *  256u + (value.x ^ value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)));\n"
9824					"\t\tvalue = uvec4(uint(floatValue.x * 255.0), uint(floatValue.y * 255.0), uint(floatValue.z * 255.0), uint(floatValue.w * 255.0));\n"
9825					"\t}\n"
9826					"\to_color = vec4(value) / vec4(255.0);\n"
9827					"}\n";
9828
9829				sources.glslSources.add("storage-image.frag")
9830					<< glu::FragmentSource(fragmentShader);
9831			}
9832		}
9833
9834		if (config.usage & USAGE_SAMPLED_IMAGE)
9835		{
9836			{
				// Vertex sampled image
9838				const char* const vertexShader =
9839					"#version 450\n"
9840					"precision highp float;\n"
9841					"layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
9842					"out gl_PerVertex {\n"
9843					"\tvec4 gl_Position;\n"
9844					"\tfloat gl_PointSize;\n"
9845					"};\n"
9846					"void main (void) {\n"
9847					"\tgl_PointSize = 1.0;\n"
9848					"\thighp vec4 val = texelFetch(u_sampler, ivec2((gl_VertexIndex / 2) / textureSize(u_sampler, 0).x, (gl_VertexIndex / 2) % textureSize(u_sampler, 0).x), 0);\n"
9849					"\thighp vec2 pos;\n"
9850					"\tif (gl_VertexIndex % 2 == 0)\n"
9851					"\t\tpos = val.xy;\n"
9852					"\telse\n"
9853					"\t\tpos = val.zw;\n"
9854					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9855					"}\n";
9856
9857				sources.glslSources.add("sampled-image.vert")
9858					<< glu::VertexSource(vertexShader);
9859			}
9860			{
				// Fragment sampled image
9862				const char* const fragmentShader =
9863					"#version 450\n"
9864					"#extension GL_EXT_texture_buffer : require\n"
9865					"precision highp float;\n"
9866					"layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
9867					"layout(location = 0) out highp vec4 o_color;\n"
9868					"void main (void) {\n"
9869					"\thighp uvec2 size = uvec2(textureSize(u_sampler, 0).x, textureSize(u_sampler, 0).y);\n"
9870					"\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
9871					"\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
9872					"\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
9873					"\t{\n"
9874					"\t\thighp vec4 floatValue = texelFetch(u_sampler, ivec2(int((value.z *  256u + (value.x ^ value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)), 0);\n"
9875					"\t\tvalue = uvec4(uint(floatValue.x * 255.0), uint(floatValue.y * 255.0), uint(floatValue.z * 255.0), uint(floatValue.w * 255.0));\n"
9876					"\t}\n"
9877					"\to_color = vec4(value) / vec4(255.0);\n"
9878					"}\n";
9879
9880				sources.glslSources.add("sampled-image.frag")
9881					<< glu::FragmentSource(fragmentShader);
9882			}
9883		}
9884
9885		{
9886			const char* const vertexShader =
9887				"#version 450\n"
9888				"out gl_PerVertex {\n"
9889				"\tvec4 gl_Position;\n"
9890				"};\n"
9891				"precision highp float;\n"
9892				"void main (void) {\n"
9893				"\tgl_Position = vec4(((gl_VertexIndex + 2) / 3) % 2 == 0 ? -1.0 : 1.0,\n"
9894				"\t                   ((gl_VertexIndex + 1) / 3) % 2 == 0 ? -1.0 : 1.0, 0.0, 1.0);\n"
9895				"}\n";
9896
9897			sources.glslSources.add("render-quad.vert")
9898				<< glu::VertexSource(vertexShader);
9899		}
9900
9901		{
9902			const char* const fragmentShader =
9903				"#version 310 es\n"
9904				"layout(location = 0) out highp vec4 o_color;\n"
9905				"void main (void) {\n"
9906				"\to_color = vec4(1.0);\n"
9907				"}\n";
9908
9909			sources.glslSources.add("render-white.frag")
9910				<< glu::FragmentSource(fragmentShader);
9911		}
9912	}
9913};
9914
9915} // anonymous
9916
9917tcu::TestCaseGroup* createPipelineBarrierTests (tcu::TestContext& testCtx)
9918{
9919	de::MovePtr<tcu::TestCaseGroup>	group			(new tcu::TestCaseGroup(testCtx, "pipeline_barrier", "Pipeline barrier tests."));
9920	const vk::VkDeviceSize			sizes[]			=
9921	{
9922		1024,		// 1K
9923		8*1024,		// 8K
9924		64*1024,	// 64K
9925		1024*1024,	// 1M
9926	};
9927	const Usage						usages[]		=
9928	{
9929		USAGE_HOST_READ,
9930		USAGE_HOST_WRITE,
9931		USAGE_TRANSFER_SRC,
9932		USAGE_TRANSFER_DST,
9933		USAGE_VERTEX_BUFFER,
9934		USAGE_INDEX_BUFFER,
9935		USAGE_UNIFORM_BUFFER,
9936		USAGE_UNIFORM_TEXEL_BUFFER,
9937		USAGE_STORAGE_BUFFER,
9938		USAGE_STORAGE_TEXEL_BUFFER,
9939		USAGE_STORAGE_IMAGE,
9940		USAGE_SAMPLED_IMAGE
9941	};
9942	const Usage						readUsages[]		=
9943	{
9944		USAGE_HOST_READ,
9945		USAGE_TRANSFER_SRC,
9946		USAGE_VERTEX_BUFFER,
9947		USAGE_INDEX_BUFFER,
9948		USAGE_UNIFORM_BUFFER,
9949		USAGE_UNIFORM_TEXEL_BUFFER,
9950		USAGE_STORAGE_BUFFER,
9951		USAGE_STORAGE_TEXEL_BUFFER,
9952		USAGE_STORAGE_IMAGE,
9953		USAGE_SAMPLED_IMAGE
9954	};
9955
9956	const Usage						writeUsages[]	=
9957	{
9958		USAGE_HOST_WRITE,
9959		USAGE_TRANSFER_DST
9960	};
9961
9962	for (size_t writeUsageNdx = 0; writeUsageNdx < DE_LENGTH_OF_ARRAY(writeUsages); writeUsageNdx++)
9963	{
9964		const Usage	writeUsage	= writeUsages[writeUsageNdx];
9965
9966		for (size_t readUsageNdx = 0; readUsageNdx < DE_LENGTH_OF_ARRAY(readUsages); readUsageNdx++)
9967		{
9968			const Usage						readUsage		= readUsages[readUsageNdx];
9969			const Usage						usage			= writeUsage | readUsage;
9970			const string					usageGroupName	(usageToName(usage));
9971			de::MovePtr<tcu::TestCaseGroup>	usageGroup		(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
9972
9973			for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
9974			{
9975				const vk::VkDeviceSize	size		= sizes[sizeNdx];
9976				const string			testName	(de::toString((deUint64)(size)));
9977				const TestConfig		config		=
9978				{
9979					usage,
9980					size,
9981					vk::VK_SHARING_MODE_EXCLUSIVE
9982				};
9983
9984				usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE,  testName, testName, AddPrograms(), config));
9985			}
9986
9987			group->addChild(usageGroup.get());
9988			usageGroup.release();
9989		}
9990	}
9991
9992	{
9993		Usage all = (Usage)0;
9994
9995		for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usages); usageNdx++)
9996			all = all | usages[usageNdx];
9997
9998		{
9999			const string					usageGroupName	("all");
10000			de::MovePtr<tcu::TestCaseGroup>	usageGroup		(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
10001
10002			for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
10003			{
10004				const vk::VkDeviceSize	size		= sizes[sizeNdx];
10005				const string			testName	(de::toString((deUint64)(size)));
10006				const TestConfig		config		=
10007				{
10008					all,
10009					size,
10010					vk::VK_SHARING_MODE_EXCLUSIVE
10011				};
10012
10013				usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE,  testName, testName, AddPrograms(), config));
10014			}
10015
10016			group->addChild(usageGroup.get());
10017			usageGroup.release();
10018		}
10019
10020		{
10021			const string					usageGroupName	("all_device");
10022			de::MovePtr<tcu::TestCaseGroup>	usageGroup		(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
10023
10024			for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
10025			{
10026				const vk::VkDeviceSize	size		= sizes[sizeNdx];
10027				const string			testName	(de::toString((deUint64)(size)));
10028				const TestConfig		config		=
10029				{
10030					(Usage)(all & (~(USAGE_HOST_READ|USAGE_HOST_WRITE))),
10031					size,
10032					vk::VK_SHARING_MODE_EXCLUSIVE
10033				};
10034
10035				usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE,  testName, testName, AddPrograms(), config));
10036			}
10037
10038			group->addChild(usageGroup.get());
10039			usageGroup.release();
10040		}
10041	}
10042
10043	return group.release();
10044}
10045
10046} // memory
10047} // vkt
10048