1/*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2016 The Khronos Group Inc.
6 * Copyright (c) 2016 The Android Open Source Project
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 *      http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
22 * \brief Image load/store Tests
23 *//*--------------------------------------------------------------------*/
24
25#include "vktImageLoadStoreTests.hpp"
26#include "vktTestCaseUtil.hpp"
27#include "vktImageTestsUtil.hpp"
28#include "vktImageTexture.hpp"
29
30#include "vkDefs.hpp"
31#include "vkRef.hpp"
32#include "vkRefUtil.hpp"
33#include "vkPlatform.hpp"
34#include "vkPrograms.hpp"
35#include "vkMemUtil.hpp"
36#include "vkBuilderUtil.hpp"
37#include "vkQueryUtil.hpp"
38#include "vkImageUtil.hpp"
39
40#include "deUniquePtr.hpp"
41#include "deSharedPtr.hpp"
42#include "deStringUtil.hpp"
43
44#include "tcuImageCompare.hpp"
45#include "tcuTexture.hpp"
46#include "tcuTextureUtil.hpp"
47#include "tcuFloat.hpp"
48
49#include <string>
50#include <vector>
51
52using namespace vk;
53
54namespace vkt
55{
56namespace image
57{
58namespace
59{
60
// Ref-counted wrappers around uniquely-owned Vulkan handles, so per-layer
// descriptor sets and image views can be stored in std::vector (which needs
// copyable elements).
typedef de::SharedPtr<Unique<VkDescriptorSet> >	SharedVkDescriptorSet;
typedef de::SharedPtr<Unique<VkImageView> >		SharedVkImageView;
63
64template<typename T>
65inline de::SharedPtr<Unique<T> > makeVkSharedPtr (Move<T> vkMove)
66{
67	return de::SharedPtr<Unique<T> >(new Unique<T>(vkMove));
68}
69
//! Build a VkImageCreateInfo for the given test texture: optimal tiling, a single
//! mip level, one array layer per texture layer, exclusive sharing, and the
//! cube-compatible flag added automatically for cube / cube-array textures.
inline VkImageCreateInfo makeImageCreateInfo (const Texture& texture, const VkFormat format, const VkImageUsageFlags usage, const VkImageCreateFlags flags)
{
	const VkImageCreateInfo imageParams =
	{
		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,														// VkStructureType			sType;
		DE_NULL,																					// const void*				pNext;
		(isCube(texture) ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0u) | flags,	// VkImageCreateFlags		flags;
		mapImageType(texture.type()),																// VkImageType				imageType;
		format,																						// VkFormat					format;
		makeExtent3D(texture.layerSize()),															// VkExtent3D				extent;
		1u,																							// deUint32					mipLevels;
		(deUint32)texture.numLayers(),																// deUint32					arrayLayers;
		VK_SAMPLE_COUNT_1_BIT,																		// VkSampleCountFlagBits	samples;
		VK_IMAGE_TILING_OPTIMAL,																	// VkImageTiling			tiling;
		usage,																						// VkImageUsageFlags		usage;
		VK_SHARING_MODE_EXCLUSIVE,																	// VkSharingMode			sharingMode;
		0u,																							// deUint32					queueFamilyIndexCount;
		DE_NULL,																					// const deUint32*			pQueueFamilyIndices;
		VK_IMAGE_LAYOUT_UNDEFINED,																	// VkImageLayout			initialLayout;
	};
	return imageParams;
}
92
//! A single copy region covering all layers of the texture at mip level zero.
inline VkBufferImageCopy makeBufferImageCopy (const Texture& texture)
{
	return image::makeBufferImageCopy(makeExtent3D(texture.layerSize()), texture.numLayers());
}
97
98ImageType getImageTypeForSingleLayer (const ImageType imageType)
99{
100	switch (imageType)
101	{
102		case IMAGE_TYPE_1D:
103		case IMAGE_TYPE_1D_ARRAY:
104			return IMAGE_TYPE_1D;
105
106		case IMAGE_TYPE_2D:
107		case IMAGE_TYPE_2D_ARRAY:
108		case IMAGE_TYPE_CUBE:
109		case IMAGE_TYPE_CUBE_ARRAY:
110			// A single layer for cube is a 2d face
111			return IMAGE_TYPE_2D;
112
113		case IMAGE_TYPE_3D:
114			return IMAGE_TYPE_3D;
115
116		case IMAGE_TYPE_BUFFER:
117			return IMAGE_TYPE_BUFFER;
118
119		default:
120			DE_FATAL("Internal test error");
121			return IMAGE_TYPE_LAST;
122	}
123}
124
125float computeStoreColorScale (const VkFormat format, const tcu::IVec3 imageSize)
126{
127	const int maxImageDimension = de::max(imageSize.x(), de::max(imageSize.y(), imageSize.z()));
128	const float div = static_cast<float>(maxImageDimension - 1);
129
130	if (isUnormFormat(format))
131		return 1.0f / div;
132	else if (isSnormFormat(format))
133		return 2.0f / div;
134	else
135		return 1.0f;
136}
137
138inline float computeStoreColorBias (const VkFormat format)
139{
140	return isSnormFormat(format) ? -1.0f : 0.0f;
141}
142
143inline bool isIntegerFormat (const VkFormat format)
144{
145	return isIntFormat(format) || isUintFormat(format);
146}
147
//! \return a sub-access selecting a single layer (or, for 3d images, a single
//!         depth slice) of the given pixel buffer access.
tcu::ConstPixelBufferAccess getLayerOrSlice (const Texture& texture, const tcu::ConstPixelBufferAccess access, const int layer)
{
	switch (texture.type())
	{
		case IMAGE_TYPE_1D:
		case IMAGE_TYPE_2D:
		case IMAGE_TYPE_BUFFER:
			// Not layered
			DE_ASSERT(layer == 0);
			return access;

		case IMAGE_TYPE_1D_ARRAY:
			// 1d array data is laid out with the layer index as the y coordinate.
			return tcu::getSubregion(access, 0, layer, access.getWidth(), 1);

		case IMAGE_TYPE_2D_ARRAY:
		case IMAGE_TYPE_CUBE:
		case IMAGE_TYPE_CUBE_ARRAY:
		case IMAGE_TYPE_3D:			// 3d texture is treated as if depth was the layers
			return tcu::getSubregion(access, 0, 0, layer, access.getWidth(), access.getHeight(), 1);

		default:
			DE_FATAL("Internal test error");
			return tcu::ConstPixelBufferAccess();
	}
}
173
174std::string getFormatCaseName (const VkFormat format)
175{
176	const std::string fullName = getFormatName(format);
177
178	DE_ASSERT(de::beginsWith(fullName, "VK_FORMAT_"));
179
180	return de::toLower(fullName.substr(10));
181}
182
//! \return true if all layers match in both pixel buffers
bool comparePixelBuffers (tcu::TestLog&						log,
						  const Texture&					texture,
						  const VkFormat					format,
						  const tcu::ConstPixelBufferAccess	reference,
						  const tcu::ConstPixelBufferAccess	result)
{
	DE_ASSERT(reference.getFormat() == result.getFormat());
	DE_ASSERT(reference.getSize() == result.getSize());

	const bool intFormat = isIntegerFormat(format);
	const bool is3d = (texture.type() == IMAGE_TYPE_3D);
	// For 3d images the depth slices play the role of layers.
	const int numLayersOrSlices = (is3d ? texture.size().z() : texture.numLayers());
	const int numCubeFaces = 6;

	int passedLayers = 0;
	for (int layerNdx = 0; layerNdx < numLayersOrSlices; ++layerNdx)
	{
		const std::string comparisonName = "Comparison" + de::toString(layerNdx);
		// Log label: face/cube indices for cube maps, slice index for 3d images,
		// plain layer index otherwise.
		const std::string comparisonDesc = "Image Comparison, " +
			(isCube(texture) ? "face " + de::toString(layerNdx % numCubeFaces) + ", cube " + de::toString(layerNdx / numCubeFaces) :
			is3d			 ? "slice " + de::toString(layerNdx) : "layer " + de::toString(layerNdx));

		const tcu::ConstPixelBufferAccess refLayer = getLayerOrSlice(texture, reference, layerNdx);
		const tcu::ConstPixelBufferAccess resultLayer = getLayerOrSlice(texture, result, layerNdx);

		bool ok = false;
		// Integer formats must match exactly (zero threshold); float results get a small tolerance.
		if (intFormat)
			ok = tcu::intThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::UVec4(0), tcu::COMPARE_LOG_RESULT);
		else
			ok = tcu::floatThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);

		if (ok)
			++passedLayers;
	}
	return passedLayers == numLayersOrSlices;
}
220
221//!< Zero out invalid pixels in the image (denormalized, infinite, NaN values)
222void replaceBadFloatReinterpretValues (const tcu::PixelBufferAccess access)
223{
224	DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_FLOATING_POINT);
225
226	for (int z = 0; z < access.getDepth(); ++z)
227	for (int y = 0; y < access.getHeight(); ++y)
228	for (int x = 0; x < access.getWidth(); ++x)
229	{
230		const tcu::Vec4 color(access.getPixel(x, y, z));
231		tcu::Vec4 newColor = color;
232
233		for (int i = 0; i < 4; ++i)
234		{
235			if (access.getFormat().type == tcu::TextureFormat::HALF_FLOAT)
236			{
237				const tcu::Float16 f(color[i]);
238				if (f.isDenorm() || f.isInf() || f.isNaN())
239					newColor[i] = 0.0f;
240			}
241			else
242			{
243				const tcu::Float32 f(color[i]);
244				if (f.isDenorm() || f.isInf() || f.isNaN())
245					newColor[i] = 0.0f;
246			}
247		}
248
249		if (newColor != color)
250			access.setPixel(newColor, x, y, z);
251	}
252}
253
254//!< replace invalid pixels in the image (-128)
255void replaceSnormReinterpretValues (const tcu::PixelBufferAccess access)
256{
257	DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT);
258
259	for (int z = 0; z < access.getDepth(); ++z)
260	for (int y = 0; y < access.getHeight(); ++y)
261	for (int x = 0; x < access.getWidth(); ++x)
262	{
263		const tcu::IVec4 color(access.getPixelInt(x, y, z));
264		tcu::IVec4 newColor = color;
265
266		for (int i = 0; i < 4; ++i)
267		{
268			const deInt32 oldColor(color[i]);
269			if (oldColor == -128) newColor[i] = -127;
270		}
271
272		if (newColor != color)
273		access.setPixel(newColor, x, y, z);
274	}
275}
276
//! Generate the reference image: each texel gets the same XOR pattern that the
//! compute shader writes (see StoreTest::initPrograms), scaled/biased for
//! normalized formats. If the image will later be re-interpreted in a different
//! readFormat, texels whose bit pattern would be invalid in that format are
//! sanitized first.
tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat, const VkFormat readFormat)
{
	// Generate a reference image data using the storage format

	tcu::TextureLevel reference(mapVkFormat(imageFormat), imageSize.x(), imageSize.y(), imageSize.z());
	const tcu::PixelBufferAccess access = reference.getAccess();

	const float storeColorScale = computeStoreColorScale(imageFormat, imageSize);
	const float storeColorBias = computeStoreColorBias(imageFormat);

	const bool intFormat = isIntegerFormat(imageFormat);
	const int xMax = imageSize.x() - 1;
	const int yMax = imageSize.y() - 1;

	for (int z = 0; z < imageSize.z(); ++z)
	for (int y = 0; y < imageSize.y(); ++y)
	for (int x = 0; x < imageSize.x(); ++x)
	{
		// XOR pattern; must stay in sync with colorBaseExpr in StoreTest::initPrograms.
		const tcu::IVec4 color(x^y^z, (xMax - x)^y^z, x^(yMax - y)^z, (xMax - x)^(yMax - y)^z);

		if (intFormat)
			access.setPixel(color, x, y, z);
		else
			access.setPixel(color.asFloat()*storeColorScale + storeColorBias, x, y, z);
	}

	// If the image is to be accessed as a float texture, get rid of invalid values

	// Note: the raw texel data is re-interpreted in readFormat (same pixel size
	// assumed), and invalid bit patterns are rewritten in place.
	if (isFloatFormat(readFormat) && imageFormat != readFormat)
		replaceBadFloatReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));
	if (isSnormFormat(readFormat) && imageFormat != readFormat)
		replaceSnormReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));

	return reference;
}
312
//! Reference image for the common case where the image is read back in its own
//! format (no reinterpretation, so no sanitizing pass is needed).
inline tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat)
{
	return generateReferenceImage(imageSize, imageFormat, imageFormat);
}
317
318void flipHorizontally (const tcu::PixelBufferAccess access)
319{
320	const int xMax = access.getWidth() - 1;
321	const int halfWidth = access.getWidth() / 2;
322
323	if (isIntegerFormat(mapTextureFormat(access.getFormat())))
324		for (int z = 0; z < access.getDepth(); z++)
325		for (int y = 0; y < access.getHeight(); y++)
326		for (int x = 0; x < halfWidth; x++)
327		{
328			const tcu::UVec4 temp = access.getPixelUint(xMax - x, y, z);
329			access.setPixel(access.getPixelUint(x, y, z), xMax - x, y, z);
330			access.setPixel(temp, x, y, z);
331		}
332	else
333		for (int z = 0; z < access.getDepth(); z++)
334		for (int y = 0; y < access.getHeight(); y++)
335		for (int x = 0; x < halfWidth; x++)
336		{
337			const tcu::Vec4 temp = access.getPixel(xMax - x, y, z);
338			access.setPixel(access.getPixel(x, y, z), xMax - x, y, z);
339			access.setPixel(temp, x, y, z);
340		}
341}
342
#if defined(DE_DEBUG)
//! Debug-build sanity check (used only inside DE_ASSERT): scale/bias other than
//! the identity (1, 0) is only legitimate for normalized fixed-point formats.
inline bool colorScaleAndBiasAreValid (const VkFormat format, const float colorScale, const float colorBias)
{
	// Only normalized (fixed-point) formats may have scale/bias
	const bool integerOrFloatFormat = isIntFormat(format) || isUintFormat(format) || isFloatFormat(format);
	return !integerOrFloatFormat || (colorScale == 1.0f && colorBias == 0.0f);
}
#endif
351
352inline bool formatsAreCompatible (const VkFormat format0, const VkFormat format1)
353{
354	return format0 == format1 || mapVkFormat(format0).getPixelSize() == mapVkFormat(format1).getPixelSize();
355}
356
//! Record an image barrier between two compute dispatches that write the same
//! image; layout stays GENERAL. Note the destination access mask is 0 — this
//! makes prior shader writes available but orders only execution for the next
//! dispatch (the following invocations write rather than read the image).
void commandImageWriteBarrierBetweenShaderInvocations (Context& context, const VkCommandBuffer cmdBuffer, const VkImage image, const Texture& texture)
{
	const DeviceInterface& vk = context.getDeviceInterface();

	// Covers mip level 0 and all array layers of the test image.
	const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
	const VkImageMemoryBarrier shaderWriteBarrier = makeImageMemoryBarrier(
		VK_ACCESS_SHADER_WRITE_BIT, 0u,
		VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL,
		image, fullImageSubresourceRange);

	vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier);
}
369
//! Record a buffer barrier making compute-shader writes to the buffer visible
//! to subsequent host reads (compute stage -> host stage).
void commandBufferWriteBarrierBeforeHostRead (Context& context, const VkCommandBuffer cmdBuffer, const VkBuffer buffer, const VkDeviceSize bufferSizeBytes)
{
	const DeviceInterface& vk = context.getDeviceInterface();

	const VkBufferMemoryBarrier shaderWriteBarrier = makeBufferMemoryBarrier(
		VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
		buffer, 0ull, bufferSizeBytes);

	vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
}
380
//! Copy all layers of an image to a buffer.
//! Records: (1) a barrier transitioning the image GENERAL -> TRANSFER_SRC_OPTIMAL
//! and ordering shader writes before the transfer, (2) the copy itself, and
//! (3) a buffer barrier making the transfer writes visible to host reads.
void commandCopyImageToBuffer (Context&					context,
							   const VkCommandBuffer	cmdBuffer,
							   const VkImage			image,
							   const VkBuffer			buffer,
							   const VkDeviceSize		bufferSizeBytes,
							   const Texture&			texture)
{
	const DeviceInterface& vk = context.getDeviceInterface();

	// Mip level 0, all array layers.
	const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
	const VkImageMemoryBarrier prepareForTransferBarrier = makeImageMemoryBarrier(
		VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
		VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
		image, fullImageSubresourceRange);

	const VkBufferImageCopy copyRegion = makeBufferImageCopy(texture);

	const VkBufferMemoryBarrier copyBarrier = makeBufferMemoryBarrier(
		VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
		buffer, 0ull, bufferSizeBytes);

	vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &prepareForTransferBarrier);
	vk.cmdCopyImageToBuffer(cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, 1u, &copyRegion);
	vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &copyBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
}
407
408//! Minimum chunk size is determined by the offset alignment requirements.
409VkDeviceSize getOptimalUniformBufferChunkSize (Context& context, VkDeviceSize minimumRequiredChunkSizeBytes)
410{
411	const VkPhysicalDeviceProperties properties = getPhysicalDeviceProperties(context.getInstanceInterface(), context.getPhysicalDevice());
412	const VkDeviceSize alignment = properties.limits.minUniformBufferOffsetAlignment;
413
414	if (minimumRequiredChunkSizeBytes > alignment)
415		return alignment + (minimumRequiredChunkSizeBytes / alignment) * alignment;
416	else
417		return alignment;
418}
419
//! Test case: a compute shader writes a procedurally generated XOR pattern to a
//! storage image (or texel buffer) with imageStore(); the instance then reads
//! the data back and compares it against a host-generated reference.
class StoreTest : public TestCase
{
public:
	enum TestFlags
	{
		FLAG_SINGLE_LAYER_BIND = 0x1,	//!< Run the shader multiple times, each time binding a different layer.
	};

							StoreTest			(tcu::TestContext&	testCtx,
												 const std::string&	name,
												 const std::string&	description,
												 const Texture&		texture,
												 const VkFormat		format,
												 const TestFlags	flags = static_cast<TestFlags>(0));

	//! Generate the GLSL compute shader (added to the collection as "comp").
	void					initPrograms		(SourceCollections& programCollection) const;

	TestInstance*			createInstance		(Context&			context) const;

private:
	const Texture			m_texture;		//!< Shape/layering of the image under test
	const VkFormat			m_format;		//!< Storage format used by the shader
	const bool				m_singleLayerBind;	//!< Derived from FLAG_SINGLE_LAYER_BIND
};
444
StoreTest::StoreTest (tcu::TestContext&		testCtx,
					  const std::string&	name,
					  const std::string&	description,
					  const Texture&		texture,
					  const VkFormat		format,
					  const TestFlags		flags)
	: TestCase			(testCtx, name, description)
	, m_texture			(texture)
	, m_format			(format)
	, m_singleLayerBind	((flags & FLAG_SINGLE_LAYER_BIND) != 0)
{
	// Binding one layer at a time only makes sense for layered textures.
	if (m_singleLayerBind)
		DE_ASSERT(m_texture.numLayers() > 1);
}
459
//! Build the compute shader. Each invocation writes one texel of the XOR
//! pattern; colorBaseExpr must stay in sync with generateReferenceImage().
//! For normalized formats the pattern is scaled/biased into representable range.
void StoreTest::initPrograms (SourceCollections& programCollection) const
{
	const float storeColorScale = computeStoreColorScale(m_format, m_texture.size());
	const float storeColorBias = computeStoreColorBias(m_format);
	DE_ASSERT(colorScaleAndBiasAreValid(m_format, storeColorScale, storeColorBias));

	const std::string xMax = de::toString(m_texture.size().x() - 1);
	const std::string yMax = de::toString(m_texture.size().y() - 1);
	// Integer formats need (u)vec4 literals in GLSL.
	const std::string signednessPrefix = isUintFormat(m_format) ? "u" : isIntFormat(m_format) ? "i" : "";
	const std::string colorBaseExpr = signednessPrefix + "vec4("
		+ "gx^gy^gz, "
		+ "(" + xMax + "-gx)^gy^gz, "
		+ "gx^(" + yMax + "-gy)^gz, "
		+ "(" + xMax + "-gx)^(" + yMax + "-gy)^gz)";

	// Scale/bias terms are emitted only when they differ from the identity.
	const std::string colorExpr = colorBaseExpr + (storeColorScale == 1.0f ? "" : "*" + de::toString(storeColorScale))
								  + (storeColorBias == 0.0f ? "" : " + float(" + de::toString(storeColorBias) + ")");

	// When binding a single layer, the bound view has one dimension less than the full image.
	const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
	const std::string texelCoordStr = (dimension == 1 ? "gx" : dimension == 2 ? "ivec2(gx, gy)" : dimension == 3 ? "ivec3(gx, gy, gz)" : "");

	const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
	const std::string formatQualifierStr = getShaderImageFormatQualifier(mapVkFormat(m_format));
	const std::string imageTypeStr = getShaderImageType(mapVkFormat(m_format), usedImageType);

	std::ostringstream src;
	src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
		<< "\n"
		<< "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
		<< "layout (binding = 0, " << formatQualifierStr << ") writeonly uniform highp " << imageTypeStr << " u_image;\n";

	// In single-layer mode the target layer index comes from a uniform buffer
	// (one chunk per layer, see ImageStoreTestInstance).
	if (m_singleLayerBind)
		src << "layout (binding = 1) readonly uniform Constants {\n"
			<< "    int u_layerNdx;\n"
			<< "};\n";

	src << "\n"
		<< "void main (void)\n"
		<< "{\n"
		<< "    int gx = int(gl_GlobalInvocationID.x);\n"
		<< "    int gy = int(gl_GlobalInvocationID.y);\n"
		<< "    int gz = " << (m_singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
		<< "    imageStore(u_image, " << texelCoordStr << ", " << colorExpr << ");\n"
		<< "}\n";

	programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
}
507
//! Generic test iteration algorithm for image tests
//! Subclasses provide descriptors, barriers/copies around the dispatches, and
//! result verification; iterate() drives the common record/submit/verify flow.
class BaseTestInstance : public TestInstance
{
public:
									BaseTestInstance						(Context&		context,
																			 const Texture&	texture,
																			 const VkFormat	format,
																			 const bool		singleLayerBind);

	tcu::TestStatus                 iterate									(void);

	virtual							~BaseTestInstance						(void) {}

protected:
	//! Create descriptor layout/pool/sets; returns the layout (ownership retained by subclass).
	virtual VkDescriptorSetLayout	prepareDescriptors						(void) = 0;
	//! Read back and compare the result after the submission has completed.
	virtual tcu::TestStatus			verifyResult							(void) = 0;

	//! Hooks for recording barriers/copies around the compute dispatches.
	virtual void					commandBeforeCompute					(const VkCommandBuffer	cmdBuffer) = 0;
	virtual void					commandBetweenShaderInvocations			(const VkCommandBuffer	cmdBuffer) = 0;
	virtual void					commandAfterCompute						(const VkCommandBuffer	cmdBuffer) = 0;

	//! Update and bind the descriptor set used for the given layer.
	virtual void					commandBindDescriptorsForLayer			(const VkCommandBuffer	cmdBuffer,
																			 const VkPipelineLayout pipelineLayout,
																			 const int				layerNdx) = 0;

	const Texture					m_texture;
	const VkFormat					m_format;
	const bool						m_singleLayerBind;	//!< One dispatch per layer instead of a single 3d dispatch
};
537
BaseTestInstance::BaseTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool singleLayerBind)
	: TestInstance		(context)
	, m_texture			(texture)
	, m_format			(format)
	, m_singleLayerBind	(singleLayerBind)
{
}
545
//! Common test flow: build the compute pipeline, record the dispatch(es) with
//! the subclass-provided barriers and descriptor bindings, submit, wait, and
//! let the subclass verify the result.
tcu::TestStatus BaseTestInstance::iterate (void)
{
	const DeviceInterface&	vk					= m_context.getDeviceInterface();
	const VkDevice			device				= m_context.getDevice();
	const VkQueue			queue				= m_context.getUniversalQueue();
	const deUint32			queueFamilyIndex	= m_context.getUniversalQueueFamilyIndex();

	const Unique<VkShaderModule> shaderModule(createShaderModule(vk, device, m_context.getBinaryCollection().get("comp"), 0));

	const VkDescriptorSetLayout descriptorSetLayout = prepareDescriptors();
	const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, descriptorSetLayout));
	const Unique<VkPipeline> pipeline(makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));

	const Unique<VkCommandPool> cmdPool(makeCommandPool(vk, device, queueFamilyIndex));
	const Unique<VkCommandBuffer> cmdBuffer(makeCommandBuffer(vk, device, *cmdPool));

	beginCommandBuffer(vk, *cmdBuffer);

	vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
	commandBeforeCompute(*cmdBuffer);

	// Single-layer mode: dispatch per layer over one layer's extent.
	// Otherwise: one dispatch covering the whole image (local size is 1x1x1).
	const tcu::IVec3 workSize = (m_singleLayerBind ? m_texture.layerSize() : m_texture.size());
	const int loopNumLayers = (m_singleLayerBind ? m_texture.numLayers() : 1);
	for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
	{
		commandBindDescriptorsForLayer(*cmdBuffer, *pipelineLayout, layerNdx);

		// Barrier between consecutive per-layer dispatches (not before the first one).
		if (layerNdx > 0)
			commandBetweenShaderInvocations(*cmdBuffer);

		vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());
	}

	commandAfterCompute(*cmdBuffer);

	endCommandBuffer(vk, *cmdBuffer);

	submitCommandsAndWait(vk, device, queue, *cmdBuffer);

	return verifyResult();
}
587
//! Base store test implementation
//! Owns a host-visible buffer that receives the image contents for verification
//! against the host-generated reference.
class StoreTestInstance : public BaseTestInstance
{
public:
									StoreTestInstance						(Context&		context,
																			 const Texture&	texture,
																			 const VkFormat	format,
																			 const bool		singleLayerBind);

protected:
	tcu::TestStatus					verifyResult							(void);

	// Add empty implementations for functions that might be not needed
	void							commandBeforeCompute					(const VkCommandBuffer) {}
	void							commandBetweenShaderInvocations			(const VkCommandBuffer) {}
	void							commandAfterCompute						(const VkCommandBuffer) {}

	de::MovePtr<Buffer>				m_imageBuffer;		//!< Host-visible readback (or texel) buffer
	const VkDeviceSize				m_imageSizeBytes;	//!< Size of the whole image in bytes
};
608
StoreTestInstance::StoreTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool singleLayerBind)
	: BaseTestInstance		(context, texture, format, singleLayerBind)
	, m_imageSizeBytes		(getImageSizeBytes(texture.size(), format))
{
	const DeviceInterface&	vk			= m_context.getDeviceInterface();
	const VkDevice			device		= m_context.getDevice();
	Allocator&				allocator	= m_context.getDefaultAllocator();

	// A helper buffer with enough space to hold the whole image. Usage flags accommodate all derived test instances.
	// (Storage-texel-buffer usage for buffer-backed tests, transfer-dst for image readback.)

	m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
		vk, device, allocator,
		makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
		MemoryRequirement::HostVisible));
}
624
//! Compare the data the shader produced (now in the host-visible buffer) with
//! the host-generated reference image.
tcu::TestStatus StoreTestInstance::verifyResult	(void)
{
	const DeviceInterface&	vk		= m_context.getDeviceInterface();
	const VkDevice			device	= m_context.getDevice();

	const tcu::IVec3 imageSize = m_texture.size();
	const tcu::TextureLevel reference = generateReferenceImage(imageSize, m_format);

	// Make device writes visible to the host before reading the mapped memory.
	const Allocation& alloc = m_imageBuffer->getAllocation();
	invalidateMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_imageSizeBytes);
	const tcu::ConstPixelBufferAccess result(mapVkFormat(m_format), imageSize, alloc.getHostPtr());

	if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_format, reference.getAccess(), result))
		return tcu::TestStatus::pass("Passed");
	else
		return tcu::TestStatus::fail("Image comparison failed");
}
642
//! Store test for images
//! Binds the image as a storage image; in single-layer mode one descriptor set
//! and one single-layer image view are kept per layer.
class ImageStoreTestInstance : public StoreTestInstance
{
public:
										ImageStoreTestInstance					(Context&				context,
																				 const Texture&			texture,
																				 const VkFormat			format,
																				 const bool				singleLayerBind);

protected:
	VkDescriptorSetLayout				prepareDescriptors						(void);
	void								commandBeforeCompute					(const VkCommandBuffer	cmdBuffer);
	void								commandBetweenShaderInvocations			(const VkCommandBuffer	cmdBuffer);
	void								commandAfterCompute						(const VkCommandBuffer	cmdBuffer);

	void								commandBindDescriptorsForLayer			(const VkCommandBuffer	cmdBuffer,
																				 const VkPipelineLayout pipelineLayout,
																				 const int				layerNdx);

	de::MovePtr<Image>					m_image;							//!< The storage image under test
	de::MovePtr<Buffer>					m_constantsBuffer;					//!< Uniform buffer holding one layer index per chunk
	const VkDeviceSize					m_constantsBufferChunkSizeBytes;	//!< Aligned per-layer chunk size
	Move<VkDescriptorSetLayout>			m_descriptorSetLayout;
	Move<VkDescriptorPool>				m_descriptorPool;
	std::vector<SharedVkDescriptorSet>	m_allDescriptorSets;				//!< One per layer (only [0] used when binding all layers)
	std::vector<SharedVkImageView>		m_allImageViews;					//!< One per layer (only [0] used when binding all layers)
};
670
ImageStoreTestInstance::ImageStoreTestInstance (Context&		context,
												const Texture&	texture,
												const VkFormat	format,
												const bool		singleLayerBind)
	: StoreTestInstance					(context, texture, format, singleLayerBind)
	, m_constantsBufferChunkSizeBytes	(getOptimalUniformBufferChunkSize(context, sizeof(deUint32)))
	, m_allDescriptorSets				(texture.numLayers())
	, m_allImageViews					(texture.numLayers())
{
	const DeviceInterface&	vk					= m_context.getDeviceInterface();
	const VkDevice			device				= m_context.getDevice();
	Allocator&				allocator			= m_context.getDefaultAllocator();

	// Storage usage for imageStore(), transfer-src for the readback copy.
	m_image = de::MovePtr<Image>(new Image(
		vk, device, allocator,
		makeImageCreateInfo(m_texture, m_format, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0u),
		MemoryRequirement::Any));

	// This buffer will be used to pass constants to the shader
	// (one aligned chunk per layer; each chunk stores the layer's index so the
	// shader knows which gz to use in single-layer mode).

	const int numLayers = m_texture.numLayers();
	const VkDeviceSize constantsBufferSizeBytes = numLayers * m_constantsBufferChunkSizeBytes;
	m_constantsBuffer = de::MovePtr<Buffer>(new Buffer(
		vk, device, allocator,
		makeBufferCreateInfo(constantsBufferSizeBytes, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT),
		MemoryRequirement::HostVisible));

	{
		const Allocation& alloc = m_constantsBuffer->getAllocation();
		deUint8* const basePtr = static_cast<deUint8*>(alloc.getHostPtr());

		// Clear the whole buffer first, including any alignment padding.
		deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(constantsBufferSizeBytes));

		for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
		{
			deUint32* valuePtr = reinterpret_cast<deUint32*>(basePtr + layerNdx * m_constantsBufferChunkSizeBytes);
			*valuePtr = static_cast<deUint32>(layerNdx);
		}

		// Make host writes visible to the device.
		flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), constantsBufferSizeBytes);
	}
}
713
//! Create the descriptor layout (storage image + uniform buffer), the pool, and
//! per-layer (or single) descriptor sets and image views.
VkDescriptorSetLayout ImageStoreTestInstance::prepareDescriptors (void)
{
	const DeviceInterface&	vk		= m_context.getDeviceInterface();
	const VkDevice			device	= m_context.getDevice();

	const int numLayers = m_texture.numLayers();
	m_descriptorSetLayout = DescriptorSetLayoutBuilder()
		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
		.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
		.build(vk, device);

	// Pool is sized for the worst case (one set per layer); in all-layers mode
	// only one set is actually allocated.
	m_descriptorPool = DescriptorPoolBuilder()
		.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
		.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, numLayers)
		.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);

	if (m_singleLayerBind)
	{
		// One descriptor set and one single-layer view per layer; the view type
		// is the layer's type (e.g. 2d for a cube face).
		for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
		{
			m_allDescriptorSets[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
			m_allImageViews[layerNdx]     = makeVkSharedPtr(makeImageView(
												vk, device, m_image->get(), mapImageViewType(getImageTypeForSingleLayer(m_texture.type())), m_format,
												makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u)));
		}
	}
	else // bind all layers at once
	{
		m_allDescriptorSets[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
		m_allImageViews[0] = makeVkSharedPtr(makeImageView(
								vk, device, m_image->get(), mapImageViewType(m_texture.type()), m_format,
								makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers)));
	}

	return *m_descriptorSetLayout;  // not passing the ownership
}
750
//! Update the layer's descriptor set (image view + this layer's constants
//! chunk) and bind it. Updates happen at record time, before submission.
void ImageStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
{
	const DeviceInterface&	vk		= m_context.getDeviceInterface();
	const VkDevice			device	= m_context.getDevice();

	const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
	const VkImageView imageView = **m_allImageViews[layerNdx];

	const VkDescriptorImageInfo descriptorImageInfo = makeDescriptorImageInfo(DE_NULL, imageView, VK_IMAGE_LAYOUT_GENERAL);

	// Set the next chunk of the constants buffer. Each chunk begins with layer index that we've set before.
	const VkDescriptorBufferInfo descriptorConstantsBufferInfo = makeDescriptorBufferInfo(
		m_constantsBuffer->get(), layerNdx*m_constantsBufferChunkSizeBytes, m_constantsBufferChunkSizeBytes);

	DescriptorSetUpdateBuilder()
		.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorImageInfo)
		.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
		.update(vk, device);
	vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
}
771
//! Before the first dispatch: transition the image UNDEFINED -> GENERAL (for
//! storage-image access) and make the host-written constants visible to
//! shader reads.
void ImageStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
{
	const DeviceInterface& vk = m_context.getDeviceInterface();

	const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
	const VkImageMemoryBarrier setImageLayoutBarrier = makeImageMemoryBarrier(
		0u, 0u,
		VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
		m_image->get(), fullImageSubresourceRange);

	const VkDeviceSize constantsBufferSize = m_texture.numLayers() * m_constantsBufferChunkSizeBytes;
	const VkBufferMemoryBarrier writeConstantsBarrier = makeBufferMemoryBarrier(
		VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
		m_constantsBuffer->get(), 0ull, constantsBufferSize);

	vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &writeConstantsBarrier, 1, &setImageLayoutBarrier);
}
789
//! Delegates to the shared helper that inserts an image write barrier between
//! successive shader dispatches (used in single-layer-bind mode).
void ImageStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
{
	commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_image->get(), m_texture);
}
794
//! Copy the result image into the host-visible helper buffer for later verification.
void ImageStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
{
	commandCopyImageToBuffer(m_context, cmdBuffer, m_image->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
}
799
//! Store test for buffers
//! The shader writes through a storage texel buffer view of the helper buffer
//! (m_imageBuffer), so no separate image object is needed.
class BufferStoreTestInstance : public StoreTestInstance
{
public:
									BufferStoreTestInstance					(Context&				context,
																			 const Texture&			texture,
																			 const VkFormat			format);

protected:
	VkDescriptorSetLayout			prepareDescriptors						(void);
	void							commandAfterCompute						(const VkCommandBuffer	cmdBuffer);

	void							commandBindDescriptorsForLayer			(const VkCommandBuffer	cmdBuffer,
																			 const VkPipelineLayout pipelineLayout,
																			 const int				layerNdx);

	Move<VkDescriptorSetLayout>		m_descriptorSetLayout;
	Move<VkDescriptorPool>			m_descriptorPool;
	Move<VkDescriptorSet>			m_descriptorSet;
	Move<VkBufferView>				m_bufferView;		//!< Texel view over m_imageBuffer, created in prepareDescriptors()
};
821
822BufferStoreTestInstance::BufferStoreTestInstance (Context&			context,
823												  const Texture&	texture,
824												  const VkFormat	format)
825	: StoreTestInstance(context, texture, format, false)
826{
827}
828
829VkDescriptorSetLayout BufferStoreTestInstance::prepareDescriptors (void)
830{
831	const DeviceInterface&	vk		= m_context.getDeviceInterface();
832	const VkDevice			device	= m_context.getDevice();
833
834	m_descriptorSetLayout = DescriptorSetLayoutBuilder()
835		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
836		.build(vk, device);
837
838	m_descriptorPool = DescriptorPoolBuilder()
839		.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
840		.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
841
842	m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
843	m_bufferView = makeBufferView(vk, device, m_imageBuffer->get(), m_format, 0ull, m_imageSizeBytes);
844
845	return *m_descriptorSetLayout;  // not passing the ownership
846}
847
848void BufferStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
849{
850	DE_ASSERT(layerNdx == 0);
851	DE_UNREF(layerNdx);
852
853	const VkDevice			device	= m_context.getDevice();
854	const DeviceInterface&	vk		= m_context.getDeviceInterface();
855
856	DescriptorSetUpdateBuilder()
857		.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferView.get())
858		.update(vk, device);
859	vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
860}
861
//! Insert a barrier so the shader's buffer writes are visible to the host read
//! performed during verification.
void BufferStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
{
	commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBuffer->get(), m_imageSizeBytes);
}
866
//! Test case whose compute shader reads texels via imageLoad() from binding 0 and
//! writes them via imageStore() to binding 1, mirroring the image horizontally.
//! The view format (m_format) may reinterpret the storage format (m_imageFormat).
class LoadStoreTest : public TestCase
{
public:
	enum TestFlags
	{
		FLAG_SINGLE_LAYER_BIND	= 1 << 0,	//!< Run the shader multiple times, each time binding a different layer.
		FLAG_RESTRICT_IMAGES	= 1 << 1,	//!< If given, images in the shader will be qualified with "restrict".
	};

							LoadStoreTest			(tcu::TestContext&		testCtx,
													 const std::string&		name,
													 const std::string&		description,
													 const Texture&			texture,
													 const VkFormat			format,
													 const VkFormat			imageFormat,
													 const TestFlags		flags = static_cast<TestFlags>(0));

	void					initPrograms			(SourceCollections&		programCollection) const;
	TestInstance*			createInstance			(Context&				context) const;

private:
	const Texture			m_texture;
	const VkFormat			m_format;				//!< Format as accessed in the shader
	const VkFormat			m_imageFormat;			//!< Storage format
	const bool				m_singleLayerBind;		//!< Derived from FLAG_SINGLE_LAYER_BIND
	const bool				m_restrictImages;		//!< Derived from FLAG_RESTRICT_IMAGES
};
894
LoadStoreTest::LoadStoreTest (tcu::TestContext&		testCtx,
							  const std::string&	name,
							  const std::string&	description,
							  const Texture&		texture,
							  const VkFormat		format,
							  const VkFormat		imageFormat,
							  const TestFlags		flags)
	: TestCase			(testCtx, name, description)
	, m_texture			(texture)
	, m_format			(format)
	, m_imageFormat		(imageFormat)
	, m_singleLayerBind ((flags & FLAG_SINGLE_LAYER_BIND) != 0)
	, m_restrictImages	((flags & FLAG_RESTRICT_IMAGES) != 0)
{
	// Per-layer binding only makes sense for layered textures.
	if (m_singleLayerBind)
		DE_ASSERT(m_texture.numLayers() > 1);

	// Reinterpretation is only permitted between compatible formats.
	DE_ASSERT(formatsAreCompatible(m_format, m_imageFormat));
}
914
915void LoadStoreTest::initPrograms (SourceCollections& programCollection) const
916{
917	const int			dimension			= (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
918	const ImageType		usedImageType		= (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
919	const std::string	formatQualifierStr	= getShaderImageFormatQualifier(mapVkFormat(m_format));
920	const std::string	imageTypeStr		= getShaderImageType(mapVkFormat(m_format), usedImageType);
921	const std::string	maybeRestrictStr	= (m_restrictImages ? "restrict " : "");
922	const std::string	xMax				= de::toString(m_texture.size().x() - 1);
923
924	std::ostringstream src;
925	src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
926		<< "\n"
927		<< "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
928		<< "layout (binding = 0, " << formatQualifierStr << ") " << maybeRestrictStr << "readonly uniform highp " << imageTypeStr << " u_image0;\n"
929		<< "layout (binding = 1, " << formatQualifierStr << ") " << maybeRestrictStr << "writeonly uniform highp " << imageTypeStr << " u_image1;\n"
930		<< "\n"
931		<< "void main (void)\n"
932		<< "{\n"
933		<< (dimension == 1 ?
934			"    int pos = int(gl_GlobalInvocationID.x);\n"
935			"    imageStore(u_image1, pos, imageLoad(u_image0, " + xMax + "-pos));\n"
936			: dimension == 2 ?
937			"    ivec2 pos = ivec2(gl_GlobalInvocationID.xy);\n"
938			"    imageStore(u_image1, pos, imageLoad(u_image0, ivec2(" + xMax + "-pos.x, pos.y)));\n"
939			: dimension == 3 ?
940			"    ivec3 pos = ivec3(gl_GlobalInvocationID);\n"
941			"    imageStore(u_image1, pos, imageLoad(u_image0, ivec3(" + xMax + "-pos.x, pos.y, pos.z)));\n"
942			: "")
943		<< "}\n";
944
945	programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
946}
947
//! Load/store test base implementation
//! Owns the host-visible helper buffer pre-filled with the generated reference image,
//! and verifies the result against a horizontally flipped copy of that reference.
class LoadStoreTestInstance : public BaseTestInstance
{
public:
									LoadStoreTestInstance				(Context&			context,
																		 const Texture&		texture,
																		 const VkFormat		format,
																		 const VkFormat		imageFormat,
																		 const bool			singleLayerBind);

protected:
	virtual Buffer*					getResultBuffer						(void) const = 0;	//!< Get the buffer that contains the result image

	tcu::TestStatus					verifyResult						(void);

	// Add empty implementations for functions that might be not needed
	void							commandBeforeCompute				(const VkCommandBuffer) {}
	void							commandBetweenShaderInvocations		(const VkCommandBuffer) {}
	void							commandAfterCompute					(const VkCommandBuffer) {}

	de::MovePtr<Buffer>				m_imageBuffer;		//!< Source data and helper buffer
	const VkDeviceSize				m_imageSizeBytes;	//!< Size of the whole image in bytes
	const VkFormat					m_imageFormat;		//!< Image format (for storage, may be different than texture format)
	tcu::TextureLevel				m_referenceImage;	//!< Used as input data and later to verify result image
};
973
974LoadStoreTestInstance::LoadStoreTestInstance (Context&			context,
975											  const Texture&	texture,
976											  const VkFormat	format,
977											  const VkFormat	imageFormat,
978											  const bool		singleLayerBind)
979	: BaseTestInstance		(context, texture, format, singleLayerBind)
980	, m_imageSizeBytes		(getImageSizeBytes(texture.size(), format))
981	, m_imageFormat			(imageFormat)
982	, m_referenceImage		(generateReferenceImage(texture.size(), imageFormat, format))
983{
984	const DeviceInterface&	vk			= m_context.getDeviceInterface();
985	const VkDevice			device		= m_context.getDevice();
986	Allocator&				allocator	= m_context.getDefaultAllocator();
987
988	// A helper buffer with enough space to hold the whole image.
989
990	m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
991		vk, device, allocator,
992		makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
993		MemoryRequirement::HostVisible));
994
995	// Copy reference data to buffer for subsequent upload to image.
996
997	const Allocation& alloc = m_imageBuffer->getAllocation();
998	deMemcpy(alloc.getHostPtr(), m_referenceImage.getAccess().getDataPtr(), static_cast<size_t>(m_imageSizeBytes));
999	flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_imageSizeBytes);
1000}
1001
1002tcu::TestStatus LoadStoreTestInstance::verifyResult	(void)
1003{
1004	const DeviceInterface&	vk		= m_context.getDeviceInterface();
1005	const VkDevice			device	= m_context.getDevice();
1006
1007	// Apply the same transformation as done in the shader
1008	const tcu::PixelBufferAccess reference = m_referenceImage.getAccess();
1009	flipHorizontally(reference);
1010
1011	const Allocation& alloc = getResultBuffer()->getAllocation();
1012	invalidateMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_imageSizeBytes);
1013	const tcu::ConstPixelBufferAccess result(mapVkFormat(m_imageFormat), m_texture.size(), alloc.getHostPtr());
1014
1015	if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_imageFormat, reference, result))
1016		return tcu::TestStatus::pass("Passed");
1017	else
1018		return tcu::TestStatus::fail("Image comparison failed");
1019}
1020
//! Load/store test for images
//! Uses two images: a source filled from the reference buffer and read in the shader,
//! and a destination written by the shader and copied back for verification. In
//! single-layer-bind mode each layer gets its own descriptor set and views.
class ImageLoadStoreTestInstance : public LoadStoreTestInstance
{
public:
										ImageLoadStoreTestInstance			(Context&				context,
																			 const Texture&			texture,
																			 const VkFormat			format,
																			 const VkFormat			imageFormat,
																			 const bool				singleLayerBind);

protected:
	VkDescriptorSetLayout				prepareDescriptors					(void);
	void								commandBeforeCompute				(const VkCommandBuffer	cmdBuffer);
	void								commandBetweenShaderInvocations		(const VkCommandBuffer	cmdBuffer);
	void								commandAfterCompute					(const VkCommandBuffer	cmdBuffer);

	void								commandBindDescriptorsForLayer		(const VkCommandBuffer	cmdBuffer,
																			 const VkPipelineLayout pipelineLayout,
																			 const int				layerNdx);

	Buffer*								getResultBuffer						(void) const { return m_imageBuffer.get(); }

	de::MovePtr<Image>					m_imageSrc;				//!< Read in the shader (binding 0)
	de::MovePtr<Image>					m_imageDst;				//!< Written in the shader (binding 1)
	Move<VkDescriptorSetLayout>			m_descriptorSetLayout;
	Move<VkDescriptorPool>				m_descriptorPool;
	std::vector<SharedVkDescriptorSet>	m_allDescriptorSets;	//!< One per layer (single-layer-bind) or only index 0 used
	std::vector<SharedVkImageView>		m_allSrcImageViews;
	std::vector<SharedVkImageView>		m_allDstImageViews;
};
1051
1052ImageLoadStoreTestInstance::ImageLoadStoreTestInstance (Context&		context,
1053														const Texture&	texture,
1054														const VkFormat	format,
1055														const VkFormat	imageFormat,
1056														const bool		singleLayerBind)
1057	: LoadStoreTestInstance	(context, texture, format, imageFormat, singleLayerBind)
1058	, m_allDescriptorSets	(texture.numLayers())
1059	, m_allSrcImageViews	(texture.numLayers())
1060	, m_allDstImageViews	(texture.numLayers())
1061{
1062	const DeviceInterface&		vk					= m_context.getDeviceInterface();
1063	const VkDevice				device				= m_context.getDevice();
1064	Allocator&					allocator			= m_context.getDefaultAllocator();
1065	const VkImageCreateFlags	imageFlags			= (m_format == m_imageFormat ? 0u : (VkImageCreateFlags)VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT);
1066
1067	m_imageSrc = de::MovePtr<Image>(new Image(
1068		vk, device, allocator,
1069		makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, imageFlags),
1070		MemoryRequirement::Any));
1071
1072	m_imageDst = de::MovePtr<Image>(new Image(
1073		vk, device, allocator,
1074		makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, imageFlags),
1075		MemoryRequirement::Any));
1076}
1077
1078VkDescriptorSetLayout ImageLoadStoreTestInstance::prepareDescriptors (void)
1079{
1080	const VkDevice			device	= m_context.getDevice();
1081	const DeviceInterface&	vk		= m_context.getDeviceInterface();
1082
1083	const int numLayers = m_texture.numLayers();
1084	m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1085		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1086		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1087		.build(vk, device);
1088
1089	m_descriptorPool = DescriptorPoolBuilder()
1090		.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1091		.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1092		.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
1093
1094	if (m_singleLayerBind)
1095	{
1096		for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
1097		{
1098			const VkImageViewType viewType = mapImageViewType(getImageTypeForSingleLayer(m_texture.type()));
1099			const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u);
1100
1101			m_allDescriptorSets[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
1102			m_allSrcImageViews[layerNdx]  = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
1103			m_allDstImageViews[layerNdx]  = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
1104		}
1105	}
1106	else // bind all layers at once
1107	{
1108		const VkImageViewType viewType = mapImageViewType(m_texture.type());
1109		const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers);
1110
1111		m_allDescriptorSets[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
1112		m_allSrcImageViews[0]  = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
1113		m_allDstImageViews[0]  = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
1114	}
1115
1116	return *m_descriptorSetLayout;  // not passing the ownership
1117}
1118
1119void ImageLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
1120{
1121	const VkDevice			device	= m_context.getDevice();
1122	const DeviceInterface&	vk		= m_context.getDeviceInterface();
1123
1124	const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
1125	const VkImageView	  srcImageView	= **m_allSrcImageViews[layerNdx];
1126	const VkImageView	  dstImageView	= **m_allDstImageViews[layerNdx];
1127
1128	const VkDescriptorImageInfo descriptorSrcImageInfo = makeDescriptorImageInfo(DE_NULL, srcImageView, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
1129	const VkDescriptorImageInfo descriptorDstImageInfo = makeDescriptorImageInfo(DE_NULL, dstImageView, VK_IMAGE_LAYOUT_GENERAL);
1130
1131	DescriptorSetUpdateBuilder()
1132		.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorSrcImageInfo)
1133		.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImageInfo)
1134		.update(vk, device);
1135	vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
1136}
1137
1138void ImageLoadStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
1139{
1140	const DeviceInterface& vk = m_context.getDeviceInterface();
1141
1142	const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
1143	{
1144		const VkImageMemoryBarrier preCopyImageBarriers[] =
1145		{
1146			makeImageMemoryBarrier(
1147				0u, VK_ACCESS_TRANSFER_WRITE_BIT,
1148				VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1149				m_imageSrc->get(), fullImageSubresourceRange),
1150			makeImageMemoryBarrier(
1151				0u, VK_ACCESS_SHADER_WRITE_BIT,
1152				VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
1153				m_imageDst->get(), fullImageSubresourceRange)
1154		};
1155
1156		const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy = makeBufferMemoryBarrier(
1157			VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
1158			m_imageBuffer->get(), 0ull, m_imageSizeBytes);
1159
1160		vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
1161			(VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &barrierFlushHostWriteBeforeCopy, DE_LENGTH_OF_ARRAY(preCopyImageBarriers), preCopyImageBarriers);
1162	}
1163	{
1164		const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
1165			VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
1166			VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
1167			m_imageSrc->get(), fullImageSubresourceRange);
1168
1169		const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_texture);
1170
1171		vk.cmdCopyBufferToImage(cmdBuffer, m_imageBuffer->get(), m_imageSrc->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyRegion);
1172		vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &barrierAfterCopy);
1173	}
1174}
1175
//! Delegates to the shared helper that inserts an image write barrier on the
//! destination image between successive shader dispatches.
void ImageLoadStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
{
	commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
}
1180
//! Copy the destination image into the host-visible helper buffer for verification.
void ImageLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
{
	commandCopyImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
}
1185
//! Load/store test for buffers
//! The shader reads from a texel view of the reference buffer (m_imageBuffer) and
//! writes to a texel view of a separate destination buffer, which is verified.
class BufferLoadStoreTestInstance : public LoadStoreTestInstance
{
public:
									BufferLoadStoreTestInstance		(Context&				context,
																	 const Texture&			texture,
																	 const VkFormat			format,
																	 const VkFormat			imageFormat);

protected:
	VkDescriptorSetLayout			prepareDescriptors				(void);
	void							commandAfterCompute				(const VkCommandBuffer	cmdBuffer);

	void							commandBindDescriptorsForLayer	(const VkCommandBuffer	cmdBuffer,
																	 const VkPipelineLayout pipelineLayout,
																	 const int				layerNdx);

	Buffer*							getResultBuffer					(void) const { return m_imageBufferDst.get(); }

	de::MovePtr<Buffer>				m_imageBufferDst;	//!< Destination written by the shader
	Move<VkDescriptorSetLayout>		m_descriptorSetLayout;
	Move<VkDescriptorPool>			m_descriptorPool;
	Move<VkDescriptorSet>			m_descriptorSet;
	Move<VkBufferView>				m_bufferViewSrc;	//!< Texel view over m_imageBuffer (binding 0)
	Move<VkBufferView>				m_bufferViewDst;	//!< Texel view over m_imageBufferDst (binding 1)
};
1212
1213BufferLoadStoreTestInstance::BufferLoadStoreTestInstance (Context&			context,
1214														  const Texture&	texture,
1215														  const VkFormat	format,
1216														  const VkFormat	imageFormat)
1217	: LoadStoreTestInstance(context, texture, format, imageFormat, false)
1218{
1219	const DeviceInterface&	vk			= m_context.getDeviceInterface();
1220	const VkDevice			device		= m_context.getDevice();
1221	Allocator&				allocator	= m_context.getDefaultAllocator();
1222
1223	// Create a destination buffer.
1224
1225	m_imageBufferDst = de::MovePtr<Buffer>(new Buffer(
1226		vk, device, allocator,
1227		makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
1228		MemoryRequirement::HostVisible));
1229}
1230
1231VkDescriptorSetLayout BufferLoadStoreTestInstance::prepareDescriptors (void)
1232{
1233	const DeviceInterface&	vk		= m_context.getDeviceInterface();
1234	const VkDevice			device	= m_context.getDevice();
1235
1236	m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1237		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
1238		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
1239		.build(vk, device);
1240
1241	m_descriptorPool = DescriptorPoolBuilder()
1242		.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
1243		.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
1244		.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1245
1246	m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
1247	m_bufferViewSrc = makeBufferView(vk, device, m_imageBuffer->get(), m_format, 0ull, m_imageSizeBytes);
1248	m_bufferViewDst = makeBufferView(vk, device, m_imageBufferDst->get(), m_format, 0ull, m_imageSizeBytes);
1249
1250	return *m_descriptorSetLayout;  // not passing the ownership
1251}
1252
1253void BufferLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
1254{
1255	DE_ASSERT(layerNdx == 0);
1256	DE_UNREF(layerNdx);
1257
1258	const VkDevice			device	= m_context.getDevice();
1259	const DeviceInterface&	vk		= m_context.getDeviceInterface();
1260
1261	DescriptorSetUpdateBuilder()
1262		.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferViewSrc.get())
1263		.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferViewDst.get())
1264		.update(vk, device);
1265	vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
1266}
1267
//! Insert a barrier so the shader's writes to the destination buffer are visible
//! to the host read performed during verification.
void BufferLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
{
	commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBufferDst->get(), m_imageSizeBytes);
}
1272
1273TestInstance* StoreTest::createInstance (Context& context) const
1274{
1275	if (m_texture.type() == IMAGE_TYPE_BUFFER)
1276		return new BufferStoreTestInstance(context, m_texture, m_format);
1277	else
1278		return new ImageStoreTestInstance(context, m_texture, m_format, m_singleLayerBind);
1279}
1280
1281TestInstance* LoadStoreTest::createInstance (Context& context) const
1282{
1283	if (m_texture.type() == IMAGE_TYPE_BUFFER)
1284		return new BufferLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat);
1285	else
1286		return new ImageLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat, m_singleLayerBind);
1287}
1288
1289// TODO Which image/format combinations should be supported? Spec says it should be queried with vkGetPhysicalDeviceImageFormatProperties.
1290//      What about buffer/format? (texel storage buffer) (use vkGetPhysicalDeviceFormatProperties ?)
1291
//! One predefined texture per tested image type: (type, size in texels, number of layers).
static const Texture s_textures[] =
{
	Texture(IMAGE_TYPE_1D,			tcu::IVec3(64,	1,	1),	1),
	Texture(IMAGE_TYPE_1D_ARRAY,	tcu::IVec3(64,	1,	1),	8),
	Texture(IMAGE_TYPE_2D,			tcu::IVec3(64,	64,	1),	1),
	Texture(IMAGE_TYPE_2D_ARRAY,	tcu::IVec3(64,	64,	1),	8),
	Texture(IMAGE_TYPE_3D,			tcu::IVec3(64,	64,	8),	1),
	Texture(IMAGE_TYPE_CUBE,		tcu::IVec3(64,	64,	1),	6),
	Texture(IMAGE_TYPE_CUBE_ARRAY,	tcu::IVec3(64,	64,	1),	2*6),	// 2 cubes, 6 faces each
	Texture(IMAGE_TYPE_BUFFER,		tcu::IVec3(64,	1,	1),	1),
};
1303
1304const Texture& getTestTexture (const ImageType imageType)
1305{
1306	for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1307		if (s_textures[textureNdx].type() == imageType)
1308			return s_textures[textureNdx];
1309
1310	DE_FATAL("Internal error");
1311	return s_textures[0];
1312}
1313
//! Formats exercised by the tests, grouped as: floating-point, unsigned integer,
//! signed integer, and normalized formats.
static const VkFormat s_formats[] =
{
	VK_FORMAT_R32G32B32A32_SFLOAT,
	VK_FORMAT_R16G16B16A16_SFLOAT,
	VK_FORMAT_R32_SFLOAT,

	VK_FORMAT_R32G32B32A32_UINT,
	VK_FORMAT_R16G16B16A16_UINT,
	VK_FORMAT_R8G8B8A8_UINT,
	VK_FORMAT_R32_UINT,

	VK_FORMAT_R32G32B32A32_SINT,
	VK_FORMAT_R16G16B16A16_SINT,
	VK_FORMAT_R8G8B8A8_SINT,
	VK_FORMAT_R32_SINT,

	VK_FORMAT_R8G8B8A8_UNORM,

	VK_FORMAT_R8G8B8A8_SNORM,
};
1334
1335} // anonymous ns
1336
1337tcu::TestCaseGroup* createImageStoreTests (tcu::TestContext& testCtx)
1338{
1339	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "store", "Plain imageStore() cases"));
1340
1341	for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1342	{
1343		const Texture& texture = s_textures[textureNdx];
1344		de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
1345		const bool isLayered = (texture.numLayers() > 1);
1346
1347		for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
1348		{
1349			groupByImageViewType->addChild(new StoreTest(testCtx, getFormatCaseName(s_formats[formatNdx]), "", texture, s_formats[formatNdx]));
1350
1351			if (isLayered)
1352				groupByImageViewType->addChild(new StoreTest(testCtx, getFormatCaseName(s_formats[formatNdx]) + "_single_layer", "",
1353												texture, s_formats[formatNdx], StoreTest::FLAG_SINGLE_LAYER_BIND));
1354		}
1355		testGroup->addChild(groupByImageViewType.release());
1356	}
1357
1358	return testGroup.release();
1359}
1360
1361tcu::TestCaseGroup* createImageLoadStoreTests (tcu::TestContext& testCtx)
1362{
1363	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store", "Cases with imageLoad() followed by imageStore()"));
1364
1365	for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1366	{
1367		const Texture& texture = s_textures[textureNdx];
1368		de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
1369		const bool isLayered = (texture.numLayers() > 1);
1370
1371		for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
1372		{
1373			groupByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatCaseName(s_formats[formatNdx]), "",
1374											texture, s_formats[formatNdx], s_formats[formatNdx]));
1375
1376			if (isLayered)
1377				groupByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatCaseName(s_formats[formatNdx]) + "_single_layer", "",
1378												texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_SINGLE_LAYER_BIND));
1379		}
1380		testGroup->addChild(groupByImageViewType.release());
1381	}
1382
1383	return testGroup.release();
1384}
1385
1386tcu::TestCaseGroup* createImageFormatReinterpretTests (tcu::TestContext& testCtx)
1387{
1388	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "format_reinterpret",	"Cases with differing texture and image formats"));
1389
1390	for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1391	{
1392		const Texture& texture = s_textures[textureNdx];
1393		de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
1394
1395		for (int imageFormatNdx = 0; imageFormatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++imageFormatNdx)
1396		for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
1397		{
1398			//TODO Are all conversions valid or do we have to limit (or expand) somehow? Is it stated anywhere in the spec?
1399
1400			const std::string caseName = getFormatCaseName(s_formats[imageFormatNdx]) + "_" + getFormatCaseName(s_formats[formatNdx]);
1401			if (imageFormatNdx != formatNdx && formatsAreCompatible(s_formats[imageFormatNdx], s_formats[formatNdx]))
1402				groupByImageViewType->addChild(new LoadStoreTest(testCtx, caseName, "", texture, s_formats[formatNdx], s_formats[imageFormatNdx]));
1403		}
1404		testGroup->addChild(groupByImageViewType.release());
1405	}
1406
1407	return testGroup.release();
1408}
1409
1410de::MovePtr<TestCase> createImageQualifierRestrictCase (tcu::TestContext& testCtx, const ImageType imageType, const std::string& name)
1411{
1412	const VkFormat format = VK_FORMAT_R32G32B32A32_UINT;
1413	const Texture& texture = getTestTexture(imageType);
1414	return de::MovePtr<TestCase>(new LoadStoreTest(testCtx, name, "", texture, format, format, LoadStoreTest::FLAG_RESTRICT_IMAGES));
1415}
1416
1417} // image
1418} // vkt
1419