/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file  vktSparseResourcesMipmapSparseResidency.cpp
 * \brief Sparse partially resident images with mipmaps tests
 *//*--------------------------------------------------------------------*/

#include "vktSparseResourcesMipmapSparseResidency.hpp"
#include "vktSparseResourcesTestsUtil.hpp"
#include "vktSparseResourcesBase.hpp"
#include "vktTestCaseUtil.hpp"

#include "vkDefs.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkPlatform.hpp"
#include "vkPrograms.hpp"
#include "vkMemUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkTypeUtil.hpp"

#include "deUniquePtr.hpp"
#include "deStringUtil.hpp"

#include <sstream>
#include <string>
#include <vector>

using namespace vk;

namespace vkt
{
namespace sparse
{
namespace
{

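// Computes the number of sparse blocks needed to cover the extent, i.e. a per-component
// division by the block (granularity) size, rounded up to the next whole block.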
tcu::UVec3 alignedDivide (const VkExtent3D& extent, const VkExtent3D& divisor)
{
	tcu::UVec3 result;

	result.x() = extent.width  / divisor.width  + ((extent.width  % divisor.width)  ? 1u : 0u);
	result.y() = extent.height / divisor.height + ((extent.height % divisor.height) ? 1u : 0u);
	result.z() = extent.depth  / divisor.depth  + ((extent.depth  % divisor.depth)  ? 1u : 0u);

	return result;
}

class MipmapSparseResidencyCase : public TestCase
{
public:
					MipmapSparseResidencyCase	(tcu::TestContext&			testCtx,
												 const std::string&			name,
												 const std::string&			description,
												 const ImageType			imageType,
												 const tcu::UVec3&			imageSize,
												 const tcu::TextureFormat&	format);

	TestInstance*	createInstance				(Context&					context) const;

private:
	const ImageType				m_imageType;
	const tcu::UVec3			m_imageSize;
	const tcu::TextureFormat	m_format;
};

MipmapSparseResidencyCase::MipmapSparseResidencyCase (tcu::TestContext&			testCtx,
													  const std::string&		name,
													  const std::string&		description,
													  const ImageType			imageType,
													  const tcu::UVec3&			imageSize,
													  const tcu::TextureFormat&	format)
	: TestCase				(testCtx, name, description)
	, m_imageType			(imageType)
	, m_imageSize			(imageSize)
	, m_format				(format)
{
}

class MipmapSparseResidencyInstance : public SparseResourcesBaseInstance
{
public:
					MipmapSparseResidencyInstance	(Context&					context,
													 const ImageType			imageType,
													 const tcu::UVec3&			imageSize,
													 const tcu::TextureFormat&	format);

	tcu::TestStatus	iterate							(void);

private:
	const ImageType				m_imageType;
	const tcu::UVec3			m_imageSize;
	const tcu::TextureFormat	m_format;
};

MipmapSparseResidencyInstance::MipmapSparseResidencyInstance (Context&					context,
															  const ImageType			imageType,
															  const tcu::UVec3&			imageSize,
															  const tcu::TextureFormat&	format)
	: SparseResourcesBaseInstance	(context)
	, m_imageType					(imageType)
	, m_imageSize					(imageSize)
	, m_format						(format)
{
}

tcu::TestStatus MipmapSparseResidencyInstance::iterate (void)
{
	const InstanceInterface&		instance		= m_context.getInstanceInterface();
	const DeviceInterface&			deviceInterface	= m_context.getDeviceInterface();
	const VkPhysicalDevice			physicalDevice	= m_context.getPhysicalDevice();
	const VkPhysicalDeviceFeatures	deviceFeatures	= getPhysicalDeviceFeatures(instance, physicalDevice);

	// Check whether the device supports sparse operations for the image type
	switch (mapImageType(m_imageType))
	{
		case VK_IMAGE_TYPE_2D:
		{
			if (!deviceFeatures.sparseResidencyImage2D)
				return tcu::TestStatus(QP_TEST_RESULT_NOT_SUPPORTED, "Sparse residency for 2D Image not supported");
		}
		break;
		case VK_IMAGE_TYPE_3D:
		{
			if (!deviceFeatures.sparseResidencyImage3D)
				return tcu::TestStatus(QP_TEST_RESULT_NOT_SUPPORTED, "Sparse residency for 3D Image not supported");
		}
		break;
		default:
			return tcu::TestStatus(QP_TEST_RESULT_NOT_SUPPORTED, "Image type not supported");
	}

	// Check whether the device supports sparse operations for the image format
	const std::vector<VkSparseImageFormatProperties> sparseImageFormatPropVec =
		getPhysicalDeviceSparseImageFormatProperties(instance, physicalDevice, mapTextureFormat(m_format), mapImageType(m_imageType),
		VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL);

	if (sparseImageFormatPropVec.size() == 0)
	{
		return tcu::TestStatus(QP_TEST_RESULT_NOT_SUPPORTED, "The image format does not support sparse operations");
	}

	// Check if image size does not exceed device limits
	const VkPhysicalDeviceProperties deviceProperties = getPhysicalDeviceProperties(instance, physicalDevice);

	if (!isImageSizeSupported(m_imageType, m_imageSize, deviceProperties.limits))
	{
		return tcu::TestStatus(QP_TEST_RESULT_NOT_SUPPORTED, "Image size not supported for device");
	}

	QueueRequirementsVec queueRequirements;
	queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
	queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u));

	// Create logical device supporting both sparse binding and compute queues
	if (!createDeviceSupportingQueues(queueRequirements))
	{
		return tcu::TestStatus(QP_TEST_RESULT_FAIL, "Could not create device supporting sparse and compute queue");
	}

	const VkPhysicalDeviceMemoryProperties deviceMemoryProperties = getPhysicalDeviceMemoryProperties(instance, physicalDevice);

	// Create memory allocator for logical device
	const de::UniquePtr<Allocator> allocator(new SimpleAllocator(deviceInterface, *m_logicalDevice, deviceMemoryProperties));

	// Create queue supporting sparse binding operations
	const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);

	// Create queue supporting compute and transfer operations
	const Queue& computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0);

	VkImageCreateInfo imageSparseInfo;

	imageSparseInfo.sType					= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;					//VkStructureType		sType;
	imageSparseInfo.pNext					= DE_NULL;												//const void*			pNext;
	imageSparseInfo.flags					= VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT |
											  VK_IMAGE_CREATE_SPARSE_BINDING_BIT;					//VkImageCreateFlags	flags;
	imageSparseInfo.imageType				= mapImageType(m_imageType);							//VkImageType			imageType;
	imageSparseInfo.format					= mapTextureFormat(m_format);							//VkFormat				format;
	imageSparseInfo.extent					= makeExtent3D(getLayerSize(m_imageType, m_imageSize));	//VkExtent3D			extent;
	imageSparseInfo.arrayLayers				= getNumLayers(m_imageType, m_imageSize);				//deUint32				arrayLayers;
	imageSparseInfo.samples					= VK_SAMPLE_COUNT_1_BIT;								//VkSampleCountFlagBits	samples;
	imageSparseInfo.tiling					= VK_IMAGE_TILING_OPTIMAL;								//VkImageTiling			tiling;
	imageSparseInfo.initialLayout			= VK_IMAGE_LAYOUT_UNDEFINED;							//VkImageLayout			initialLayout;
	imageSparseInfo.usage					= VK_IMAGE_USAGE_TRANSFER_DST_BIT |
											  VK_IMAGE_USAGE_TRANSFER_SRC_BIT;						//VkImageUsageFlags		usage;
	imageSparseInfo.sharingMode				= VK_SHARING_MODE_EXCLUSIVE;							//VkSharingMode			sharingMode;
	imageSparseInfo.queueFamilyIndexCount	= 0u;													//deUint32				queueFamilyIndexCount;
	imageSparseInfo.pQueueFamilyIndices		= DE_NULL;												//const deUint32*		pQueueFamilyIndices;

	if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
	{
		imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
	}

	VkImageFormatProperties imageFormatProperties;
	instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
													imageSparseInfo.format,
													imageSparseInfo.imageType,
													imageSparseInfo.tiling,
													imageSparseInfo.usage,
													imageSparseInfo.flags,
													&imageFormatProperties);

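	// Use the full mip chain for the image; getImageMaxMipLevels is expected to derive the
	// level count from the image extent and the queried imageFormatProperties.maxMipLevels.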
	imageSparseInfo.mipLevels = getImageMaxMipLevels(imageFormatProperties, imageSparseInfo);

	// Allow sharing of sparse image by two different queue families (if necessary)
	const deUint32 queueFamilyIndices[] = { sparseQueue.queueFamilyIndex, computeQueue.queueFamilyIndex };

	if (sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex)
	{
		imageSparseInfo.sharingMode				= VK_SHARING_MODE_CONCURRENT;	//VkSharingMode			sharingMode;
		imageSparseInfo.queueFamilyIndexCount	= 2u;							//deUint32				queueFamilyIndexCount;
		imageSparseInfo.pQueueFamilyIndices		= queueFamilyIndices;			//const deUint32*		pQueueFamilyIndices;
	}

	// Create sparse image
	const Unique<VkImage> imageSparse(createImage(deviceInterface, *m_logicalDevice, &imageSparseInfo));

	// Get sparse image general memory requirements
	const VkMemoryRequirements imageSparseMemRequirements = getImageMemoryRequirements(deviceInterface, *m_logicalDevice, *imageSparse);

	// Check if required image memory size does not exceed device limits
	if (imageSparseMemRequirements.size > deviceProperties.limits.sparseAddressSpaceSize)
	{
		return tcu::TestStatus(QP_TEST_RESULT_NOT_SUPPORTED, "Required memory size for sparse resource exceeds device limits");
	}

	DE_ASSERT((imageSparseMemRequirements.size % imageSparseMemRequirements.alignment) == 0);

	// Get sparse image sparse memory requirements
	deUint32 sparseMemRequirementsCount = 0;

	deviceInterface.getImageSparseMemoryRequirements(*m_logicalDevice, *imageSparse, &sparseMemRequirementsCount, DE_NULL);

	DE_ASSERT(sparseMemRequirementsCount != 0);

	std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements;
	sparseMemoryRequirements.resize(sparseMemRequirementsCount);

	deviceInterface.getImageSparseMemoryRequirements(*m_logicalDevice, *imageSparse, &sparseMemRequirementsCount, &sparseMemoryRequirements[0]);

	deUint32 colorAspectIndex = NO_MATCH_FOUND;

	// Check if the image includes a color aspect
	for (deUint32 memoryReqNdx = 0; memoryReqNdx < sparseMemRequirementsCount; ++memoryReqNdx)
	{
		if (sparseMemoryRequirements[memoryReqNdx].formatProperties.aspectMask & VK_IMAGE_ASPECT_COLOR_BIT)
		{
			colorAspectIndex = memoryReqNdx;
			break;
		}
	}

	if (colorAspectIndex == NO_MATCH_FOUND)
	{
		return tcu::TestStatus(QP_TEST_RESULT_NOT_SUPPORTED, "Image aspect not supported - the test currently supports only VK_IMAGE_ASPECT_COLOR_BIT");
	}

	const VkSparseImageMemoryRequirements	aspectRequirements	= sparseMemoryRequirements[colorAspectIndex];
	const VkImageAspectFlags				aspectMask			= aspectRequirements.formatProperties.aspectMask;
	const VkExtent3D						imageGranularity	= aspectRequirements.formatProperties.imageGranularity;

	DE_ASSERT((aspectRequirements.imageMipTailSize % imageSparseMemRequirements.alignment) == 0);

	typedef de::SharedPtr< Unique<VkDeviceMemory> > DeviceMemoryUniquePtr;

	std::vector<VkSparseImageMemoryBind>	imageResidencyMemoryBinds;
	std::vector<VkSparseMemoryBind>			imageMipTailMemoryBinds;
	std::vector<DeviceMemoryUniquePtr>		deviceMemUniquePtrVec;
	const deUint32							memoryType = findMatchingMemoryType(deviceMemoryProperties, imageSparseMemRequirements, MemoryRequirement::Any);

	if (memoryType == NO_MATCH_FOUND)
	{
		return tcu::TestStatus(QP_TEST_RESULT_FAIL, "No matching memory type found");
	}

	// Bind device memory for each mip level (below the mip tail) of each array layer
	for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
	{
		for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
		{
			const VkExtent3D	mipExtent		= mipLevelExtents(imageSparseInfo.extent, mipLevelNdx);
			const tcu::UVec3	sparseBlocks	= alignedDivide(mipExtent, imageGranularity);
			const deUint32		numSparseBlocks	= sparseBlocks.x() * sparseBlocks.y() * sparseBlocks.z();

			const VkMemoryAllocateInfo allocInfo =
			{
				VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,					//	VkStructureType			sType;
				DE_NULL,												//	const void*				pNext;
				imageSparseMemRequirements.alignment * numSparseBlocks,	//	VkDeviceSize			allocationSize;
				memoryType,												//	deUint32				memoryTypeIndex;
			};

			VkDeviceMemory deviceMemory = 0;
			VK_CHECK(deviceInterface.allocateMemory(*m_logicalDevice, &allocInfo, DE_NULL, &deviceMemory));

			deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(deviceMemory), Deleter<VkDeviceMemory>(deviceInterface, *m_logicalDevice, DE_NULL))));

			VkSparseImageMemoryBind imageMemoryBind;

			imageMemoryBind.subresource.aspectMask	= aspectMask;
			imageMemoryBind.subresource.mipLevel	= mipLevelNdx;
			imageMemoryBind.subresource.arrayLayer	= layerNdx;
			imageMemoryBind.memory					= deviceMemory;
			imageMemoryBind.memoryOffset			= 0u;
			imageMemoryBind.flags					= 0u;
			imageMemoryBind.offset					= makeOffset3D(0u, 0u, 0u);
			imageMemoryBind.extent					= mipExtent;

			imageResidencyMemoryBinds.push_back(imageMemoryBind);
		}

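		// Without VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT each array layer has its own mip tail
		// region at imageMipTailOffset + layerNdx * imageMipTailStride, so bind it per layer here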
		if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
		{
			const VkMemoryAllocateInfo allocInfo =
			{
				VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,	//	VkStructureType	sType;
				DE_NULL,								//	const void*		pNext;
				aspectRequirements.imageMipTailSize,	//	VkDeviceSize	allocationSize;
				memoryType,								//	deUint32		memoryTypeIndex;
			};

			VkDeviceMemory deviceMemory = 0;
			VK_CHECK(deviceInterface.allocateMemory(*m_logicalDevice, &allocInfo, DE_NULL, &deviceMemory));

			deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(deviceMemory), Deleter<VkDeviceMemory>(deviceInterface, *m_logicalDevice, DE_NULL))));

			VkSparseMemoryBind imageMipTailMemoryBind;

			imageMipTailMemoryBind.resourceOffset	= aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride;
			imageMipTailMemoryBind.size				= aspectRequirements.imageMipTailSize;
			imageMipTailMemoryBind.memory			= deviceMemory;
			imageMipTailMemoryBind.memoryOffset		= 0u;
			imageMipTailMemoryBind.flags			= 0u;

			imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
		}
	}

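	// With VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT all array layers share one mip tail region,
	// so a single opaque bind at imageMipTailOffset covers the tail for the whole image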
	if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
	{
		const VkMemoryAllocateInfo allocInfo =
		{
			VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,	//	VkStructureType	sType;
			DE_NULL,								//	const void*		pNext;
			aspectRequirements.imageMipTailSize,	//	VkDeviceSize	allocationSize;
			memoryType,								//	deUint32		memoryTypeIndex;
		};

		VkDeviceMemory deviceMemory = 0;
		VK_CHECK(deviceInterface.allocateMemory(*m_logicalDevice, &allocInfo, DE_NULL, &deviceMemory));

		deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(deviceMemory), Deleter<VkDeviceMemory>(deviceInterface, *m_logicalDevice, DE_NULL))));

		VkSparseMemoryBind imageMipTailMemoryBind;

		imageMipTailMemoryBind.resourceOffset	= aspectRequirements.imageMipTailOffset;
		imageMipTailMemoryBind.size				= aspectRequirements.imageMipTailSize;
		imageMipTailMemoryBind.memory			= deviceMemory;
		imageMipTailMemoryBind.memoryOffset		= 0u;
		imageMipTailMemoryBind.flags			= 0u;

		imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
	}

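	// Semaphore signalled by the sparse binding operation and waited on (at the transfer stage)
	// by the command buffer submission below, so copies cannot start before memory is bound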
	const Unique<VkSemaphore> imageMemoryBindSemaphore(makeSemaphore(deviceInterface, *m_logicalDevice));

	VkBindSparseInfo bindSparseInfo =
	{
		VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,			//VkStructureType							sType;
		DE_NULL,									//const void*								pNext;
		0u,											//deUint32									waitSemaphoreCount;
		DE_NULL,									//const VkSemaphore*						pWaitSemaphores;
		0u,											//deUint32									bufferBindCount;
		DE_NULL,									//const VkSparseBufferMemoryBindInfo*		pBufferBinds;
		0u,											//deUint32									imageOpaqueBindCount;
		DE_NULL,									//const VkSparseImageOpaqueMemoryBindInfo*	pImageOpaqueBinds;
		0u,											//deUint32									imageBindCount;
		DE_NULL,									//const VkSparseImageMemoryBindInfo*		pImageBinds;
		1u,											//deUint32									signalSemaphoreCount;
		&imageMemoryBindSemaphore.get()				//const VkSemaphore*						pSignalSemaphores;
	};

	VkSparseImageMemoryBindInfo			imageResidencyBindInfo;
	VkSparseImageOpaqueMemoryBindInfo	imageMipTailBindInfo;

	if (imageResidencyMemoryBinds.size() > 0)
	{
		imageResidencyBindInfo.image		= *imageSparse;
		imageResidencyBindInfo.bindCount	= static_cast<deUint32>(imageResidencyMemoryBinds.size());
		imageResidencyBindInfo.pBinds		= &imageResidencyMemoryBinds[0];

		bindSparseInfo.imageBindCount		= 1u;
		bindSparseInfo.pImageBinds			= &imageResidencyBindInfo;
	}

	if (imageMipTailMemoryBinds.size() > 0)
	{
		imageMipTailBindInfo.image			= *imageSparse;
		imageMipTailBindInfo.bindCount		= static_cast<deUint32>(imageMipTailMemoryBinds.size());
		imageMipTailBindInfo.pBinds			= &imageMipTailMemoryBinds[0];

		bindSparseInfo.imageOpaqueBindCount	= 1u;
		bindSparseInfo.pImageOpaqueBinds	= &imageMipTailBindInfo;
	}

	// Submit sparse bind commands for execution
	VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));

	// Create command buffer for compute and transfer operations
	const Unique<VkCommandPool>		commandPool(makeCommandPool(deviceInterface, *m_logicalDevice, computeQueue.queueFamilyIndex));
	const Unique<VkCommandBuffer>	commandBuffer(makeCommandBuffer(deviceInterface, *m_logicalDevice, *commandPool));

	// Start recording commands
	beginCommandBuffer(deviceInterface, *commandBuffer);

	const deUint32				imageSizeInBytes		= getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, imageSparseInfo.mipLevels);
	const VkBufferCreateInfo	inputBufferCreateInfo	= makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);

	const de::UniquePtr<Buffer>	inputBuffer(new Buffer(deviceInterface, *m_logicalDevice, *allocator, inputBufferCreateInfo, MemoryRequirement::HostVisible));

	std::vector<deUint8> referenceData;
	referenceData.resize(imageSizeInBytes);

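	// Fill the reference data with a byte pattern that repeats with the period of the sparse
	// block alignment, so every bound block receives non-zero, position-dependent values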
	for (deUint32 valueNdx = 0; valueNdx < imageSizeInBytes; ++valueNdx)
	{
		referenceData[valueNdx] = static_cast<deUint8>((valueNdx % imageSparseMemRequirements.alignment) + 1u);
	}

	deMemcpy(inputBuffer->getAllocation().getHostPtr(), &referenceData[0], imageSizeInBytes);

	flushMappedMemoryRange(deviceInterface, *m_logicalDevice, inputBuffer->getAllocation().getMemory(), inputBuffer->getAllocation().getOffset(), imageSizeInBytes);

	const VkBufferMemoryBarrier inputBufferBarrier
		= makeBufferMemoryBarrier(
			VK_ACCESS_HOST_WRITE_BIT,
			VK_ACCESS_TRANSFER_READ_BIT,
			inputBuffer->get(),
			0u,
			imageSizeInBytes);

	const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers);

	const VkImageMemoryBarrier imageSparseTransferDstBarrier
		= makeImageMemoryBarrier(
			0u,
			VK_ACCESS_TRANSFER_WRITE_BIT,
			VK_IMAGE_LAYOUT_UNDEFINED,
			VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			*imageSparse,
			fullImageSubresourceRange);

	deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 1u, &imageSparseTransferDstBarrier);

	std::vector<VkBufferImageCopy> bufferImageCopy;
	bufferImageCopy.resize(imageSparseInfo.mipLevels);

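	// One copy region per mip level; the buffer is laid out as tightly packed mip levels,
	// each region covering all array layers of that level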
	VkDeviceSize bufferOffset = 0;
	for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
	{
		bufferImageCopy[mipmapNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipmapNdx), imageSparseInfo.arrayLayers, mipmapNdx, bufferOffset);

		bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx);
	}

	deviceInterface.cmdCopyBufferToImage(*commandBuffer, inputBuffer->get(), *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);

	const VkImageMemoryBarrier imageSparseTransferSrcBarrier
		= makeImageMemoryBarrier(
			VK_ACCESS_TRANSFER_WRITE_BIT,
			VK_ACCESS_TRANSFER_READ_BIT,
			VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
			*imageSparse,
			fullImageSubresourceRange);

	deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferSrcBarrier);

	const VkBufferCreateInfo	outputBufferCreateInfo	= makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
	const de::UniquePtr<Buffer>	outputBuffer(new Buffer(deviceInterface, *m_logicalDevice, *allocator, outputBufferCreateInfo, MemoryRequirement::HostVisible));

	deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, outputBuffer->get(), static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);

	const VkBufferMemoryBarrier outputBufferBarrier
		= makeBufferMemoryBarrier(
			VK_ACCESS_TRANSFER_WRITE_BIT,
			VK_ACCESS_HOST_READ_BIT,
			outputBuffer->get(),
			0u,
			imageSizeInBytes);

	deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);

	// End recording commands
	endCommandBuffer(deviceInterface, *commandBuffer);

	const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };

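	// The submission waits on the sparse binding semaphore at the transfer stage, so the
	// copies recorded above do not execute before the memory binds have completed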
	// Submit commands for execution and wait for completion
	submitCommandsAndWait(deviceInterface, *m_logicalDevice, computeQueue.queueHandle, *commandBuffer, 1u, &imageMemoryBindSemaphore.get(), stageBits);

	// Retrieve data from buffer to host memory
	const Allocation& allocation = outputBuffer->getAllocation();

	invalidateMappedMemoryRange(deviceInterface, *m_logicalDevice, allocation.getMemory(), allocation.getOffset(), imageSizeInBytes);

	const deUint8*	outputData = static_cast<const deUint8*>(allocation.getHostPtr());
	tcu::TestStatus	testStatus = tcu::TestStatus::pass("Passed");

	if (deMemCmp(outputData, &referenceData[0], imageSizeInBytes) != 0)
	{
		testStatus = tcu::TestStatus::fail("Failed");
	}

	// Wait for sparse queue to become idle
	deviceInterface.queueWaitIdle(sparseQueue.queueHandle);

	return testStatus;
}

TestInstance* MipmapSparseResidencyCase::createInstance (Context& context) const
{
	return new MipmapSparseResidencyInstance(context, m_imageType, m_imageSize, m_format);
}

} // anonymous ns

tcu::TestCaseGroup* createMipmapSparseResidencyTests (tcu::TestContext& testCtx)
{
	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "mipmap_sparse_residency", "Mipmap Sparse Residency"));

	static const deUint32 sizeCountPerImageType = 3u;

	struct ImageParameters
	{
		ImageType	imageType;
		tcu::UVec3	imageSizes[sizeCountPerImageType];
	};

	static const ImageParameters imageParametersArray[] =
	{
		{ IMAGE_TYPE_2D,		 { tcu::UVec3(512u, 256u, 1u),  tcu::UVec3(1024u, 128u, 1u), tcu::UVec3(11u, 137u, 1u) } },
		{ IMAGE_TYPE_2D_ARRAY,	 { tcu::UVec3(512u, 256u, 6u),  tcu::UVec3(1024u, 128u, 8u), tcu::UVec3(11u, 137u, 3u) } },
		{ IMAGE_TYPE_CUBE,		 { tcu::UVec3(512u, 256u, 1u),  tcu::UVec3(1024u, 128u, 1u), tcu::UVec3(11u, 137u, 1u) } },
		{ IMAGE_TYPE_CUBE_ARRAY, { tcu::UVec3(512u, 256u, 6u),  tcu::UVec3(1024u, 128u, 8u), tcu::UVec3(11u, 137u, 3u) } },
		{ IMAGE_TYPE_3D,		 { tcu::UVec3(256u, 256u, 16u), tcu::UVec3(1024u, 128u, 8u), tcu::UVec3(11u, 137u, 3u) } }
	};

	static const tcu::TextureFormat formats[] =
	{
		tcu::TextureFormat(tcu::TextureFormat::R,	 tcu::TextureFormat::SIGNED_INT32),
		tcu::TextureFormat(tcu::TextureFormat::R,	 tcu::TextureFormat::SIGNED_INT16),
		tcu::TextureFormat(tcu::TextureFormat::R,	 tcu::TextureFormat::SIGNED_INT8),
		tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT32),
		tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT16),
		tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8)
	};

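	// Generate one test case per (image type, format, size) combination; each case name
	// encodes the image size as "<width>_<height>_<depth>"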
	for (deInt32 imageTypeNdx = 0; imageTypeNdx < DE_LENGTH_OF_ARRAY(imageParametersArray); ++imageTypeNdx)
	{
		const ImageType					imageType = imageParametersArray[imageTypeNdx].imageType;
		de::MovePtr<tcu::TestCaseGroup>	imageTypeGroup(new tcu::TestCaseGroup(testCtx, getImageTypeName(imageType).c_str(), ""));

		for (deInt32 formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); ++formatNdx)
		{
			const tcu::TextureFormat&		format = formats[formatNdx];
			de::MovePtr<tcu::TestCaseGroup>	formatGroup(new tcu::TestCaseGroup(testCtx, getShaderImageFormatQualifier(format).c_str(), ""));

			for (deInt32 imageSizeNdx = 0; imageSizeNdx < DE_LENGTH_OF_ARRAY(imageParametersArray[imageTypeNdx].imageSizes); ++imageSizeNdx)
			{
				const tcu::UVec3 imageSize = imageParametersArray[imageTypeNdx].imageSizes[imageSizeNdx];

				std::ostringstream stream;
				stream << imageSize.x() << "_" << imageSize.y() << "_" << imageSize.z();

				formatGroup->addChild(new MipmapSparseResidencyCase(testCtx, stream.str(), "", imageType, imageSize, format));
			}
			imageTypeGroup->addChild(formatGroup.release());
		}
		testGroup->addChild(imageTypeGroup.release());
	}

	return testGroup.release();
}

} // sparse
} // vkt