/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file  vktSparseResourcesMipmapSparseResidency.cpp
 * \brief Sparse partially resident images with mipmaps tests
 *//*--------------------------------------------------------------------*/

#include "vktSparseResourcesMipmapSparseResidency.hpp"
#include "vktSparseResourcesTestsUtil.hpp"
#include "vktSparseResourcesBase.hpp"
#include "vktTestCaseUtil.hpp"

#include "vkDefs.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkPlatform.hpp"
#include "vkPrograms.hpp"
#include "vkMemUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkTypeUtil.hpp"

#include "deUniquePtr.hpp"
#include "deStringUtil.hpp"

#include <sstream>
#include <string>
#include <vector>

using namespace vk;

namespace vkt
{
namespace sparse
{
namespace
{

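// Divide an extent by a divisor component-wise, rounding each component up.
// This yields the number of sparse blocks (of size imageGranularity) needed to
// cover a mip level; e.g. an 11x137x1 level with a 128x128x1 granularity needs
// 1x2x1 blocks.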
tcu::UVec3 alignedDivide (const VkExtent3D& extent, const VkExtent3D& divisor)
{
	tcu::UVec3 result;

	result.x() = extent.width  / divisor.width  + ((extent.width  % divisor.width)  ? 1u : 0u);
	result.y() = extent.height / divisor.height + ((extent.height % divisor.height) ? 1u : 0u);
	result.z() = extent.depth  / divisor.depth  + ((extent.depth  % divisor.depth)  ? 1u : 0u);

	return result;
}

class MipmapSparseResidencyCase : public TestCase
{
public:
					MipmapSparseResidencyCase	(tcu::TestContext&			testCtx,
												 const std::string&			name,
												 const std::string&			description,
												 const ImageType			imageType,
												 const tcu::UVec3&			imageSize,
												 const tcu::TextureFormat&	format,
												 const bool					useDeviceGroups);
	TestInstance*	createInstance				(Context&					context) const;

private:
	const bool					m_useDeviceGroups;
	const ImageType				m_imageType;
	const tcu::UVec3			m_imageSize;
	const tcu::TextureFormat	m_format;
};

MipmapSparseResidencyCase::MipmapSparseResidencyCase (tcu::TestContext&			testCtx,
													  const std::string&		name,
													  const std::string&		description,
													  const ImageType			imageType,
													  const tcu::UVec3&			imageSize,
													  const tcu::TextureFormat&	format,
													  const bool				useDeviceGroups)
	: TestCase				(testCtx, name, description)
	, m_useDeviceGroups		(useDeviceGroups)
	, m_imageType			(imageType)
	, m_imageSize			(imageSize)
	, m_format				(format)
{
}

class MipmapSparseResidencyInstance : public SparseResourcesBaseInstance
{
public:
					MipmapSparseResidencyInstance	(Context&									 context,
													 const ImageType							 imageType,
													 const tcu::UVec3&							 imageSize,
													 const tcu::TextureFormat&					 format,
													 const bool									 useDeviceGroups);
	tcu::TestStatus	iterate							(void);

private:
	const bool					m_useDeviceGroups;
	const ImageType				m_imageType;
	const tcu::UVec3			m_imageSize;
	const tcu::TextureFormat	m_format;
};

MipmapSparseResidencyInstance::MipmapSparseResidencyInstance (Context&					context,
															  const ImageType			imageType,
															  const tcu::UVec3&			imageSize,
															  const tcu::TextureFormat&	format,
															  const bool				useDeviceGroups)
	: SparseResourcesBaseInstance	(context, useDeviceGroups)
	, m_useDeviceGroups				(useDeviceGroups)
	, m_imageType					(imageType)
	, m_imageSize					(imageSize)
	, m_format						(format)
{
}

tcu::TestStatus MipmapSparseResidencyInstance::iterate (void)
{
	const InstanceInterface&	instance		= m_context.getInstanceInterface();
	{
		// Create a logical device supporting both sparse and compute operations
		QueueRequirementsVec queueRequirements;
		queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
		queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u));

		createDeviceSupportingQueues(queueRequirements);
	}

	const VkPhysicalDevice		physicalDevice	= getPhysicalDevice();
	VkImageCreateInfo			imageSparseInfo;
	std::vector<DeviceMemorySp>	deviceMemUniquePtrVec;

	// Check that the image size does not exceed device limits
	if (!isImageSizeSupported(instance, physicalDevice, m_imageType, m_imageSize))
		TCU_THROW(NotSupportedError, "Image size not supported for device");

	// Check if the device supports sparse operations for the image type
	if (!checkSparseSupportForImageType(instance, physicalDevice, m_imageType))
		TCU_THROW(NotSupportedError, "Sparse residency for image type is not supported");

	const DeviceInterface&	deviceInterface	= getDeviceInterface();
	const Queue&			sparseQueue		= getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
	const Queue&			computeQueue	= getQueue(VK_QUEUE_COMPUTE_BIT, 0);

	// Go through all physical devices
	for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
	{
		const deUint32	firstDeviceID			= physDevID;
		const deUint32	secondDeviceID			= (firstDeviceID + 1) % m_numPhysicalDevices;

		imageSparseInfo.sType					= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
		imageSparseInfo.pNext					= DE_NULL;
		imageSparseInfo.flags					= VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
		imageSparseInfo.imageType				= mapImageType(m_imageType);
		imageSparseInfo.format					= mapTextureFormat(m_format);
		imageSparseInfo.extent					= makeExtent3D(getLayerSize(m_imageType, m_imageSize));
		imageSparseInfo.arrayLayers				= getNumLayers(m_imageType, m_imageSize);
		imageSparseInfo.samples					= VK_SAMPLE_COUNT_1_BIT;
		imageSparseInfo.tiling					= VK_IMAGE_TILING_OPTIMAL;
		imageSparseInfo.initialLayout			= VK_IMAGE_LAYOUT_UNDEFINED;
		imageSparseInfo.usage					= VK_IMAGE_USAGE_TRANSFER_DST_BIT |
												  VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
		imageSparseInfo.sharingMode				= VK_SHARING_MODE_EXCLUSIVE;
		imageSparseInfo.queueFamilyIndexCount	= 0u;
		imageSparseInfo.pQueueFamilyIndices		= DE_NULL;

		if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
		{
			imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
		}

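		// Query the format properties to find how many mip levels this format, tiling
		// and usage support for the chosen extent; the image is created with a full mip chain.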
		{
			VkImageFormatProperties imageFormatProperties;
			instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
				imageSparseInfo.format,
				imageSparseInfo.imageType,
				imageSparseInfo.tiling,
				imageSparseInfo.usage,
				imageSparseInfo.flags,
				&imageFormatProperties);

			imageSparseInfo.mipLevels = getImageMaxMipLevels(imageFormatProperties, imageSparseInfo.extent);
		}

		// Check if the device supports sparse operations for the image format
		if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageSparseInfo))
			TCU_THROW(NotSupportedError, "The image format does not support sparse operations");

		// Create sparse image
		const Unique<VkImage> imageSparse(createImage(deviceInterface, getDevice(), &imageSparseInfo));

		// Create sparse image memory bind semaphore
		const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));

		{
			// Get the image's general memory requirements
			const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);

			// Check that the required memory size does not exceed the device's sparse address space limit
			if (imageMemoryRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
				TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");

			DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);

			// Get the image's sparse memory requirements
			const std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *imageSparse);

			DE_ASSERT(sparseMemoryRequirements.size() != 0);

			const deUint32 colorAspectIndex		= getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
			const deUint32 metadataAspectIndex	= getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT);

			if (colorAspectIndex == NO_MATCH_FOUND)
				TCU_THROW(NotSupportedError, "Unsupported image aspect - the test currently supports only VK_IMAGE_ASPECT_COLOR_BIT");

			const VkSparseImageMemoryRequirements	aspectRequirements	= sparseMemoryRequirements[colorAspectIndex];
			const VkImageAspectFlags				aspectMask			= aspectRequirements.formatProperties.aspectMask;
			const VkExtent3D						imageGranularity	= aspectRequirements.formatProperties.imageGranularity;

			DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);

			std::vector<VkSparseImageMemoryBind>	imageResidencyMemoryBinds;
			std::vector<VkSparseMemoryBind>			imageMipTailMemoryBinds;

			const deUint32							memoryType = findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any);

			if (memoryType == NO_MATCH_FOUND)
				return tcu::TestStatus::fail("No matching memory type found");

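			// Mip levels below imageMipTailFirstLod are bound block by block with
			// VkSparseImageMemoryBind; levels at or above it form the mip tail, which is
			// bound with opaque VkSparseMemoryBind structures further below.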
			// Bind memory for each layer
			for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
			{
				for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
				{
					const VkExtent3D			mipExtent			= mipLevelExtents(imageSparseInfo.extent, mipLevelNdx);
					const tcu::UVec3			sparseBlocks		= alignedDivide(mipExtent, imageGranularity);
					const deUint32				numSparseBlocks		= sparseBlocks.x() * sparseBlocks.y() * sparseBlocks.z();
					const VkImageSubresource	subresource			= { aspectMask, mipLevelNdx, layerNdx };

					const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(),
						imageMemoryRequirements.alignment * numSparseBlocks, memoryType, subresource, makeOffset3D(0u, 0u, 0u), mipExtent);

					deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

					imageResidencyMemoryBinds.push_back(imageMemoryBind);
				}

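				// Without VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT, each array layer has its own
				// mip tail region, located at imageMipTailOffset plus a per-layer imageMipTailStride.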
				if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
				{
					const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
						aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);

					deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

					imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
				}

				// Metadata
				if (metadataAspectIndex != NO_MATCH_FOUND)
				{
					const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];

					if (!(metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT))
					{
						const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
							metadataAspectRequirements.imageMipTailSize, memoryType,
							metadataAspectRequirements.imageMipTailOffset + layerNdx * metadataAspectRequirements.imageMipTailStride,
							VK_SPARSE_MEMORY_BIND_METADATA_BIT);

						deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

						imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
					}
				}
			}

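			// With VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT, a single mip tail region is
			// shared by all array layers and is bound once, outside the per-layer loop.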
			if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
			{
				const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
					aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);

				deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

				imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
			}

			// Metadata
			if (metadataAspectIndex != NO_MATCH_FOUND)
			{
				const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];

				if (metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT)
				{
					const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
						metadataAspectRequirements.imageMipTailSize, memoryType, metadataAspectRequirements.imageMipTailOffset,
						VK_SPARSE_MEMORY_BIND_METADATA_BIT);

					deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

					imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
				}
			}

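			// For device group runs, bind the resource instance on firstDeviceID to memory
			// allocated on secondDeviceID, exercising peer memory access between devices.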
			const VkDeviceGroupBindSparseInfo devGroupBindSparseInfo =
			{
				VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR,	//VkStructureType							sType;
				DE_NULL,												//const void*								pNext;
				firstDeviceID,											//deUint32									resourceDeviceIndex;
				secondDeviceID,											//deUint32									memoryDeviceIndex;
			};

			VkBindSparseInfo bindSparseInfo =
			{
				VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,						//VkStructureType							sType;
				m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL,	//const void*								pNext;
				0u,														//deUint32									waitSemaphoreCount;
				DE_NULL,												//const VkSemaphore*						pWaitSemaphores;
				0u,														//deUint32									bufferBindCount;
				DE_NULL,												//const VkSparseBufferMemoryBindInfo*		pBufferBinds;
				0u,														//deUint32									imageOpaqueBindCount;
				DE_NULL,												//const VkSparseImageOpaqueMemoryBindInfo*	pImageOpaqueBinds;
				0u,														//deUint32									imageBindCount;
				DE_NULL,												//const VkSparseImageMemoryBindInfo*		pImageBinds;
				1u,														//deUint32									signalSemaphoreCount;
				&imageMemoryBindSemaphore.get()							//const VkSemaphore*						pSignalSemaphores;
			};

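			// Attach the bind infos only when they are non-empty: if every mip level falls
			// into the mip tail, there are no per-block residency binds at all.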
			VkSparseImageMemoryBindInfo			imageResidencyBindInfo;
			VkSparseImageOpaqueMemoryBindInfo	imageMipTailBindInfo;

			if (imageResidencyMemoryBinds.size() > 0)
			{
				imageResidencyBindInfo.image		= *imageSparse;
				imageResidencyBindInfo.bindCount	= static_cast<deUint32>(imageResidencyMemoryBinds.size());
				imageResidencyBindInfo.pBinds		= &imageResidencyMemoryBinds[0];

				bindSparseInfo.imageBindCount		= 1u;
				bindSparseInfo.pImageBinds			= &imageResidencyBindInfo;
			}

			if (imageMipTailMemoryBinds.size() > 0)
			{
				imageMipTailBindInfo.image			= *imageSparse;
				imageMipTailBindInfo.bindCount		= static_cast<deUint32>(imageMipTailMemoryBinds.size());
				imageMipTailBindInfo.pBinds			= &imageMipTailMemoryBinds[0];

				bindSparseInfo.imageOpaqueBindCount	= 1u;
				bindSparseInfo.pImageOpaqueBinds	= &imageMipTailBindInfo;
			}

			// Submit sparse bind commands for execution
			VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
		}

		// Create command buffer for compute and transfer operations
		const Unique<VkCommandPool>	  commandPool(makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
		const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

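		// Set up one buffer-image copy region per mip level; regions are laid out back to
		// back in the buffer, each offset aligned to BUFFER_IMAGE_COPY_OFFSET_GRANULARITY.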
		std::vector<VkBufferImageCopy> bufferImageCopy(imageSparseInfo.mipLevels);

		{
			deUint32 bufferOffset = 0;
			for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; mipmapNdx++)
			{
				bufferImageCopy[mipmapNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipmapNdx), imageSparseInfo.arrayLayers, mipmapNdx, static_cast<VkDeviceSize>(bufferOffset));
				bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
			}
		}

		// Start recording commands
		beginCommandBuffer(deviceInterface, *commandBuffer);

		const deUint32					imageSizeInBytes		= getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
		const VkBufferCreateInfo		inputBufferCreateInfo	= makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
		const Unique<VkBuffer>			inputBuffer				(createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
		const de::UniquePtr<Allocation>	inputBufferAlloc		(bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));

		std::vector<deUint8> referenceData(imageSizeInBytes);

		const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);

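		// Fill the reference buffer with a repeating byte pattern 1..alignment that is
		// uploaded through the sparse image and later read back for comparison.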
		for (deUint32 valueNdx = 0; valueNdx < imageSizeInBytes; ++valueNdx)
		{
			referenceData[valueNdx] = static_cast<deUint8>((valueNdx % imageMemoryRequirements.alignment) + 1u);
		}

		deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], imageSizeInBytes);

		flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), imageSizeInBytes);

		{
			const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier
			(
				VK_ACCESS_HOST_WRITE_BIT,
				VK_ACCESS_TRANSFER_READ_BIT,
				*inputBuffer,
				0u,
				imageSizeInBytes
			);

			deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
		}

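		// Transition the whole image to TRANSFER_DST_OPTIMAL; when the sparse and compute
		// queues belong to different families, the barrier also specifies a queue family
		// ownership transfer of the image.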
		{
			const VkImageMemoryBarrier imageSparseTransferDstBarrier = makeImageMemoryBarrier
			(
				0u,
				VK_ACCESS_TRANSFER_WRITE_BIT,
				VK_IMAGE_LAYOUT_UNDEFINED,
				VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
				sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
				sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
				*imageSparse,
				makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
			);

			deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferDstBarrier);
		}

		deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);

		{
			const VkImageMemoryBarrier imageSparseTransferSrcBarrier = makeImageMemoryBarrier
			(
				VK_ACCESS_TRANSFER_WRITE_BIT,
				VK_ACCESS_TRANSFER_READ_BIT,
				VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
				VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
				*imageSparse,
				makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
			);

			deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferSrcBarrier);
		}

		const VkBufferCreateInfo		outputBufferCreateInfo	= makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
		const Unique<VkBuffer>			outputBuffer			(createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
		const de::UniquePtr<Allocation>	outputBufferAlloc		(bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));

		deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);

		{
			const VkBufferMemoryBarrier outputBufferBarrier = makeBufferMemoryBarrier
			(
				VK_ACCESS_TRANSFER_WRITE_BIT,
				VK_ACCESS_HOST_READ_BIT,
				*outputBuffer,
				0u,
				imageSizeInBytes
			);

			deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
		}

		// End recording commands
		endCommandBuffer(deviceInterface, *commandBuffer);

		const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };

		// Submit commands for execution, waiting on the memory bind semaphore at the transfer stage, and wait for completion
		submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &imageMemoryBindSemaphore.get(), stageBits,
			0, DE_NULL, m_useDeviceGroups, firstDeviceID);

		// Invalidate the mapped range so device writes to the output buffer are visible to the host
		invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes);

		const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());

		// Wait for sparse queue to become idle
		deviceInterface.queueWaitIdle(sparseQueue.queueHandle);

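		// Compare every mip level of the read-back data with the reference pattern at the
		// corresponding buffer offset.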
		for (deUint32 mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
		{
			const deUint32 mipLevelSizeInBytes	= getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipmapNdx);
			const deUint32 bufferOffset			= static_cast<deUint32>(bufferImageCopy[mipmapNdx].bufferOffset);

			if (deMemCmp(outputData + bufferOffset, &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
				return tcu::TestStatus::fail("Mipmap level data mismatch");
		}
	}
	return tcu::TestStatus::pass("Passed");
}

TestInstance* MipmapSparseResidencyCase::createInstance (Context& context) const
{
	return new MipmapSparseResidencyInstance(context, m_imageType, m_imageSize, m_format, m_useDeviceGroups);
}

} // anonymous namespace

tcu::TestCaseGroup* createMipmapSparseResidencyTestsCommon (tcu::TestContext& testCtx, de::MovePtr<tcu::TestCaseGroup> testGroup, const bool useDeviceGroup = false)
{
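	// Build a three-level test hierarchy: image type / format qualifier / image size
	// (width_height_depth), optionally targeting device groups.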
	static const deUint32 sizeCountPerImageType = 3u;

	struct ImageParameters
	{
		ImageType	imageType;
		tcu::UVec3	imageSizes[sizeCountPerImageType];
	};

	static const ImageParameters imageParametersArray[] =
	{
		{ IMAGE_TYPE_2D,		 { tcu::UVec3(512u, 256u, 1u),  tcu::UVec3(1024u, 128u, 1u), tcu::UVec3(11u,  137u, 1u) } },
		{ IMAGE_TYPE_2D_ARRAY,	 { tcu::UVec3(512u, 256u, 6u),	tcu::UVec3(1024u, 128u, 8u), tcu::UVec3(11u,  137u, 3u) } },
		{ IMAGE_TYPE_CUBE,		 { tcu::UVec3(256u, 256u, 1u),	tcu::UVec3(128u,  128u, 1u), tcu::UVec3(137u, 137u, 1u) } },
		{ IMAGE_TYPE_CUBE_ARRAY, { tcu::UVec3(256u, 256u, 6u),	tcu::UVec3(128u,  128u, 8u), tcu::UVec3(137u, 137u, 3u) } },
		{ IMAGE_TYPE_3D,		 { tcu::UVec3(256u, 256u, 16u), tcu::UVec3(1024u, 128u, 8u), tcu::UVec3(11u,  137u, 3u) } }
	};

	static const tcu::TextureFormat formats[] =
	{
		tcu::TextureFormat(tcu::TextureFormat::R,	 tcu::TextureFormat::SIGNED_INT32),
		tcu::TextureFormat(tcu::TextureFormat::R,	 tcu::TextureFormat::SIGNED_INT16),
		tcu::TextureFormat(tcu::TextureFormat::R,	 tcu::TextureFormat::SIGNED_INT8),
		tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT32),
		tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT16),
		tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8)
	};

	for (deInt32 imageTypeNdx = 0; imageTypeNdx < DE_LENGTH_OF_ARRAY(imageParametersArray); ++imageTypeNdx)
	{
		const ImageType					imageType = imageParametersArray[imageTypeNdx].imageType;
		de::MovePtr<tcu::TestCaseGroup> imageTypeGroup(new tcu::TestCaseGroup(testCtx, getImageTypeName(imageType).c_str(), ""));

		for (deInt32 formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); ++formatNdx)
		{
			const tcu::TextureFormat&		format = formats[formatNdx];
			de::MovePtr<tcu::TestCaseGroup> formatGroup(new tcu::TestCaseGroup(testCtx, getShaderImageFormatQualifier(format).c_str(), ""));

			for (deInt32 imageSizeNdx = 0; imageSizeNdx < DE_LENGTH_OF_ARRAY(imageParametersArray[imageTypeNdx].imageSizes); ++imageSizeNdx)
			{
				const tcu::UVec3 imageSize = imageParametersArray[imageTypeNdx].imageSizes[imageSizeNdx];

				std::ostringstream stream;
				stream << imageSize.x() << "_" << imageSize.y() << "_" << imageSize.z();

				formatGroup->addChild(new MipmapSparseResidencyCase(testCtx, stream.str(), "", imageType, imageSize, format, useDeviceGroup));
			}
			imageTypeGroup->addChild(formatGroup.release());
		}
		testGroup->addChild(imageTypeGroup.release());
	}

	return testGroup.release();
}

tcu::TestCaseGroup* createMipmapSparseResidencyTests (tcu::TestContext& testCtx)
{
	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "mipmap_sparse_residency", "Mipmap Sparse Residency"));
	return createMipmapSparseResidencyTestsCommon(testCtx, testGroup);
}

tcu::TestCaseGroup* createDeviceGroupMipmapSparseResidencyTests (tcu::TestContext& testCtx)
{
	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "device_group_mipmap_sparse_residency", "Mipmap Sparse Residency"));
	return createMipmapSparseResidencyTestsCommon(testCtx, testGroup, true);
}

} // sparse
} // vkt