/*-------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2015 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Simple memory mapping tests.
 *//*--------------------------------------------------------------------*/

#include "vktMemoryMappingTests.hpp"

#include "vktTestCaseUtil.hpp"

#include "tcuMaybe.hpp"
#include "tcuResultCollector.hpp"
#include "tcuTestLog.hpp"
#include "tcuPlatform.hpp"

#include "vkDeviceUtil.hpp"
#include "vkPlatform.hpp"
#include "vkQueryUtil.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkStrUtil.hpp"
#include "vkAllocationCallbackUtil.hpp"

#include "deRandom.hpp"
#include "deSharedPtr.hpp"
#include "deStringUtil.hpp"
#include "deUniquePtr.hpp"
#include "deSTLUtil.hpp"
#include "deMath.h"

#include <string>
#include <vector>
#include <algorithm>

using tcu::Maybe;
using tcu::TestLog;

using de::SharedPtr;

using std::string;
using std::vector;
using std::pair;

using namespace vk;

namespace vkt
{
namespace memory
{
namespace
{
template<typename T>
T divRoundUp (const T& a, const T& b)
{
	return (a / b) + (a % b == 0 ? 0 : 1);
}

template<typename T>
T roundDownToMultiple (const T& a, const T& b)
{
	return b * (a / b);
}

template<typename T>
T roundUpToMultiple (const T& a, const T& b)
{
	return b * (a / b + (a % b != 0 ? 1 : 0));
}
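
// \note Worked example (illustrative): for a = 10, b = 4 the helpers above give
//       divRoundUp(10, 4) = 3, roundDownToMultiple(10, 4) = 8 and
//       roundUpToMultiple(10, 4) = 12; all three assume b > 0 and an integral T.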

enum AllocationKind
{
	ALLOCATION_KIND_SUBALLOCATED										= 0,
	ALLOCATION_KIND_DEDICATED_BUFFER									= 1,
	ALLOCATION_KIND_DEDICATED_IMAGE										= 2,
	ALLOCATION_KIND_LAST
};

// \note Bit vector that guarantees that each value takes only one bit.
// std::vector<bool> is often optimized to only take one bit for each bool, but
// that is an implementation detail and in this case we really need to know how
// much memory is used.
class BitVector
{
public:
	enum
	{
		BLOCK_BIT_SIZE = 8 * sizeof(deUint32)
	};

	BitVector (size_t size, bool value = false)
		: m_data(divRoundUp<size_t>(size, (size_t)BLOCK_BIT_SIZE), value ? ~0x0u : 0x0u)
	{
	}

	bool get (size_t ndx) const
	{
		return (m_data[ndx / BLOCK_BIT_SIZE] & (0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE))) != 0;
	}

	void set (size_t ndx, bool value)
	{
		if (value)
			m_data[ndx / BLOCK_BIT_SIZE] |= 0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE);
		else
			m_data[ndx / BLOCK_BIT_SIZE] &= ~(0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE));
	}

	void setRange (size_t offset, size_t count, bool value)
	{
		size_t ndx = offset;

		for (; (ndx < offset + count) && ((ndx % BLOCK_BIT_SIZE) != 0); ndx++)
		{
			DE_ASSERT(ndx >= offset);
			DE_ASSERT(ndx < offset + count);
			set(ndx, value);
		}

		{
			const size_t endOfFullBlockNdx = roundDownToMultiple<size_t>(offset + count, BLOCK_BIT_SIZE);

			if (ndx < endOfFullBlockNdx)
			{
				deMemset(&m_data[ndx / BLOCK_BIT_SIZE], (value ? 0xFF : 0x0), (endOfFullBlockNdx - ndx) / 8);
				ndx = endOfFullBlockNdx;
			}
		}

		for (; ndx < offset + count; ndx++)
		{
			DE_ASSERT(ndx >= offset);
			DE_ASSERT(ndx < offset + count);
			set(ndx, value);
		}
	}

	void vectorAnd (const BitVector& other, size_t offset, size_t count)
	{
		size_t ndx = offset;

		for (; ndx < offset + count && (ndx % BLOCK_BIT_SIZE) != 0; ndx++)
		{
			DE_ASSERT(ndx >= offset);
			DE_ASSERT(ndx < offset + count);
			set(ndx, other.get(ndx) && get(ndx));
		}

		for (; ndx < roundDownToMultiple<size_t>(offset + count, BLOCK_BIT_SIZE); ndx += BLOCK_BIT_SIZE)
		{
			DE_ASSERT(ndx >= offset);
			DE_ASSERT(ndx < offset + count);
			DE_ASSERT(ndx % BLOCK_BIT_SIZE == 0);
			DE_ASSERT(ndx + BLOCK_BIT_SIZE <= offset + count);
			m_data[ndx / BLOCK_BIT_SIZE] &= other.m_data[ndx / BLOCK_BIT_SIZE];
		}

		for (; ndx < offset + count; ndx++)
		{
			DE_ASSERT(ndx >= offset);
			DE_ASSERT(ndx < offset + count);
			set(ndx, other.get(ndx) && get(ndx));
		}
	}

private:
	vector<deUint32>	m_data;
};
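
// \note Usage sketch (illustrative, not part of the tests): BitVector v(128);
//       v.set(33, true) stores the bit in m_data[1] under mask 0x1u << 1;
//       v.setRange(4, 92, true) sets the unaligned head bits [4,32) one at a
//       time and then fills the aligned blocks [32,96) with a single deMemset().
//       vectorAnd() follows the same head/full-block/tail split.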

class ReferenceMemory
{
public:
	ReferenceMemory (size_t size, size_t atomSize)
		: m_atomSize	(atomSize)
		, m_bytes		(size, 0xDEu)
		, m_defined		(size, false)
		, m_flushed		(size / atomSize, false)
	{
		DE_ASSERT(size % m_atomSize == 0);
	}

	void write (size_t pos, deUint8 value)
	{
		m_bytes[pos] = value;
		m_defined.set(pos, true);
		m_flushed.set(pos / m_atomSize, false);
	}

	bool read (size_t pos, deUint8 value)
	{
		const bool isOk = !m_defined.get(pos)
						|| m_bytes[pos] == value;

		m_bytes[pos] = value;
		m_defined.set(pos, true);

		return isOk;
	}

	bool modifyXor (size_t pos, deUint8 value, deUint8 mask)
	{
		const bool isOk = !m_defined.get(pos)
						|| m_bytes[pos] == value;

		m_bytes[pos] = value ^ mask;
		m_defined.set(pos, true);
		m_flushed.set(pos / m_atomSize, false);

		return isOk;
	}

	void flush (size_t offset, size_t size)
	{
		DE_ASSERT((offset % m_atomSize) == 0);
		DE_ASSERT((size % m_atomSize) == 0);

		m_flushed.setRange(offset / m_atomSize, size / m_atomSize, true);
	}

	void invalidate (size_t offset, size_t size)
	{
		DE_ASSERT((offset % m_atomSize) == 0);
		DE_ASSERT((size % m_atomSize) == 0);

		if (m_atomSize == 1)
		{
			m_defined.vectorAnd(m_flushed, offset, size);
		}
		else
		{
			for (size_t ndx = 0; ndx < size / m_atomSize; ndx++)
			{
				if (!m_flushed.get((offset / m_atomSize) + ndx))
					m_defined.setRange(offset + ndx * m_atomSize, m_atomSize, false);
			}
		}
	}

private:
	const size_t	m_atomSize;
	vector<deUint8>	m_bytes;
	BitVector		m_defined;
	BitVector		m_flushed;
};
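
// \note Semantics sketch (illustrative): with atomSize == 4, write(5, 0xAB)
//       marks byte 5 defined and atom 1 (bytes [4,8)) unflushed; a later
//       invalidate(4, 4) then clears the defined bits for bytes [4,8), so a
//       read() of those bytes accepts any value until they are written again.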

struct MemoryType
{
	MemoryType		(deUint32 index_, const VkMemoryType& type_)
		: index	(index_)
		, type	(type_)
	{
	}

	MemoryType		(void)
		: index	(~0u)
	{
	}

	deUint32		index;
	VkMemoryType	type;
};

size_t computeDeviceMemorySystemMemFootprint (const DeviceInterface& vk, VkDevice device)
{
	AllocationCallbackRecorder	callbackRecorder	(getSystemAllocator());

	{
		// 1 B allocation from memory type 0
		const VkMemoryAllocateInfo	allocInfo	=
		{
			VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
			DE_NULL,
			1u,
			0u,
		};
		const Unique<VkDeviceMemory>			memory			(allocateMemory(vk, device, &allocInfo));
		AllocationCallbackValidationResults		validateRes;

		validateAllocationCallbacks(callbackRecorder, &validateRes);

		TCU_CHECK(validateRes.violations.empty());

		return getLiveSystemAllocationTotal(validateRes)
			   + sizeof(void*)*validateRes.liveAllocations.size(); // allocation overhead
	}
}

Move<VkImage> makeImage (const DeviceInterface& vk, VkDevice device, VkDeviceSize size, deUint32 queueFamilyIndex)
{
	const VkDeviceSize					sizeInPixels					= (size + 3u) / 4u;
	const deUint32						sqrtSize						= static_cast<deUint32>(deFloatCeil(deFloatSqrt(static_cast<float>(sizeInPixels))));
	const deUint32						powerOfTwoSize					= deSmallestGreaterOrEquallPowerOfTwoU32(sqrtSize);
	const VkImageCreateInfo				colorImageParams				=
	{
		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,							// VkStructureType			sType;
		DE_NULL,														// const void*				pNext;
		0u,																// VkImageCreateFlags		flags;
		VK_IMAGE_TYPE_2D,												// VkImageType				imageType;
		VK_FORMAT_R8G8B8A8_UINT,										// VkFormat					format;
		{
			powerOfTwoSize,
			powerOfTwoSize,
			1u
		},																// VkExtent3D				extent;
		1u,																// deUint32					mipLevels;
		1u,																// deUint32					arrayLayers;
		VK_SAMPLE_COUNT_1_BIT,											// VkSampleCountFlagBits	samples;
		VK_IMAGE_TILING_LINEAR,											// VkImageTiling			tiling;
		VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags		usage;
		VK_SHARING_MODE_EXCLUSIVE,										// VkSharingMode			sharingMode;
		1u,																// deUint32					queueFamilyIndexCount;
		&queueFamilyIndex,												// const deUint32*			pQueueFamilyIndices;
		VK_IMAGE_LAYOUT_UNDEFINED,										// VkImageLayout			initialLayout;
	};

	return createImage(vk, device, &colorImageParams);
}

Move<VkBuffer> makeBuffer (const DeviceInterface& vk, VkDevice device, VkDeviceSize size, deUint32 queueFamilyIndex)
{
	const VkBufferCreateInfo			bufferParams					=
	{
		VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,							//	VkStructureType			sType;
		DE_NULL,														//	const void*				pNext;
		0u,																//	VkBufferCreateFlags		flags;
		size,															//	VkDeviceSize			size;
		VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, //	VkBufferUsageFlags	usage;
		VK_SHARING_MODE_EXCLUSIVE,										//	VkSharingMode			sharingMode;
		1u,																//	deUint32				queueFamilyIndexCount;
		&queueFamilyIndex,												//	const deUint32*			pQueueFamilyIndices;
	};
	return vk::createBuffer(vk, device, &bufferParams, (const VkAllocationCallbacks*)DE_NULL);
}

VkMemoryRequirements getImageMemoryRequirements (const DeviceInterface& vk, VkDevice device, Move<VkImage>& image)
{
	VkImageMemoryRequirementsInfo2	info								=
	{
		VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,				// VkStructureType			sType
		DE_NULL,														// const void*				pNext
		*image															// VkImage					image
	};
	VkMemoryDedicatedRequirements	dedicatedRequirements				=
	{
		VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS,				// VkStructureType			sType
		DE_NULL,														// const void*				pNext
		VK_FALSE,														// VkBool32					prefersDedicatedAllocation
		VK_FALSE														// VkBool32					requiresDedicatedAllocation
	};
	VkMemoryRequirements2			req2								=
	{
		VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,						// VkStructureType			sType
		&dedicatedRequirements,											// void*					pNext
		{0, 0, 0}														// VkMemoryRequirements		memoryRequirements
	};

	vk.getImageMemoryRequirements2(device, &info, &req2);

	return req2.memoryRequirements;
}

VkMemoryRequirements getBufferMemoryRequirements (const DeviceInterface& vk, VkDevice device, Move<VkBuffer>& buffer)
{
	VkBufferMemoryRequirementsInfo2	info								=
	{
		VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,			// VkStructureType			sType
		DE_NULL,														// const void*				pNext
		*buffer															// VkBuffer					buffer
	};
	VkMemoryDedicatedRequirements	dedicatedRequirements				=
	{
		VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS,				// VkStructureType			sType
		DE_NULL,														// const void*				pNext
		VK_FALSE,														// VkBool32					prefersDedicatedAllocation
		VK_FALSE														// VkBool32					requiresDedicatedAllocation
	};
	VkMemoryRequirements2			req2								=
	{
		VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,						// VkStructureType		sType
		&dedicatedRequirements,											// void*				pNext
		{0, 0, 0}														// VkMemoryRequirements	memoryRequirements
	};

	vk.getBufferMemoryRequirements2(device, &info, &req2);

	return req2.memoryRequirements;
}

Move<VkDeviceMemory> allocMemory (const DeviceInterface& vk, VkDevice device, VkDeviceSize pAllocInfo_allocationSize, deUint32 pAllocInfo_memoryTypeIndex)
{
	const VkMemoryAllocateInfo			pAllocInfo						=
	{
		VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
		DE_NULL,
		pAllocInfo_allocationSize,
		pAllocInfo_memoryTypeIndex,
	};
	return allocateMemory(vk, device, &pAllocInfo);
}

Move<VkDeviceMemory> allocMemory (const DeviceInterface& vk, VkDevice device, VkDeviceSize pAllocInfo_allocationSize, deUint32 pAllocInfo_memoryTypeIndex, Move<VkImage>& image, Move<VkBuffer>& buffer)
{
	DE_ASSERT((!image) || (!buffer));

	const VkMemoryDedicatedAllocateInfo
										dedicatedAllocateInfo			=
	{
		VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,			// VkStructureType		sType
		DE_NULL,														// const void*			pNext
		*image,															// VkImage				image
		*buffer															// VkBuffer				buffer
	};

	const VkMemoryAllocateInfo			pAllocInfo						=
	{
		VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
		!image && !buffer ? DE_NULL : &dedicatedAllocateInfo,
		pAllocInfo_allocationSize,
		pAllocInfo_memoryTypeIndex,
	};
	return allocateMemory(vk, device, &pAllocInfo);
}
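
// \note Sketch of the dedicated-allocation path (illustrative; `typeNdx` is a
//       hypothetical memory type index): for ALLOCATION_KIND_DEDICATED_IMAGE the
//       caller creates the image first, queries its requirements and then
//       allocates with the image chained in:
//
//           image = makeImage(vkd, device, size, queueFamilyIndex);
//           req   = getImageMemoryRequirements(vkd, device, image);
//           mem   = allocMemory(vkd, device, req.size, typeNdx, image, buffer);
//
//       where `buffer` is a null Move<VkBuffer>, so VkMemoryDedicatedAllocateInfo
//       carries { image, VK_NULL_HANDLE } as required by VK_KHR_dedicated_allocation.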

struct MemoryRange
{
	MemoryRange (VkDeviceSize offset_ = ~(VkDeviceSize)0, VkDeviceSize size_ = ~(VkDeviceSize)0)
		: offset	(offset_)
		, size		(size_)
	{
	}

	VkDeviceSize	offset;
	VkDeviceSize	size;
};

struct TestConfig
{
	TestConfig (void)
		: allocationSize	(~(VkDeviceSize)0)
		, seed				(0u)
		, remap				(false)
		, allocationKind	(ALLOCATION_KIND_SUBALLOCATED)
	{
	}

	VkDeviceSize		allocationSize;
	deUint32			seed;

	MemoryRange			mapping;
	vector<MemoryRange>	flushMappings;
	vector<MemoryRange>	invalidateMappings;
	bool				remap;
	AllocationKind		allocationKind;
};

bool compareAndLogBuffer (TestLog& log, size_t size, const deUint8* result, const deUint8* reference)
{
	size_t	failedBytes	= 0;
	size_t	firstFailed	= (size_t)-1;

	for (size_t ndx = 0; ndx < size; ndx++)
	{
		if (result[ndx] != reference[ndx])
		{
			failedBytes++;

			if (firstFailed == (size_t)-1)
				firstFailed = ndx;
		}
	}

	if (failedBytes > 0)
	{
		log << TestLog::Message << "Comparison failed. Failed bytes " << failedBytes << ". First failed at offset " << firstFailed << "." << TestLog::EndMessage;

		std::ostringstream	expectedValues;
		std::ostringstream	resultValues;

		for (size_t ndx = firstFailed; ndx < firstFailed + 10 && ndx < size; ndx++)
		{
			if (ndx != firstFailed)
			{
				expectedValues << ", ";
				resultValues << ", ";
			}

			// Widen the bytes so they are logged as numbers rather than as characters.
			expectedValues << (deUint32)reference[ndx];
			resultValues << (deUint32)result[ndx];
		}

		if (firstFailed + 10 < size)
		{
			expectedValues << "...";
			resultValues << "...";
		}

		log << TestLog::Message << "Expected values at offset: " << firstFailed << ", " << expectedValues.str() << TestLog::EndMessage;
		log << TestLog::Message << "Result values at offset: " << firstFailed << ", " << resultValues.str() << TestLog::EndMessage;

		return false;
	}
	else
		return true;
}

tcu::TestStatus testMemoryMapping (Context& context, const TestConfig config)
{
	TestLog&								log							= context.getTestContext().getLog();
	tcu::ResultCollector					result						(log);
	bool									atLeastOneTestPerformed		= false;
	const VkPhysicalDevice					physicalDevice				= context.getPhysicalDevice();
	const VkDevice							device						= context.getDevice();
	const InstanceInterface&				vki							= context.getInstanceInterface();
	const DeviceInterface&					vkd							= context.getDeviceInterface();
	const VkPhysicalDeviceMemoryProperties	memoryProperties			= getPhysicalDeviceMemoryProperties(vki, physicalDevice);
	// \todo [2016-05-27 misojarvi] Remove once drivers start reporting correctly nonCoherentAtomSize that is at least 1.
	const VkDeviceSize						nonCoherentAtomSize			= context.getDeviceProperties().limits.nonCoherentAtomSize != 0
																		? context.getDeviceProperties().limits.nonCoherentAtomSize
																		: 1;
	const deUint32							queueFamilyIndex			= context.getUniversalQueueFamilyIndex();

	if (config.allocationKind == ALLOCATION_KIND_DEDICATED_IMAGE
	||	config.allocationKind == ALLOCATION_KIND_DEDICATED_BUFFER)
	{
		const std::vector<std::string>&		extensions					= context.getDeviceExtensions();
		const deBool						isSupported					= isDeviceExtensionSupported(context.getUsedApiVersion(), extensions, "VK_KHR_dedicated_allocation");
		if (!isSupported)
		{
			TCU_THROW(NotSupportedError, "VK_KHR_dedicated_allocation is not supported");
		}
	}

	{
		const tcu::ScopedLogSection	section	(log, "TestCaseInfo", "TestCaseInfo");

		log << TestLog::Message << "Seed: " << config.seed << TestLog::EndMessage;
		log << TestLog::Message << "Allocation size: " << config.allocationSize << TestLog::EndMessage;
		log << TestLog::Message << "Mapping, offset: " << config.mapping.offset << ", size: " << config.mapping.size << TestLog::EndMessage;

		if (!config.flushMappings.empty())
		{
			log << TestLog::Message << "Flushing the following ranges:" << TestLog::EndMessage;

			for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
				log << TestLog::Message << "\tOffset: " << config.flushMappings[ndx].offset << ", Size: " << config.flushMappings[ndx].size << TestLog::EndMessage;
		}

		if (config.remap)
			log << TestLog::Message << "Remapping memory between flush and invalidation." << TestLog::EndMessage;

		if (!config.invalidateMappings.empty())
		{
			log << TestLog::Message << "Invalidating the following ranges:" << TestLog::EndMessage;

			for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
				log << TestLog::Message << "\tOffset: " << config.invalidateMappings[ndx].offset << ", Size: " << config.invalidateMappings[ndx].size << TestLog::EndMessage;
		}
	}

	for (deUint32 memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
	{
		try
		{
			const tcu::ScopedLogSection		section		(log, "MemoryType" + de::toString(memoryTypeIndex), "MemoryType" + de::toString(memoryTypeIndex));
			const vk::VkMemoryType&			memoryType	= memoryProperties.memoryTypes[memoryTypeIndex];
			const VkMemoryHeap&				memoryHeap	= memoryProperties.memoryHeaps[memoryType.heapIndex];
			const VkDeviceSize				atomSize	= (memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0
														? 1
														: nonCoherentAtomSize;

			VkDeviceSize					allocationSize				= roundUpToMultiple(config.allocationSize, atomSize);
			vk::VkMemoryRequirements		req							=
			{
				(VkDeviceSize)allocationSize,
				(VkDeviceSize)0,
				~(deUint32)0u
			};
			Move<VkImage>					image;
			Move<VkBuffer>					buffer;

			if (config.allocationKind == ALLOCATION_KIND_DEDICATED_IMAGE)
			{
				image = makeImage(vkd, device, allocationSize, queueFamilyIndex);
				req = getImageMemoryRequirements(vkd, device, image);
			}
			else if (config.allocationKind == ALLOCATION_KIND_DEDICATED_BUFFER)
			{
				buffer = makeBuffer(vkd, device, allocationSize, queueFamilyIndex);
				req = getBufferMemoryRequirements(vkd, device, buffer);
			}
			allocationSize = req.size;
			VkDeviceSize					mappingSize					= roundUpToMultiple(config.mapping.size, atomSize);
			VkDeviceSize					mappingOffset				= roundUpToMultiple(config.mapping.offset, atomSize);
			if (config.mapping.size == config.allocationSize && config.mapping.offset == 0u)
			{
				mappingSize = allocationSize;
			}

			log << TestLog::Message << "MemoryType: " << memoryType << TestLog::EndMessage;
			log << TestLog::Message << "MemoryHeap: " << memoryHeap << TestLog::EndMessage;
			log << TestLog::Message << "AtomSize: " << atomSize << TestLog::EndMessage;
			log << TestLog::Message << "AllocationSize: " << allocationSize << TestLog::EndMessage;
			log << TestLog::Message << "Mapping, offset: " << mappingOffset << ", size: " << mappingSize << TestLog::EndMessage;

			if ((req.memoryTypeBits & (1u << memoryTypeIndex)) == 0)
			{
				static const char* const allocationKindName[] =
				{
					"suballocation",
					"dedicated allocation of buffers",
					"dedicated allocation of images"
				};
				log << TestLog::Message << "Memory type does not support " << allocationKindName[static_cast<deUint32>(config.allocationKind)] << '.' << TestLog::EndMessage;
				continue;
			}

			if (!config.flushMappings.empty())
			{
				log << TestLog::Message << "Flushing the following ranges:" << TestLog::EndMessage;

				for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
				{
					const VkDeviceSize	offset	= roundUpToMultiple(config.flushMappings[ndx].offset, atomSize);
					const VkDeviceSize	size	= roundUpToMultiple(config.flushMappings[ndx].size, atomSize);
					log << TestLog::Message << "\tOffset: " << offset << ", Size: " << size << TestLog::EndMessage;
				}
			}

			if (!config.invalidateMappings.empty())
			{
				log << TestLog::Message << "Invalidating the following ranges:" << TestLog::EndMessage;

				for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
				{
					const VkDeviceSize	offset	= roundUpToMultiple(config.invalidateMappings[ndx].offset, atomSize);
					const VkDeviceSize	size	= roundUpToMultiple(config.invalidateMappings[ndx].size, atomSize);
					log << TestLog::Message << "\tOffset: " << offset << ", Size: " << size << TestLog::EndMessage;
				}
			}

			if ((memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
			{
				log << TestLog::Message << "Memory type doesn't support mapping." << TestLog::EndMessage;
			}
			else if (memoryHeap.size <= 4 * allocationSize)
			{
				log << TestLog::Message << "Memory type's heap is too small." << TestLog::EndMessage;
			}
			else
			{
				atLeastOneTestPerformed = true;
				const Unique<VkDeviceMemory>	memory				(allocMemory(vkd, device, allocationSize, memoryTypeIndex, image, buffer));
				de::Random						rng					(config.seed);
				vector<deUint8>					reference			((size_t)(allocationSize));
				deUint8*						mapping				= DE_NULL;

				{
					void* ptr;
					VK_CHECK(vkd.mapMemory(device, *memory, mappingOffset, mappingSize, 0u, &ptr));
					TCU_CHECK(ptr);

					mapping = (deUint8*)ptr;
				}

				for (VkDeviceSize ndx = 0; ndx < mappingSize; ndx++)
				{
					const deUint8 val = rng.getUint8();

					mapping[ndx]								= val;
					reference[(size_t)(mappingOffset + ndx)]	= val;
				}

				if (!config.flushMappings.empty())
				{
					vector<VkMappedMemoryRange> ranges;

					for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
					{
						const VkMappedMemoryRange range =
						{
							VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
							DE_NULL,

							*memory,
							roundUpToMultiple(config.flushMappings[ndx].offset, atomSize),
							roundUpToMultiple(config.flushMappings[ndx].size, atomSize),
						};

						ranges.push_back(range);
					}

					VK_CHECK(vkd.flushMappedMemoryRanges(device, (deUint32)ranges.size(), &ranges[0]));
				}

				if (config.remap)
				{
					void* ptr;
					vkd.unmapMemory(device, *memory);
					VK_CHECK(vkd.mapMemory(device, *memory, mappingOffset, mappingSize, 0u, &ptr));
					TCU_CHECK(ptr);

					mapping = (deUint8*)ptr;
				}

				if (!config.invalidateMappings.empty())
				{
					vector<VkMappedMemoryRange> ranges;

					for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
					{
						const VkMappedMemoryRange range =
						{
							VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
							DE_NULL,

							*memory,
							roundUpToMultiple(config.invalidateMappings[ndx].offset, atomSize),
							roundUpToMultiple(config.invalidateMappings[ndx].size, atomSize),
						};

						ranges.push_back(range);
					}

					VK_CHECK(vkd.invalidateMappedMemoryRanges(device, static_cast<deUint32>(ranges.size()), &ranges[0]));
				}

				if (!compareAndLogBuffer(log, static_cast<size_t>(mappingSize), mapping, &reference[static_cast<size_t>(mappingOffset)]))
					result.fail("Unexpected values read from mapped memory.");

				vkd.unmapMemory(device, *memory);
			}
		}
		catch (const tcu::TestError& error)
		{
			result.fail(error.getMessage());
		}
	}

	if (!atLeastOneTestPerformed)
		result.addResult(QP_TEST_RESULT_NOT_SUPPORTED, "No suitable memory kind found to perform test.");

	return tcu::TestStatus(result.getResult(), result.getMessage());
}

class MemoryMapping
{
public:
						MemoryMapping	(const MemoryRange&	range,
										 void*				ptr,
										 ReferenceMemory&	reference);

	void				randomRead		(de::Random& rng);
	void				randomWrite		(de::Random& rng);
	void				randomModify	(de::Random& rng);

	const MemoryRange&	getRange		(void) const { return m_range; }

private:
	MemoryRange			m_range;
	void*				m_ptr;
	ReferenceMemory&	m_reference;
};

MemoryMapping::MemoryMapping (const MemoryRange&	range,
							  void*					ptr,
							  ReferenceMemory&		reference)
	: m_range		(range)
	, m_ptr			(ptr)
	, m_reference	(reference)
{
	DE_ASSERT(range.size > 0);
}

void MemoryMapping::randomRead (de::Random& rng)
{
	const size_t count = (size_t)rng.getInt(0, 100);

	for (size_t ndx = 0; ndx < count; ndx++)
	{
		const size_t	pos	= (size_t)(rng.getUint64() % (deUint64)m_range.size);
		const deUint8	val	= ((deUint8*)m_ptr)[pos];

		TCU_CHECK(m_reference.read((size_t)(m_range.offset + pos), val));
	}
}

void MemoryMapping::randomWrite (de::Random& rng)
{
	const size_t count = (size_t)rng.getInt(0, 100);

	for (size_t ndx = 0; ndx < count; ndx++)
	{
		const size_t	pos	= (size_t)(rng.getUint64() % (deUint64)m_range.size);
		const deUint8	val	= rng.getUint8();

		((deUint8*)m_ptr)[pos]	= val;
		m_reference.write((size_t)(m_range.offset + pos), val);
	}
}

void MemoryMapping::randomModify (de::Random& rng)
{
	const size_t count = (size_t)rng.getInt(0, 100);

	for (size_t ndx = 0; ndx < count; ndx++)
	{
		const size_t	pos		= (size_t)(rng.getUint64() % (deUint64)m_range.size);
		const deUint8	val		= ((deUint8*)m_ptr)[pos];
		const deUint8	mask	= rng.getUint8();

		((deUint8*)m_ptr)[pos]	= val ^ mask;
		TCU_CHECK(m_reference.modifyXor((size_t)(m_range.offset + pos), val, mask));
	}
}

VkDeviceSize randomSize (de::Random& rng, VkDeviceSize atomSize, VkDeviceSize maxSize)
{
	const VkDeviceSize maxSizeInAtoms = maxSize / atomSize;

	DE_ASSERT(maxSizeInAtoms > 0);

	return maxSizeInAtoms > 1
			? atomSize * (1 + (VkDeviceSize)(rng.getUint64() % (deUint64)maxSizeInAtoms))
			: atomSize;
}

VkDeviceSize randomOffset (de::Random& rng, VkDeviceSize atomSize, VkDeviceSize maxOffset)
{
	const VkDeviceSize maxOffsetInAtoms = maxOffset / atomSize;

	return maxOffsetInAtoms > 0
			? atomSize * (VkDeviceSize)(rng.getUint64() % (deUint64)(maxOffsetInAtoms + 1))
			: 0;
}
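
// \note Worked example (illustrative): with atomSize = 4 and maxSize = 32,
//       randomSize() returns one of { 4, 8, ..., 32 } (1..8 atoms), and
//       randomOffset(rng, 4, 24) returns one of { 0, 4, ..., 24 }; both stay
//       aligned to the non-coherent atom size as required for flush and
//       invalidate ranges.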

void randomRanges (de::Random& rng, vector<VkMappedMemoryRange>& ranges, size_t count, VkDeviceMemory memory, VkDeviceSize minOffset, VkDeviceSize maxSize, VkDeviceSize atomSize)
{
	ranges.resize(count);

	for (size_t rangeNdx = 0; rangeNdx < count; rangeNdx++)
	{
		const VkDeviceSize	size	= randomSize(rng, atomSize, maxSize);
		const VkDeviceSize	offset	= minOffset + randomOffset(rng, atomSize, maxSize - size);

		const VkMappedMemoryRange range =
		{
			VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
			DE_NULL,

			memory,
			offset,
			size
		};
		ranges[rangeNdx] = range;
	}
}

class MemoryObject
{
public:
							MemoryObject			(const DeviceInterface&		vkd,
													 VkDevice					device,
													 VkDeviceSize				size,
													 deUint32					memoryTypeIndex,
													 VkDeviceSize				atomSize,
													 VkDeviceSize				memoryUsage,
													 VkDeviceSize				referenceMemoryUsage);

							~MemoryObject			(void);

	MemoryMapping*			mapRandom				(const DeviceInterface& vkd, VkDevice device, de::Random& rng);
	void					unmap					(void);

	void					randomFlush				(const DeviceInterface& vkd, VkDevice device, de::Random& rng);
	void					randomInvalidate		(const DeviceInterface& vkd, VkDevice device, de::Random& rng);

	VkDeviceSize			getSize					(void) const { return m_size; }
	MemoryMapping*			getMapping				(void) { return m_mapping; }

	VkDeviceSize			getMemoryUsage			(void) const { return m_memoryUsage; }
	VkDeviceSize			getReferenceMemoryUsage	(void) const { return m_referenceMemoryUsage; }
private:
	const DeviceInterface&	m_vkd;
	const VkDevice			m_device;

	const deUint32			m_memoryTypeIndex;
	const VkDeviceSize		m_size;
	const VkDeviceSize		m_atomSize;
	const VkDeviceSize		m_memoryUsage;
	const VkDeviceSize		m_referenceMemoryUsage;

	Move<VkDeviceMemory>	m_memory;

	MemoryMapping*			m_mapping;
	ReferenceMemory			m_referenceMemory;
};

MemoryObject::MemoryObject (const DeviceInterface&		vkd,
							VkDevice					device,
							VkDeviceSize				size,
							deUint32					memoryTypeIndex,
							VkDeviceSize				atomSize,
							VkDeviceSize				memoryUsage,
							VkDeviceSize				referenceMemoryUsage)
	: m_vkd						(vkd)
	, m_device					(device)
	, m_memoryTypeIndex			(memoryTypeIndex)
	, m_size					(size)
	, m_atomSize				(atomSize)
	, m_memoryUsage				(memoryUsage)
	, m_referenceMemoryUsage	(referenceMemoryUsage)
	, m_mapping					(DE_NULL)
	, m_referenceMemory			((size_t)size, (size_t)m_atomSize)
{
	m_memory = allocMemory(m_vkd, m_device, m_size, m_memoryTypeIndex);
}

MemoryObject::~MemoryObject (void)
{
	delete m_mapping;
}

MemoryMapping* MemoryObject::mapRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
{
	const VkDeviceSize	size	= randomSize(rng, m_atomSize, m_size);
	const VkDeviceSize	offset	= randomOffset(rng, m_atomSize, m_size - size);
	void*				ptr;

	DE_ASSERT(!m_mapping);

	VK_CHECK(vkd.mapMemory(device, *m_memory, offset, size, 0u, &ptr));
	TCU_CHECK(ptr);
	m_mapping = new MemoryMapping(MemoryRange(offset, size), ptr, m_referenceMemory);

	return m_mapping;
}

void MemoryObject::unmap (void)
{
	m_vkd.unmapMemory(m_device, *m_memory);

	delete m_mapping;
	m_mapping = DE_NULL;
}

void MemoryObject::randomFlush (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
{
	const size_t				rangeCount	= (size_t)rng.getInt(1, 10);
	vector<VkMappedMemoryRange>	ranges		(rangeCount);

	randomRanges(rng, ranges, rangeCount, *m_memory, m_mapping->getRange().offset, m_mapping->getRange().size, m_atomSize);

	for (size_t rangeNdx = 0; rangeNdx < ranges.size(); rangeNdx++)
		m_referenceMemory.flush((size_t)ranges[rangeNdx].offset, (size_t)ranges[rangeNdx].size);

	VK_CHECK(vkd.flushMappedMemoryRanges(device, (deUint32)ranges.size(), ranges.empty() ? DE_NULL : &ranges[0]));
}

void MemoryObject::randomInvalidate (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
{
	const size_t				rangeCount	= (size_t)rng.getInt(1, 10);
	vector<VkMappedMemoryRange>	ranges		(rangeCount);

	randomRanges(rng, ranges, rangeCount, *m_memory, m_mapping->getRange().offset, m_mapping->getRange().size, m_atomSize);

	for (size_t rangeNdx = 0; rangeNdx < ranges.size(); rangeNdx++)
		m_referenceMemory.invalidate((size_t)ranges[rangeNdx].offset, (size_t)ranges[rangeNdx].size);

	VK_CHECK(vkd.invalidateMappedMemoryRanges(device, (deUint32)ranges.size(), ranges.empty() ? DE_NULL : &ranges[0]));
}

enum
{
	MAX_MEMORY_USAGE_DIV = 2, // Use only 1/2 of each memory heap.
	MAX_MEMORY_ALLOC_DIV = 2, // Do not alloc more than 1/2 of available space.
};
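
// \note Worked example (illustrative): with a 256 MiB heap,
//       MAX_MEMORY_USAGE_DIV = 2 caps total usage of that heap at 128 MiB, and
//       MAX_MEMORY_ALLOC_DIV = 2 further halves the computed maximum size of a
//       single allocation, keeping room for the reference copy in system memory.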

template<typename T>
void removeFirstEqual (vector<T>& vec, const T& val)
{
	for (size_t ndx = 0; ndx < vec.size(); ndx++)
	{
		if (vec[ndx] == val)
		{
			vec[ndx] = vec.back();
			vec.pop_back();
			return;
		}
	}
}

enum MemoryClass
{
	MEMORY_CLASS_SYSTEM = 0,
	MEMORY_CLASS_DEVICE,

	MEMORY_CLASS_LAST
};

// \todo [2016-04-20 pyry] Consider estimating memory fragmentation
class TotalMemoryTracker
{
public:
					TotalMemoryTracker	(void)
	{
		std::fill(DE_ARRAY_BEGIN(m_usage), DE_ARRAY_END(m_usage), 0);
	}

	void			allocate			(MemoryClass memClass, VkDeviceSize size)
	{
		m_usage[memClass] += size;
	}

	void			free				(MemoryClass memClass, VkDeviceSize size)
	{
		DE_ASSERT(size <= m_usage[memClass]);
		m_usage[memClass] -= size;
	}

	VkDeviceSize	getUsage			(MemoryClass memClass) const
	{
		return m_usage[memClass];
	}

	VkDeviceSize	getTotalUsage		(void) const
	{
		VkDeviceSize total = 0;
		for (int ndx = 0; ndx < MEMORY_CLASS_LAST; ++ndx)
			total += getUsage((MemoryClass)ndx);
		return total;
	}

private:
	VkDeviceSize	m_usage[MEMORY_CLASS_LAST];
};

VkDeviceSize getHostPageSize (void)
{
	return 4096;
}

VkDeviceSize getMinAtomSize (VkDeviceSize nonCoherentAtomSize, const vector<MemoryType>& memoryTypes)
{
	for (size_t ndx = 0; ndx < memoryTypes.size(); ndx++)
	{
		if ((memoryTypes[ndx].type.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
			return 1;
	}

	return nonCoherentAtomSize;
}

class MemoryHeap
{
public:
	MemoryHeap (const VkMemoryHeap&			heap,
				const vector<MemoryType>&	memoryTypes,
				const PlatformMemoryLimits&	memoryLimits,
				const VkDeviceSize			nonCoherentAtomSize,
				TotalMemoryTracker&			totalMemTracker)
		: m_heap				(heap)
		, m_memoryTypes			(memoryTypes)
		, m_limits				(memoryLimits)
		, m_nonCoherentAtomSize	(nonCoherentAtomSize)
		, m_minAtomSize			(getMinAtomSize(nonCoherentAtomSize, memoryTypes))
		, m_totalMemTracker		(totalMemTracker)
		, m_usage				(0)
	{
	}

	~MemoryHeap (void)
	{
		for (vector<MemoryObject*>::iterator iter = m_objects.begin(); iter != m_objects.end(); ++iter)
			delete *iter;
	}

	bool								full			(void) const;
	bool								empty			(void) const
	{
		return m_usage == 0 && !full();
	}

	MemoryObject*						allocateRandom	(const DeviceInterface& vkd, VkDevice device, de::Random& rng);

	MemoryObject*						getRandomObject	(de::Random& rng) const
	{
		return rng.choose<MemoryObject*>(m_objects.begin(), m_objects.end());
	}

	void								free			(MemoryObject* object)
	{
		removeFirstEqual(m_objects, object);
		m_usage -= object->getMemoryUsage();
		m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, object->getReferenceMemoryUsage());
		m_totalMemTracker.free(getMemoryClass(), object->getMemoryUsage());
		delete object;
	}

private:
	MemoryClass							getMemoryClass	(void) const
	{
		if ((m_heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
			return MEMORY_CLASS_DEVICE;
		else
			return MEMORY_CLASS_SYSTEM;
	}

	const VkMemoryHeap			m_heap;
	const vector<MemoryType>	m_memoryTypes;
	const PlatformMemoryLimits&	m_limits;
	const VkDeviceSize			m_nonCoherentAtomSize;
	const VkDeviceSize			m_minAtomSize;
	TotalMemoryTracker&			m_totalMemTracker;

	VkDeviceSize				m_usage;
	vector<MemoryObject*>		m_objects;
};

// Heap is full if there is not enough memory to allocate a minimal memory object.
bool MemoryHeap::full (void) const
{
	DE_ASSERT(m_usage <= m_heap.size/MAX_MEMORY_USAGE_DIV);

	const VkDeviceSize	availableInHeap		= m_heap.size/MAX_MEMORY_USAGE_DIV - m_usage;
	const bool			isUMA				= m_limits.totalDeviceLocalMemory == 0;
	const MemoryClass	memClass			= getMemoryClass();
	const VkDeviceSize	minAllocationSize	= de::max(m_minAtomSize, memClass == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
	// Memory required for the reference: one byte per byte, one bit per byte and one bit per atom.
	const VkDeviceSize	minReferenceSize	= minAllocationSize
											+ divRoundUp<VkDeviceSize>(minAllocationSize,  8)
											+ divRoundUp<VkDeviceSize>(minAllocationSize,  m_minAtomSize * 8);
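	// \note Worked example (illustrative): with minAllocationSize = 4096 and
	//       m_minAtomSize = 4, minReferenceSize = 4096 + 512 + 128 = 4736 bytes:
	//       the byte copy plus the "defined" bit mask plus the "flushed" bit mask.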

	if (isUMA)
	{
		const VkDeviceSize	totalUsage	= m_totalMemTracker.getTotalUsage();
		const VkDeviceSize	totalSysMem	= (VkDeviceSize)m_limits.totalSystemMemory;

		DE_ASSERT(totalUsage <= totalSysMem);

		return (minAllocationSize + minReferenceSize) > (totalSysMem - totalUsage)
				|| minAllocationSize > availableInHeap;
	}
	else
	{
		const VkDeviceSize	totalUsage		= m_totalMemTracker.getTotalUsage();
		const VkDeviceSize	totalSysMem		= (VkDeviceSize)m_limits.totalSystemMemory;

		const VkDeviceSize	totalMemClass	= memClass == MEMORY_CLASS_SYSTEM
											? m_limits.totalSystemMemory
											: m_limits.totalDeviceLocalMemory;
		const VkDeviceSize	usedMemClass	= m_totalMemTracker.getUsage(memClass);

		DE_ASSERT(usedMemClass <= totalMemClass);

		return minAllocationSize > availableInHeap
				|| minAllocationSize > (totalMemClass - usedMemClass)
				|| minReferenceSize > (totalSysMem - totalUsage);
	}
}

MemoryObject* MemoryHeap::allocateRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
{
	pair<MemoryType, VkDeviceSize> memoryTypeMaxSizePair;

	// Pick a random memory type
	{
		vector<pair<MemoryType, VkDeviceSize> > memoryTypes;

		const VkDeviceSize	availableInHeap		= m_heap.size/MAX_MEMORY_USAGE_DIV - m_usage;
		const bool			isUMA				= m_limits.totalDeviceLocalMemory == 0;
		const MemoryClass	memClass			= getMemoryClass();

		// Collect the memory types that can be allocated from and the maximum size of an allocation.
		// A memory type can be used only if the minimal allocation fits in the available memory.
		for (size_t memoryTypeNdx = 0; memoryTypeNdx < m_memoryTypes.size(); memoryTypeNdx++)
		{
			const MemoryType	type						= m_memoryTypes[memoryTypeNdx];
			const VkDeviceSize	atomSize					= (type.type.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0
															? 1
															: m_nonCoherentAtomSize;
			const VkDeviceSize	allocationSizeGranularity	= de::max(atomSize, memClass == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
			const VkDeviceSize	minAllocationSize			= allocationSizeGranularity;
			const VkDeviceSize	minReferenceSize			= minAllocationSize
															+ divRoundUp<VkDeviceSize>(minAllocationSize,  8)
															+ divRoundUp<VkDeviceSize>(minAllocationSize,  atomSize * 8);

			if (isUMA)
			{
				// The maximum allocation size is a little tricky to compute, since the reference
				// memory also consumes system memory: 1 + 1/8 + 1/(8*atomSize) bytes per allocated byte.
				const VkDeviceSize	totalUsage				= m_totalMemTracker.getTotalUsage();
				const VkDeviceSize	totalSysMem				= (VkDeviceSize)m_limits.totalSystemMemory;
				const VkDeviceSize	availableBits			= (totalSysMem - totalUsage) * 8;
				// availableBits == maxAllocationSizeBits + maxAllocationReferenceSizeBits
				// maxAllocationReferenceSizeBits == maxAllocationSizeBits + (maxAllocationSizeBits / 8) + (maxAllocationSizeBits / atomSizeBits)
				// availableBits == maxAllocationSizeBits + maxAllocationSizeBits + (maxAllocationSizeBits / 8) + (maxAllocationSizeBits / atomSizeBits)
				// availableBits == 2 * maxAllocationSizeBits + (maxAllocationSizeBits / 8) + (maxAllocationSizeBits / atomSizeBits)
				// availableBits == (2 + 1/8 + 1/atomSizeBits) * maxAllocationSizeBits
				// 8 * availableBits == (16 + 1 + 8/atomSizeBits) * maxAllocationSizeBits
				// atomSizeBits * 8 * availableBits == (17 * atomSizeBits + 8) * maxAllocationSizeBits
				// maxAllocationSizeBits == atomSizeBits * 8 * availableBits / (17 * atomSizeBits + 8)
				// maxAllocationSizeBytes == maxAllocationSizeBits / 8
				// maxAllocationSizeBytes == atomSizeBits * availableBits / (17 * atomSizeBits + 8)
				// atomSizeBits = atomSize * 8
				// maxAllocationSizeBytes == atomSize * 8 * availableBits / (17 * atomSize * 8 + 8)
				// maxAllocationSizeBytes == atomSize * availableBits / (17 * atomSize + 1)
				const VkDeviceSize	maxAllocationSize		= roundDownToMultiple(((atomSize * availableBits) / (17 * atomSize + 1)), allocationSizeGranularity);
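				// \note Sanity check of the formula above (illustrative): with
				//       atomSize = 1 and 18 free bytes of system memory,
				//       availableBits = 144 and the unrounded maximum is
				//       1 * 144 / 18 = 8 bytes; an 8-byte allocation then needs
				//       8 + 1 + 1 = 10 bytes of reference memory, and 8 + 10 == 18.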

				DE_ASSERT(totalUsage <= totalSysMem);
				DE_ASSERT(maxAllocationSize <= totalSysMem);

				if (minAllocationSize + minReferenceSize <= (totalSysMem - totalUsage) && minAllocationSize <= availableInHeap)
				{
					DE_ASSERT(maxAllocationSize >= minAllocationSize);
					memoryTypes.push_back(std::make_pair(type, maxAllocationSize));
				}
			}
			else
			{
				// The maximum allocation size is a little tricky to compute, since the reference
				// memory requires 1 + 1/8 + 1/(8*atomSize) bytes of system memory per allocated byte.
				const VkDeviceSize	totalUsage			= m_totalMemTracker.getTotalUsage();
				const VkDeviceSize	totalSysMem			= (VkDeviceSize)m_limits.totalSystemMemory;

				const VkDeviceSize	totalMemClass		= memClass == MEMORY_CLASS_SYSTEM
														? m_limits.totalSystemMemory
														: m_limits.totalDeviceLocalMemory;
				const VkDeviceSize	usedMemClass		= m_totalMemTracker.getUsage(memClass);
				// availableRefBits = maxRefBits + maxRefBits/8 + maxRefBits/atomSizeBits
				// availableRefBits = maxRefBits * (1 + 1/8 + 1/atomSizeBits)
				// 8 * availableRefBits = maxRefBits * (8 + 1 + 8/atomSizeBits)
				// 8 * atomSizeBits * availableRefBits = maxRefBits * (9 * atomSizeBits + 8)
				// maxRefBits = 8 * atomSizeBits * availableRefBits / (9 * atomSizeBits + 8)
				// atomSizeBits = atomSize * 8
				// maxRefBits = 8 * atomSize * 8 * availableRefBits / (9 * atomSize * 8 + 8)
				// maxRefBits = atomSize * 8 * availableRefBits / (9 * atomSize + 1)
				// maxRefBytes = atomSize * availableRefBits / (9 * atomSize + 1)
				const VkDeviceSize	maxAllocationSize	= roundDownToMultiple(de::min(totalMemClass - usedMemClass, (atomSize * 8 * (totalSysMem - totalUsage)) / (9 * atomSize + 1)), allocationSizeGranularity);
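				// \note Sanity check of the formula above (illustrative): with
				//       atomSize = 1 and 10 free bytes of system memory the formula
				//       gives 1 * 8 * 10 / 10 = 8 bytes for the allocation itself,
				//       whose reference copy then needs 8 + 1 + 1 = 10 bytes.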

				DE_ASSERT(usedMemClass <= totalMemClass);

				if (minAllocationSize <= availableInHeap
						&& minAllocationSize <= (totalMemClass - usedMemClass)
						&& minReferenceSize <= (totalSysMem - totalUsage))
				{
					DE_ASSERT(maxAllocationSize >= minAllocationSize);
					memoryTypes.push_back(std::make_pair(type, maxAllocationSize));
				}
			}
		}

		memoryTypeMaxSizePair = rng.choose<pair<MemoryType, VkDeviceSize> >(memoryTypes.begin(), memoryTypes.end());
	}

	const MemoryType		type						= memoryTypeMaxSizePair.first;
	const VkDeviceSize		maxAllocationSize			= memoryTypeMaxSizePair.second / MAX_MEMORY_ALLOC_DIV;
	const VkDeviceSize		atomSize					= (type.type.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0
														? 1
														: m_nonCoherentAtomSize;
	const VkDeviceSize		allocationSizeGranularity	= de::max(atomSize, getMemoryClass() == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
	const VkDeviceSize		size						= randomSize(rng, atomSize, maxAllocationSize);
	const VkDeviceSize		memoryUsage					= roundUpToMultiple(size, allocationSizeGranularity);
	const VkDeviceSize		referenceMemoryUsage		= size + divRoundUp<VkDeviceSize>(size, 8) + divRoundUp<VkDeviceSize>(size / atomSize, 8);

	DE_ASSERT(size <= maxAllocationSize);

	MemoryObject* const		object	= new MemoryObject(vkd, device, size, type.index, atomSize, memoryUsage, referenceMemoryUsage);

	m_usage += memoryUsage;
	m_totalMemTracker.allocate(getMemoryClass(), memoryUsage);
	m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, referenceMemoryUsage);
	m_objects.push_back(object);

	return object;
}

size_t getMemoryObjectSystemSize (Context& context)
{
	return computeDeviceMemorySystemMemFootprint(context.getDeviceInterface(), context.getDevice())
		   + sizeof(MemoryObject)
		   + sizeof(de::SharedPtr<MemoryObject>);
}

size_t getMemoryMappingSystemSize (void)
{
	return sizeof(MemoryMapping) + sizeof(de::SharedPtr<MemoryMapping>);
}

class RandomMemoryMappingInstance : public TestInstance
{
public:
	RandomMemoryMappingInstance (Context& context, deUint32 seed)
		: TestInstance				(context)
		, m_memoryObjectSysMemSize	(getMemoryObjectSystemSize(context))
		, m_memoryMappingSysMemSize	(getMemoryMappingSystemSize())
		, m_memoryLimits			(getMemoryLimits(context.getTestContext().getPlatform().getVulkanPlatform()))
		, m_rng						(seed)
		, m_opNdx					(0)
	{
		const VkPhysicalDevice					physicalDevice		= context.getPhysicalDevice();
		const InstanceInterface&				vki					= context.getInstanceInterface();
		const VkPhysicalDeviceMemoryProperties	memoryProperties	= getPhysicalDeviceMemoryProperties(vki, physicalDevice);
		// \todo [2016-05-26 misojarvi] Remove zero check once drivers report correctly 1 instead of 0
		const VkDeviceSize						nonCoherentAtomSize	= context.getDeviceProperties().limits.nonCoherentAtomSize != 0
																	? context.getDeviceProperties().limits.nonCoherentAtomSize
																	: 1;

		// Initialize heaps
		{
			vector<vector<MemoryType> >	memoryTypes	(memoryProperties.memoryHeapCount);

			for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < memoryProperties.memoryTypeCount; memoryTypeNdx++)
			{
				if (memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
					memoryTypes[memoryProperties.memoryTypes[memoryTypeNdx].heapIndex].push_back(MemoryType(memoryTypeNdx, memoryProperties.memoryTypes[memoryTypeNdx]));
			}

			for (deUint32 heapIndex = 0; heapIndex < memoryProperties.memoryHeapCount; heapIndex++)
			{
				const VkMemoryHeap	heapInfo	= memoryProperties.memoryHeaps[heapIndex];

				if (!memoryTypes[heapIndex].empty())
				{
					const de::SharedPtr<MemoryHeap>	heap	(new MemoryHeap(heapInfo, memoryTypes[heapIndex], m_memoryLimits, nonCoherentAtomSize, m_totalMemTracker));

					TCU_CHECK_INTERNAL(!heap->full());

					m_memoryHeaps.push_back(heap);
				}
			}
		}
	}

	~RandomMemoryMappingInstance (void)
	{
	}

	tcu::TestStatus iterate (void)
	{
		const size_t			opCount						= 100;
		const float				memoryOpProbability			= 0.5f;		// 0.50
		const float				flushInvalidateProbability	= 0.4f;		// 0.20
		const float				mapProbability				= 0.50f;	// 0.15
		const float				unmapProbability			= 0.25f;	// 0.075

		const float				allocProbability			= 0.75f; // Versus free

		const VkDevice			device						= m_context.getDevice();
		const DeviceInterface&	vkd							= m_context.getDeviceInterface();

		const VkDeviceSize		sysMemUsage					= (m_memoryLimits.totalDeviceLocalMemory == 0)
															? m_totalMemTracker.getTotalUsage()
															: m_totalMemTracker.getUsage(MEMORY_CLASS_SYSTEM);

		if (!m_memoryMappings.empty() && m_rng.getFloat() < memoryOpProbability)
		{
			// Perform operations on mapped memory
			MemoryMapping* const	mapping	= m_rng.choose<MemoryMapping*>(m_memoryMappings.begin(), m_memoryMappings.end());

			enum Op
			{
				OP_READ = 0,
				OP_WRITE,
				OP_MODIFY,
				OP_LAST
			};

			const Op op = (Op)(m_rng.getUint32() % OP_LAST);

			switch (op)
			{
				case OP_READ:
					mapping->randomRead(m_rng);
					break;

				case OP_WRITE:
					mapping->randomWrite(m_rng);
					break;

				case OP_MODIFY:
					mapping->randomModify(m_rng);
					break;

				default:
					DE_FATAL("Invalid operation");
			}
		}
		else if (!m_mappedMemoryObjects.empty() && m_rng.getFloat() < flushInvalidateProbability)
		{
			MemoryObject* const	object	= m_rng.choose<MemoryObject*>(m_mappedMemoryObjects.begin(), m_mappedMemoryObjects.end());

			if (m_rng.getBool())
				object->randomFlush(vkd, device, m_rng);
			else
				object->randomInvalidate(vkd, device, m_rng);
		}
		else if (!m_mappedMemoryObjects.empty() && m_rng.getFloat() < unmapProbability)
		{
			// Unmap memory object
			MemoryObject* const	object	= m_rng.choose<MemoryObject*>(m_mappedMemoryObjects.begin(), m_mappedMemoryObjects.end());

			// Remove mapping
			removeFirstEqual(m_memoryMappings, object->getMapping());

			object->unmap();
			removeFirstEqual(m_mappedMemoryObjects, object);
			m_nonMappedMemoryObjects.push_back(object);

			m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);
		}
		else if (!m_nonMappedMemoryObjects.empty() &&
				 (m_rng.getFloat() < mapProbability) &&
				 (sysMemUsage+m_memoryMappingSysMemSize <= (VkDeviceSize)m_memoryLimits.totalSystemMemory))
		{
			// Map memory object
			MemoryObject* const		object	= m_rng.choose<MemoryObject*>(m_nonMappedMemoryObjects.begin(), m_nonMappedMemoryObjects.end());
			MemoryMapping*			mapping	= object->mapRandom(vkd, device, m_rng);

			m_memoryMappings.push_back(mapping);
			m_mappedMemoryObjects.push_back(object);
			removeFirstEqual(m_nonMappedMemoryObjects, object);

			m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);
		}
		else
		{
			// Classify heaps based on capacity (full or not)
			vector<MemoryHeap*>		nonFullHeaps;
			vector<MemoryHeap*>		nonEmptyHeaps;

			if (sysMemUsage+m_memoryObjectSysMemSize <= (VkDeviceSize)m_memoryLimits.totalSystemMemory)
			{
				// For the duration of the classification reserve MemoryObject space from system memory
				m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);

				for (vector<de::SharedPtr<MemoryHeap> >::const_iterator heapIter = m_memoryHeaps.begin();
					 heapIter != m_memoryHeaps.end();
					 ++heapIter)
				{
					if (!(*heapIter)->full())
						nonFullHeaps.push_back(heapIter->get());

					if (!(*heapIter)->empty())
						nonEmptyHeaps.push_back(heapIter->get());
				}

				m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);
			}
			else
			{
				// Not possible to even allocate a MemoryObject from system memory, look for non-empty heaps
				for (vector<de::SharedPtr<MemoryHeap> >::const_iterator heapIter = m_memoryHeaps.begin();
					 heapIter != m_memoryHeaps.end();
					 ++heapIter)
				{
					if (!(*heapIter)->empty())
						nonEmptyHeaps.push_back(heapIter->get());
				}
			}

			if (!nonFullHeaps.empty() && (nonEmptyHeaps.empty() || m_rng.getFloat() < allocProbability))
			{
				// Reserve MemoryObject from sys mem first
				m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);

				// Allocate more memory objects
				MemoryHeap* const	heap	= m_rng.choose<MemoryHeap*>(nonFullHeaps.begin(), nonFullHeaps.end());
				MemoryObject* const	object	= heap->allocateRandom(vkd, device, m_rng);

				m_nonMappedMemoryObjects.push_back(object);
			}
			else
			{
				// Free memory objects
				MemoryHeap* const		heap	= m_rng.choose<MemoryHeap*>(nonEmptyHeaps.begin(), nonEmptyHeaps.end());
				MemoryObject* const		object	= heap->getRandomObject(m_rng);

				// Remove mapping
				if (object->getMapping())
				{
					removeFirstEqual(m_memoryMappings, object->getMapping());
					m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);
				}

				removeFirstEqual(m_mappedMemoryObjects, object);
				removeFirstEqual(m_nonMappedMemoryObjects, object);

				heap->free(object);
				m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);
			}
		}

		m_opNdx += 1;
		if (m_opNdx == opCount)
			return tcu::TestStatus::pass("Pass");
		else
			return tcu::TestStatus::incomplete();
	}

private:
	const size_t						m_memoryObjectSysMemSize;
	const size_t						m_memoryMappingSysMemSize;
	const PlatformMemoryLimits			m_memoryLimits;

	de::Random							m_rng;
	size_t								m_opNdx;

	TotalMemoryTracker					m_totalMemTracker;
	vector<de::SharedPtr<MemoryHeap> >	m_memoryHeaps;

	vector<MemoryObject*>				m_mappedMemoryObjects;
	vector<MemoryObject*>				m_nonMappedMemoryObjects;
	vector<MemoryMapping*>				m_memoryMappings;
};
1529
1530enum Op
1531{
1532	OP_NONE = 0,
1533
1534	OP_FLUSH,
1535	OP_SUB_FLUSH,
1536	OP_SUB_FLUSH_SEPARATE,
1537	OP_SUB_FLUSH_OVERLAPPING,
1538
1539	OP_INVALIDATE,
1540	OP_SUB_INVALIDATE,
1541	OP_SUB_INVALIDATE_SEPARATE,
1542	OP_SUB_INVALIDATE_OVERLAPPING,
1543
1544	OP_REMAP,
1545
1546	OP_LAST
1547};
1548
1549TestConfig subMappedConfig (VkDeviceSize				allocationSize,
1550							const MemoryRange&			mapping,
1551							Op							op,
1552							deUint32					seed,
1553							AllocationKind				allocationKind)
1554{
1555	TestConfig config;
1556
1557	config.allocationSize	= allocationSize;
1558	config.seed				= seed;
1559	config.mapping			= mapping;
1560	config.remap			= false;
1561	config.allocationKind	= allocationKind;
1562
1563	switch (op)
1564	{
1565		case OP_NONE:
1566			return config;
1567
1568		case OP_REMAP:
1569			config.remap = true;
1570			return config;
1571
1572		case OP_FLUSH:
1573			config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
1574			return config;
1575
1576		case OP_SUB_FLUSH:
1577			DE_ASSERT(mapping.size / 4 > 0);
1578
1579			config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
1580			return config;
1581
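		// Two disjoint ranges that together cover the whole mapping:
		// [offset + size/2, offset + size) followed by [offset, offset + size/2).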
1582		case OP_SUB_FLUSH_SEPARATE:
1583			DE_ASSERT(mapping.size / 2 > 0);
1584
			config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 2, mapping.size - (mapping.size / 2)));
1586			config.flushMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));
1587
1588			return config;
1589
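		// Two ranges that overlap over (roughly) the middle third of the
		// mapping: [offset + size/3, offset + size/3 + size - size/2) and
		// [offset, offset + 2*size/3).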
1590		case OP_SUB_FLUSH_OVERLAPPING:
1591			DE_ASSERT((mapping.size / 3) > 0);
1592
			config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 3, mapping.size - (mapping.size / 2)));
1594			config.flushMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));
1595
1596			return config;
1597
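		// Invalidate cases flush the same ranges first: the test writes the
		// reference data through the mapping, so those host writes are flushed
		// before the range is invalidated and read back.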
1598		case OP_INVALIDATE:
1599			config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
1600			config.invalidateMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
1601			return config;
1602
1603		case OP_SUB_INVALIDATE:
1604			DE_ASSERT(mapping.size / 4 > 0);
1605
1606			config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
1607			config.invalidateMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
1608			return config;
1609
1610		case OP_SUB_INVALIDATE_SEPARATE:
1611			DE_ASSERT(mapping.size / 2 > 0);
1612
			config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 2, mapping.size - (mapping.size / 2)));
			config.flushMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));

			config.invalidateMappings.push_back(MemoryRange(mapping.offset + mapping.size / 2, mapping.size - (mapping.size / 2)));
1617			config.invalidateMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));
1618
1619			return config;
1620
1621		case OP_SUB_INVALIDATE_OVERLAPPING:
1622			DE_ASSERT((mapping.size / 3) > 0);
1623
			config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 3, mapping.size - (mapping.size / 2)));
			config.flushMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));

			config.invalidateMappings.push_back(MemoryRange(mapping.offset + mapping.size / 3, mapping.size - (mapping.size / 2)));
1628			config.invalidateMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));
1629
1630			return config;
1631
1632		default:
1633			DE_FATAL("Unknown Op");
1634			return TestConfig();
1635	}
1636}
1637
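// Convenience wrapper: map the whole allocation (offset 0, full size).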
1638TestConfig fullMappedConfig (VkDeviceSize	allocationSize,
1639							 Op				op,
1640							 deUint32		seed,
1641							 AllocationKind	allocationKind)
1642{
1643	return subMappedConfig(allocationSize, MemoryRange(0, allocationSize), op, seed, allocationKind);
1644}
1645
1646} // anonymous
1647
1648tcu::TestCaseGroup* createMappingTests (tcu::TestContext& testCtx)
1649{
1650	de::MovePtr<tcu::TestCaseGroup>		group							(new tcu::TestCaseGroup(testCtx, "mapping", "Memory mapping tests."));
1651	de::MovePtr<tcu::TestCaseGroup>		dedicated						(new tcu::TestCaseGroup(testCtx, "dedicated_alloc", "Dedicated memory mapping tests."));
1652	de::MovePtr<tcu::TestCaseGroup>		sets[]							=
1653	{
1654		de::MovePtr<tcu::TestCaseGroup> (new tcu::TestCaseGroup(testCtx, "suballocation", "Suballocated memory mapping tests.")),
1655		de::MovePtr<tcu::TestCaseGroup> (new tcu::TestCaseGroup(testCtx, "buffer", "Buffer dedicated memory mapping tests.")),
1656		de::MovePtr<tcu::TestCaseGroup> (new tcu::TestCaseGroup(testCtx, "image", "Image dedicated memory mapping tests."))
1657	};
1658
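	// The allocation sizes, mapping offsets and mapping sizes below are
	// deliberately odd, non-power-of-two values, so mapped and flushed ranges
	// rarely line up with typical alignment or nonCoherentAtomSize values.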
1659	const VkDeviceSize allocationSizes[] =
1660	{
1661		33, 257, 4087, 8095, 1*1024*1024 + 1
1662	};
1663
1664	const VkDeviceSize offsets[] =
1665	{
1666		0, 17, 129, 255, 1025, 32*1024+1
1667	};
1668
1669	const VkDeviceSize sizes[] =
1670	{
1671		31, 255, 1025, 4085, 1*1024*1024 - 1
1672	};
1673
1674	const struct
1675	{
1676		const Op			op;
1677		const char* const	name;
1678	} ops[] =
1679	{
1680		{ OP_NONE,						"simple"					},
1681		{ OP_REMAP,						"remap"						},
1682		{ OP_FLUSH,						"flush"						},
1683		{ OP_SUB_FLUSH,					"subflush"					},
1684		{ OP_SUB_FLUSH_SEPARATE,		"subflush_separate"			},
		{ OP_SUB_FLUSH_OVERLAPPING,		"subflush_overlapping"		},
1686
1687		{ OP_INVALIDATE,				"invalidate"				},
1688		{ OP_SUB_INVALIDATE,			"subinvalidate"				},
1689		{ OP_SUB_INVALIDATE_SEPARATE,	"subinvalidate_separate"	},
		{ OP_SUB_INVALIDATE_OVERLAPPING,	"subinvalidate_overlapping"	}
1691	};
1692
1693	// .full
1694	for (size_t allocationKindNdx = 0; allocationKindNdx < ALLOCATION_KIND_LAST; allocationKindNdx++)
1695	{
1696		de::MovePtr<tcu::TestCaseGroup> fullGroup (new tcu::TestCaseGroup(testCtx, "full", "Map memory completely."));
1697
1698		for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
1699		{
1700			const VkDeviceSize				allocationSize		= allocationSizes[allocationSizeNdx];
1701			de::MovePtr<tcu::TestCaseGroup>	allocationSizeGroup	(new tcu::TestCaseGroup(testCtx, de::toString(allocationSize).c_str(), ""));
1702
1703			for (size_t opNdx = 0; opNdx < DE_LENGTH_OF_ARRAY(ops); opNdx++)
1704			{
1705				const Op			op		= ops[opNdx].op;
1706				const char* const	name	= ops[opNdx].name;
1707				const deUint32		seed	= (deUint32)(opNdx * allocationSizeNdx);
1708				const TestConfig	config	= fullMappedConfig(allocationSize, op, seed, static_cast<AllocationKind>(allocationKindNdx));
1709
1710				addFunctionCase(allocationSizeGroup.get(), name, name, testMemoryMapping, config);
1711			}
1712
1713			fullGroup->addChild(allocationSizeGroup.release());
1714		}
1715
1716		sets[allocationKindNdx]->addChild(fullGroup.release());
1717	}
1718
1719	// .sub
1720	for (size_t allocationKindNdx = 0; allocationKindNdx < ALLOCATION_KIND_LAST; allocationKindNdx++)
1721	{
1722		de::MovePtr<tcu::TestCaseGroup> subGroup (new tcu::TestCaseGroup(testCtx, "sub", "Map part of the memory."));
1723
1724		for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
1725		{
1726			const VkDeviceSize				allocationSize		= allocationSizes[allocationSizeNdx];
1727			de::MovePtr<tcu::TestCaseGroup>	allocationSizeGroup	(new tcu::TestCaseGroup(testCtx, de::toString(allocationSize).c_str(), ""));
1728
1729			for (size_t offsetNdx = 0; offsetNdx < DE_LENGTH_OF_ARRAY(offsets); offsetNdx++)
1730			{
1731				const VkDeviceSize				offset			= offsets[offsetNdx];
1732
1733				if (offset >= allocationSize)
1734					continue;
1735
1736				de::MovePtr<tcu::TestCaseGroup>	offsetGroup		(new tcu::TestCaseGroup(testCtx, ("offset_" + de::toString(offset)).c_str(), ""));
1737
1738				for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
1739				{
1740					const VkDeviceSize				size		= sizes[sizeNdx];
1741
1742					if (offset + size > allocationSize)
1743						continue;
1744
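					// Skip the full-mapping combination; the "full" group
					// already covers it.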
1745					if (offset == 0 && size == allocationSize)
1746						continue;
1747
1748					de::MovePtr<tcu::TestCaseGroup>	sizeGroup	(new tcu::TestCaseGroup(testCtx, ("size_" + de::toString(size)).c_str(), ""));
1749
1750					for (size_t opNdx = 0; opNdx < DE_LENGTH_OF_ARRAY(ops); opNdx++)
1751					{
1752						const deUint32		seed	= (deUint32)(opNdx * allocationSizeNdx);
1753						const Op			op		= ops[opNdx].op;
1754						const char* const	name	= ops[opNdx].name;
1755						const TestConfig	config	= subMappedConfig(allocationSize, MemoryRange(offset, size), op, seed, static_cast<AllocationKind>(allocationKindNdx));
1756
1757						addFunctionCase(sizeGroup.get(), name, name, testMemoryMapping, config);
1758					}
1759
1760					offsetGroup->addChild(sizeGroup.release());
1761				}
1762
1763				allocationSizeGroup->addChild(offsetGroup.release());
1764			}
1765
1766			subGroup->addChild(allocationSizeGroup.release());
1767		}
1768
1769		sets[allocationKindNdx]->addChild(subGroup.release());
1770	}
1771
1772	// .random
1773	{
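		// A fixed seed keeps the generated list of random cases stable between runs.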
1774		de::MovePtr<tcu::TestCaseGroup>	randomGroup	(new tcu::TestCaseGroup(testCtx, "random", "Random memory mapping tests."));
1775		de::Random						rng			(3927960301u);
1776
1777		for (size_t ndx = 0; ndx < 100; ndx++)
1778		{
1779			const deUint32		seed	= rng.getUint32();
1780			const std::string	name	= de::toString(ndx);
1781
			randomGroup->addChild(new InstanceFactory1<RandomMemoryMappingInstance, deUint32>(testCtx, tcu::NODETYPE_SELF_VALIDATE, name, "Random case", seed));
1783		}
1784
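		// RandomMemoryMappingInstance takes only a seed, so the random cases
		// are added to the suballocation group only.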
1785		sets[static_cast<deUint32>(ALLOCATION_KIND_SUBALLOCATED)]->addChild(randomGroup.release());
1786	}
1787
1788	group->addChild(sets[0].release());
1789	dedicated->addChild(sets[1].release());
1790	dedicated->addChild(sets[2].release());
1791	group->addChild(dedicated.release());
1792
1793	return group.release();
1794}
1795
1796} // memory
1797} // vkt
1798