/*-------------------------------------------------------------------------
 * drawElements Quality Program OpenGL ES 3.1 Module
 * -------------------------------------------------
 *
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Opaque type (sampler, buffer, atomic counter, ...) indexing tests.
 *
 * \todo [2014-03-05 pyry] Extend with following:
 *  + sampler: different filtering modes, multiple sizes, incomplete textures
 *  + SSBO: write, atomic op, unsized array .length()
 *//*--------------------------------------------------------------------*/

#include "es31fOpaqueTypeIndexingTests.hpp"
#include "tcuTexture.hpp"
#include "tcuTestLog.hpp"
#include "tcuFormatUtil.hpp"
#include "tcuVectorUtil.hpp"
#include "gluShaderUtil.hpp"
#include "gluShaderProgram.hpp"
#include "gluObjectWrapper.hpp"
#include "gluTextureUtil.hpp"
#include "gluRenderContext.hpp"
#include "gluProgramInterfaceQuery.hpp"
#include "gluContextInfo.hpp"
#include "glsShaderExecUtil.hpp"
#include "glwFunctions.hpp"
#include "glwEnums.hpp"
#include "deUniquePtr.hpp"
#include "deStringUtil.hpp"
#include "deRandom.hpp"

#include <sstream>

namespace deqp
{
namespace gles31
{
namespace Functional
{

namespace
{

using namespace gls::ShaderExecUtil;
using namespace glu;
using std::string;
using std::vector;
using tcu::TextureFormat;
using tcu::TestLog;

typedef de::UniquePtr<ShaderExecutor> ShaderExecutorPtr;

enum IndexExprType
{
	INDEX_EXPR_TYPE_CONST_LITERAL	= 0,
	INDEX_EXPR_TYPE_UNIFORM,
	INDEX_EXPR_TYPE_DYNAMIC_UNIFORM,

	INDEX_EXPR_TYPE_LAST
};

enum TextureType
{
	TEXTURE_TYPE_1D = 0,
	TEXTURE_TYPE_2D,
	TEXTURE_TYPE_CUBE,
	TEXTURE_TYPE_2D_ARRAY,
	TEXTURE_TYPE_3D,

	TEXTURE_TYPE_LAST
};

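// Helpers for the "uniform" index expression mode: declareUniformIndexVars() emits one
// "uniform highp int <prefix><N>;" declaration per index and uploadUniformIndices() later
// writes the chosen index values into those uniforms. As an illustrative sketch (hypothetical
// arguments), varPrefix = "index" with numVars = 2 would produce:
//
//	uniform highp int index0;
//	uniform highp int index1;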
static void declareUniformIndexVars (std::ostream& str, const char* varPrefix, int numVars)
{
	for (int varNdx = 0; varNdx < numVars; varNdx++)
		str << "uniform highp int " << varPrefix << varNdx << ";\n";
}

static void uploadUniformIndices (const glw::Functions& gl, deUint32 program, const char* varPrefix, int numIndices, const int* indices)
{
	for (int varNdx = 0; varNdx < numIndices; varNdx++)
	{
		const string	varName		= varPrefix + de::toString(varNdx);
		const int		loc			= gl.getUniformLocation(program, varName.c_str());
		TCU_CHECK_MSG(loc >= 0, ("No location assigned for uniform '" + varName + "'").c_str());

		gl.uniform1i(loc, indices[varNdx]);
	}
}

template<typename T>
static T maxElement (const std::vector<T>& elements)
{
	T maxElem = elements[0];

	for (size_t ndx = 1; ndx < elements.size(); ndx++)
		maxElem = de::max(maxElem, elements[ndx]);

	return maxElem;
}

static TextureType getTextureType (glu::DataType samplerType)
{
	switch (samplerType)
	{
		case glu::TYPE_SAMPLER_1D:
		case glu::TYPE_INT_SAMPLER_1D:
		case glu::TYPE_UINT_SAMPLER_1D:
		case glu::TYPE_SAMPLER_1D_SHADOW:
			return TEXTURE_TYPE_1D;

		case glu::TYPE_SAMPLER_2D:
		case glu::TYPE_INT_SAMPLER_2D:
		case glu::TYPE_UINT_SAMPLER_2D:
		case glu::TYPE_SAMPLER_2D_SHADOW:
			return TEXTURE_TYPE_2D;

		case glu::TYPE_SAMPLER_CUBE:
		case glu::TYPE_INT_SAMPLER_CUBE:
		case glu::TYPE_UINT_SAMPLER_CUBE:
		case glu::TYPE_SAMPLER_CUBE_SHADOW:
			return TEXTURE_TYPE_CUBE;

		case glu::TYPE_SAMPLER_2D_ARRAY:
		case glu::TYPE_INT_SAMPLER_2D_ARRAY:
		case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
		case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
			return TEXTURE_TYPE_2D_ARRAY;

		case glu::TYPE_SAMPLER_3D:
		case glu::TYPE_INT_SAMPLER_3D:
		case glu::TYPE_UINT_SAMPLER_3D:
			return TEXTURE_TYPE_3D;

		default:
			throw tcu::InternalError("Invalid sampler type");
	}
}

static bool isShadowSampler (glu::DataType samplerType)
{
	return samplerType == glu::TYPE_SAMPLER_1D_SHADOW		||
		   samplerType == glu::TYPE_SAMPLER_2D_SHADOW		||
		   samplerType == glu::TYPE_SAMPLER_2D_ARRAY_SHADOW	||
		   samplerType == glu::TYPE_SAMPLER_CUBE_SHADOW;
}

static glu::DataType getSamplerOutputType (glu::DataType samplerType)
{
	switch (samplerType)
	{
		case glu::TYPE_SAMPLER_1D:
		case glu::TYPE_SAMPLER_2D:
		case glu::TYPE_SAMPLER_CUBE:
		case glu::TYPE_SAMPLER_2D_ARRAY:
		case glu::TYPE_SAMPLER_3D:
			return glu::TYPE_FLOAT_VEC4;

		case glu::TYPE_SAMPLER_1D_SHADOW:
		case glu::TYPE_SAMPLER_2D_SHADOW:
		case glu::TYPE_SAMPLER_CUBE_SHADOW:
		case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
			return glu::TYPE_FLOAT;

		case glu::TYPE_INT_SAMPLER_1D:
		case glu::TYPE_INT_SAMPLER_2D:
		case glu::TYPE_INT_SAMPLER_CUBE:
		case glu::TYPE_INT_SAMPLER_2D_ARRAY:
		case glu::TYPE_INT_SAMPLER_3D:
			return glu::TYPE_INT_VEC4;

		case glu::TYPE_UINT_SAMPLER_1D:
		case glu::TYPE_UINT_SAMPLER_2D:
		case glu::TYPE_UINT_SAMPLER_CUBE:
		case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
		case glu::TYPE_UINT_SAMPLER_3D:
			return glu::TYPE_UINT_VEC4;

		default:
			throw tcu::InternalError("Invalid sampler type");
	}
}

static tcu::TextureFormat getSamplerTextureFormat (glu::DataType samplerType)
{
	const glu::DataType		outType			= getSamplerOutputType(samplerType);
	const glu::DataType		outScalarType	= glu::getDataTypeScalarType(outType);

	switch (outScalarType)
	{
		case glu::TYPE_FLOAT:
			if (isShadowSampler(samplerType))
				return tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
			else
				return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8);

		case glu::TYPE_INT:		return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::SIGNED_INT8);
		case glu::TYPE_UINT:	return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8);

		default:
			throw tcu::InternalError("Invalid sampler type");
	}
}

static glu::DataType getSamplerCoordType (glu::DataType samplerType)
{
	const TextureType	texType		= getTextureType(samplerType);
	int					numCoords	= 0;

	switch (texType)
	{
		case TEXTURE_TYPE_1D:		numCoords = 1;	break;
		case TEXTURE_TYPE_2D:		numCoords = 2;	break;
		case TEXTURE_TYPE_2D_ARRAY:	numCoords = 3;	break;
		case TEXTURE_TYPE_CUBE:		numCoords = 3;	break;
		case TEXTURE_TYPE_3D:		numCoords = 3;	break;
		default:
			DE_ASSERT(false);
	}

	if (isShadowSampler(samplerType))
		numCoords += 1;

	DE_ASSERT(de::inRange(numCoords, 1, 4));

	return numCoords == 1 ? glu::TYPE_FLOAT : glu::getDataTypeFloatVec(numCoords);
}

static deUint32 getGLTextureTarget (TextureType texType)
{
	switch (texType)
	{
		case TEXTURE_TYPE_1D:		return GL_TEXTURE_1D;
		case TEXTURE_TYPE_2D:		return GL_TEXTURE_2D;
		case TEXTURE_TYPE_2D_ARRAY:	return GL_TEXTURE_2D_ARRAY;
		case TEXTURE_TYPE_CUBE:		return GL_TEXTURE_CUBE_MAP;
		case TEXTURE_TYPE_3D:		return GL_TEXTURE_3D;
		default:
			DE_ASSERT(false);
			return 0;
	}
}

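// setupTexture() creates a minimal texture for the given sampler type: a single level of
// 1x1 (or 1x1x1 / per-face 1x1) texels filled with 'color'. Filtering is forced to NEAREST,
// and shadow samplers additionally get GL_COMPARE_REF_TO_TEXTURE so that lookups can be
// compared against the reference values computed on the CPU.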
static void setupTexture (const glw::Functions&	gl,
						  deUint32				texture,
						  glu::DataType			samplerType,
						  tcu::TextureFormat	texFormat,
						  const void*			color)
{
	const TextureType			texType		= getTextureType(samplerType);
	const deUint32				texTarget	= getGLTextureTarget(texType);
	const deUint32				intFormat	= glu::getInternalFormat(texFormat);
	const glu::TransferFormat	transferFmt	= glu::getTransferFormat(texFormat);

	// \todo [2014-03-04 pyry] Use larger than 1x1 textures?

	gl.bindTexture(texTarget, texture);

	switch (texType)
	{
		case TEXTURE_TYPE_1D:
			gl.texStorage1D(texTarget, 1, intFormat, 1);
			gl.texSubImage1D(texTarget, 0, 0, 1, transferFmt.format, transferFmt.dataType, color);
			break;

		case TEXTURE_TYPE_2D:
			gl.texStorage2D(texTarget, 1, intFormat, 1, 1);
			gl.texSubImage2D(texTarget, 0, 0, 0, 1, 1, transferFmt.format, transferFmt.dataType, color);
			break;

		case TEXTURE_TYPE_2D_ARRAY:
		case TEXTURE_TYPE_3D:
			gl.texStorage3D(texTarget, 1, intFormat, 1, 1, 1);
			gl.texSubImage3D(texTarget, 0, 0, 0, 0, 1, 1, 1, transferFmt.format, transferFmt.dataType, color);
			break;

		case TEXTURE_TYPE_CUBE:
			gl.texStorage2D(texTarget, 1, intFormat, 1, 1);
			for (int face = 0; face < tcu::CUBEFACE_LAST; face++)
				gl.texSubImage2D(glu::getGLCubeFace((tcu::CubeFace)face), 0, 0, 0, 1, 1, transferFmt.format, transferFmt.dataType, color);
			break;

		default:
			DE_ASSERT(false);
	}

	gl.texParameteri(texTarget, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
	gl.texParameteri(texTarget, GL_TEXTURE_MAG_FILTER, GL_NEAREST);

	if (isShadowSampler(samplerType))
		gl.texParameteri(texTarget, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);

	GLU_EXPECT_NO_ERROR(gl.getError(), "Texture setup failed");
}

class SamplerIndexingCase : public TestCase
{
public:
							SamplerIndexingCase			(Context& context, const char* name, const char* description, glu::ShaderType shaderType, glu::DataType samplerType, IndexExprType indexExprType);
							~SamplerIndexingCase		(void);

	void					init						(void);
	IterateResult			iterate						(void);

private:
							SamplerIndexingCase			(const SamplerIndexingCase&);
	SamplerIndexingCase&	operator=					(const SamplerIndexingCase&);

	void					getShaderSpec				(ShaderSpec* spec, int numSamplers, int numLookups, const int* lookupIndices) const;

	const glu::ShaderType	m_shaderType;
	const glu::DataType		m_samplerType;
	const IndexExprType		m_indexExprType;
};

SamplerIndexingCase::SamplerIndexingCase (Context& context, const char* name, const char* description, glu::ShaderType shaderType, glu::DataType samplerType, IndexExprType indexExprType)
	: TestCase			(context, name, description)
	, m_shaderType		(shaderType)
	, m_samplerType		(samplerType)
	, m_indexExprType	(indexExprType)
{
}

SamplerIndexingCase::~SamplerIndexingCase (void)
{
}

void SamplerIndexingCase::init (void)
{
	const char* extName = "GL_EXT_gpu_shader5";

	if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL &&
		!m_context.getContextInfo().isExtensionSupported(extName))
		throw tcu::NotSupportedError(string(extName) + " extension is required for dynamic indexing of sampler arrays");
}

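// getShaderSpec() builds the shader fragment that performs the indexed lookups. As an
// illustrative sketch (not emitted verbatim by the executor), a sampler2D case with
// numSamplers = 8, numLookups = 2 and dynamically uniform indexing produces roughly:
//
//	#extension GL_EXT_gpu_shader5 : require
//	uniform highp sampler2D sampler[8];
//	...
//	result0 = texture(sampler[index0], coords);
//	result1 = texture(sampler[index1], coords);
//
// where coords, index* and result* are declared as ShaderSpec inputs/outputs.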
void SamplerIndexingCase::getShaderSpec (ShaderSpec* spec, int numSamplers, int numLookups, const int* lookupIndices) const
{
	const char*			samplersName	= "sampler";
	const char*			coordsName		= "coords";
	const char*			indicesPrefix	= "index";
	const char*			resultPrefix	= "result";
	const DataType		coordType		= getSamplerCoordType(m_samplerType);
	const DataType		outType			= getSamplerOutputType(m_samplerType);
	std::ostringstream	global, code;

	spec->inputs.push_back(Symbol(coordsName, VarType(coordType, PRECISION_HIGHP)));

	if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
		global << "#extension GL_EXT_gpu_shader5 : require\n";

	global <<
		"uniform highp " << getDataTypeName(m_samplerType) << " " << samplersName << "[" << numSamplers << "];\n";

	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
	{
		for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
		{
			const string varName = indicesPrefix + de::toString(lookupNdx);
			spec->inputs.push_back(Symbol(varName, VarType(TYPE_INT, PRECISION_HIGHP)));
		}
	}
	else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
		declareUniformIndexVars(global, indicesPrefix, numLookups);

	for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
	{
		const string varName = resultPrefix + de::toString(lookupNdx);
		spec->outputs.push_back(Symbol(varName, VarType(outType, PRECISION_HIGHP)));
	}

	for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
	{
		code << resultPrefix << "" << lookupNdx << " = texture(" << samplersName << "[";

		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
			code << lookupIndices[lookupNdx];
		else
			code << indicesPrefix << lookupNdx;

		code << "], " << coordsName << ");\n";
	}

	spec->version				= GLSL_VERSION_310_ES;
	spec->globalDeclarations	= global.str();
	spec->source				= code.str();
}

static void fillTextureData (const tcu::PixelBufferAccess& access, de::Random& rnd)
{
	DE_ASSERT(access.getHeight() == 1 && access.getDepth() == 1);

	if (access.getFormat().order == TextureFormat::D)
	{
		// \note Texture uses odd values, lookup even values to avoid precision issues.
		const float values[] = { 0.1f, 0.3f, 0.5f, 0.7f, 0.9f };

		for (int ndx = 0; ndx < access.getWidth(); ndx++)
			access.setPixDepth(rnd.choose<float>(DE_ARRAY_BEGIN(values), DE_ARRAY_END(values)), ndx, 0);
	}
	else
	{
		TCU_CHECK_INTERNAL(access.getFormat().order == TextureFormat::RGBA && access.getFormat().getPixelSize() == 4);

		for (int ndx = 0; ndx < access.getWidth(); ndx++)
			*((deUint32*)access.getDataPtr() + ndx) = rnd.getUint32();
	}
}

SamplerIndexingCase::IterateResult SamplerIndexingCase::iterate (void)
{
	const int						numInvocations		= 64;
	const int						numSamplers			= 8;
	const int						numLookups			= 4;
	const DataType					coordType			= getSamplerCoordType(m_samplerType);
	const DataType					outputType			= getSamplerOutputType(m_samplerType);
	const TextureFormat				texFormat			= getSamplerTextureFormat(m_samplerType);
	const int						outLookupStride		= numInvocations*getDataTypeScalarSize(outputType);
	vector<int>						lookupIndices		(numLookups);
	vector<float>					coords;
	vector<deUint32>				outData;
	vector<deUint8>					texData				(numSamplers * texFormat.getPixelSize());
	const tcu::PixelBufferAccess	refTexAccess		(texFormat, numSamplers, 1, 1, &texData[0]);
	ShaderSpec						shaderSpec;
	de::Random						rnd					(deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));

	for (int ndx = 0; ndx < numLookups; ndx++)
		lookupIndices[ndx] = rnd.getInt(0, numSamplers-1);

	getShaderSpec(&shaderSpec, numSamplers, numLookups, &lookupIndices[0]);

	coords.resize(numInvocations * getDataTypeScalarSize(coordType));

	if (isShadowSampler(m_samplerType))
	{
		// Use different comparison value per invocation.
		// \note Texture uses odd values, comparison even values.
		const int	numCoordComps	= getDataTypeScalarSize(coordType);
		const float	cmpValues[]		= { 0.0f, 0.2f, 0.4f, 0.6f, 0.8f, 1.0f };

		for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
			coords[invocationNdx*numCoordComps + (numCoordComps-1)] = rnd.choose<float>(DE_ARRAY_BEGIN(cmpValues), DE_ARRAY_END(cmpValues));
	}

	fillTextureData(refTexAccess, rnd);

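	// Output layout: outData holds numLookups consecutive blocks of outLookupStride scalars,
	// i.e. the result of lookup L for invocation I starts at outData[L*outLookupStride + I*scalarSize].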
	outData.resize(numLookups*outLookupStride);

	{
		const RenderContext&	renderCtx		= m_context.getRenderContext();
		const glw::Functions&	gl				= renderCtx.getFunctions();
		ShaderExecutorPtr		executor		(createExecutor(m_context.getRenderContext(), m_shaderType, shaderSpec));
		TextureVector			textures		(renderCtx, numSamplers);
		vector<void*>			inputs;
		vector<void*>			outputs;
		vector<int>				expandedIndices;
		const int				maxIndex		= maxElement(lookupIndices);

		m_testCtx.getLog() << *executor;

		if (!executor->isOk())
			TCU_FAIL("Compile failed");

		executor->useProgram();

		// \todo [2014-03-05 pyry] Do we want to randomize tex unit assignments?
		for (int samplerNdx = 0; samplerNdx < numSamplers; samplerNdx++)
		{
			const string	samplerName	= string("sampler[") + de::toString(samplerNdx) + "]";
			const int		samplerLoc	= gl.getUniformLocation(executor->getProgram(), samplerName.c_str());

			if (samplerNdx > maxIndex && samplerLoc < 0)
				continue; // Unused uniform eliminated by compiler

			TCU_CHECK_MSG(samplerLoc >= 0, (string("No location for uniform '") + samplerName + "' found").c_str());

			gl.activeTexture(GL_TEXTURE0 + samplerNdx);
			setupTexture(gl, textures[samplerNdx], m_samplerType, texFormat, &texData[samplerNdx*texFormat.getPixelSize()]);

			gl.uniform1i(samplerLoc, samplerNdx);
		}

		inputs.push_back(&coords[0]);

		if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
		{
			expandedIndices.resize(numInvocations * lookupIndices.size());
			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
			{
				for (int invNdx = 0; invNdx < numInvocations; invNdx++)
					expandedIndices[lookupNdx*numInvocations + invNdx] = lookupIndices[lookupNdx];
			}

			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
				inputs.push_back(&expandedIndices[lookupNdx*numInvocations]);
		}
		else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
			uploadUniformIndices(gl, executor->getProgram(), "index", numLookups, &lookupIndices[0]);

		for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
			outputs.push_back(&outData[outLookupStride*lookupNdx]);

		GLU_EXPECT_NO_ERROR(gl.getError(), "Setup failed");

		executor->execute(numInvocations, &inputs[0], &outputs[0]);
	}

	m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");

	if (isShadowSampler(m_samplerType))
	{
		const tcu::Sampler	refSampler		(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
											 tcu::Sampler::NEAREST, tcu::Sampler::NEAREST, 0.0f, false /* non-normalized */,
											 tcu::Sampler::COMPAREMODE_LESS);
		const int			numCoordComps	= getDataTypeScalarSize(coordType);

		TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 1);

		// Each invocation may have different results.
		for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
		{
			const float	coord	= coords[invocationNdx*numCoordComps + (numCoordComps-1)];

			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
			{
				const int		texNdx		= lookupIndices[lookupNdx];
				const float		result		= *((const float*)(const deUint8*)&outData[lookupNdx*outLookupStride + invocationNdx]);
				const float		reference	= refTexAccess.sample2DCompare(refSampler, tcu::Sampler::NEAREST, coord, (float)texNdx, 0.0f, tcu::IVec3(0));

				if (de::abs(result-reference) > 0.005f)
				{
					m_testCtx.getLog() << TestLog::Message << "ERROR: at invocation " << invocationNdx << ", lookup " << lookupNdx << ": expected "
														   << reference << ", got " << result
									   << TestLog::EndMessage;

					if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
						m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Got invalid lookup result");
				}
			}
		}
	}
	else
	{
		TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 4);

		// Validate results from first invocation
		for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
		{
			const int		texNdx	= lookupIndices[lookupNdx];
			const deUint8*	resPtr	= (const deUint8*)&outData[lookupNdx*outLookupStride];
			bool			isOk;

			if (outputType == TYPE_FLOAT_VEC4)
			{
				const float			threshold		= 1.0f / 256.0f;
				const tcu::Vec4		reference		= refTexAccess.getPixel(texNdx, 0);
				const float*		floatPtr		= (const float*)resPtr;
				const tcu::Vec4		result			(floatPtr[0], floatPtr[1], floatPtr[2], floatPtr[3]);

				isOk = boolAll(lessThanEqual(abs(reference-result), tcu::Vec4(threshold)));

				if (!isOk)
				{
					m_testCtx.getLog() << TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
														   << reference << ", got " << result
									   << TestLog::EndMessage;
				}
			}
			else
			{
				const tcu::UVec4	reference		= refTexAccess.getPixelUint(texNdx, 0);
				const deUint32*		uintPtr			= (const deUint32*)resPtr;
				const tcu::UVec4	result			(uintPtr[0], uintPtr[1], uintPtr[2], uintPtr[3]);

				isOk = boolAll(equal(reference, result));

				if (!isOk)
				{
					m_testCtx.getLog() << TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
														   << reference << ", got " << result
									   << TestLog::EndMessage;
				}
			}

			if (!isOk && m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
				m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Got invalid lookup result");
		}

		// Check results of other invocations against first one
		for (int invocationNdx = 1; invocationNdx < numInvocations; invocationNdx++)
		{
			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
			{
				const deUint32*		refPtr		= &outData[lookupNdx*outLookupStride];
				const deUint32*		resPtr		= refPtr + invocationNdx*4;
				bool				isOk		= true;

				for (int ndx = 0; ndx < 4; ndx++)
					isOk = isOk && (refPtr[ndx] == resPtr[ndx]);

				if (!isOk)
				{
					m_testCtx.getLog() << TestLog::Message << "ERROR: invocation " << invocationNdx << " result "
														   << tcu::formatArray(tcu::Format::HexIterator<deUint32>(resPtr), tcu::Format::HexIterator<deUint32>(resPtr+4))
														   << " for lookup " << lookupNdx << " doesn't match result from first invocation "
														   << tcu::formatArray(tcu::Format::HexIterator<deUint32>(refPtr), tcu::Format::HexIterator<deUint32>(refPtr+4))
									   << TestLog::EndMessage;

					if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
						m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Inconsistent lookup results");
				}
			}
		}
	}

	return STOP;
}

class BlockArrayIndexingCase : public TestCase
{
public:
	enum BlockType
	{
		BLOCKTYPE_UNIFORM = 0,
		BLOCKTYPE_BUFFER,

		BLOCKTYPE_LAST
	};
								BlockArrayIndexingCase		(Context& context, const char* name, const char* description, BlockType blockType, IndexExprType indexExprType, ShaderType shaderType);
								~BlockArrayIndexingCase		(void);

	void						init						(void);
	IterateResult				iterate						(void);

private:
								BlockArrayIndexingCase		(const BlockArrayIndexingCase&);
	BlockArrayIndexingCase&		operator=					(const BlockArrayIndexingCase&);

	void						getShaderSpec				(ShaderSpec* spec, int numInstances, int numReads, const int* readIndices) const;

	const BlockType				m_blockType;
	const IndexExprType			m_indexExprType;
	const ShaderType			m_shaderType;
};

BlockArrayIndexingCase::BlockArrayIndexingCase (Context& context, const char* name, const char* description, BlockType blockType, IndexExprType indexExprType, ShaderType shaderType)
	: TestCase			(context, name, description)
	, m_blockType		(blockType)
	, m_indexExprType	(indexExprType)
	, m_shaderType		(shaderType)
{
}

BlockArrayIndexingCase::~BlockArrayIndexingCase (void)
{
}

void BlockArrayIndexingCase::init (void)
{
	const char* extName = "GL_EXT_gpu_shader5";

	if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL &&
		!m_context.getContextInfo().isExtensionSupported(extName))
		throw tcu::NotSupportedError(string(extName) + " extension is required for dynamic indexing of interface blocks");
}

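// As in the sampler case, getShaderSpec() assembles the indexed reads. A rough sketch of the
// generated declarations and body for a uniform block with numInstances = 4, numReads = 2 and
// constant-literal indexing (the indices shown are illustrative only):
//
//	layout(std140, binding = 2) uniform Block
//	{
//		uint value;
//	} block[4];
//	...
//	result0 = block[1].value;
//	result1 = block[3].value;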
void BlockArrayIndexingCase::getShaderSpec (ShaderSpec* spec, int numInstances, int numReads, const int* readIndices) const
{
	const int			binding			= 2;
	const char*			blockName		= "Block";
	const char*			instanceName	= "block";
	const char*			indicesPrefix	= "index";
	const char*			resultPrefix	= "result";
	const char*			interfaceName	= m_blockType == BLOCKTYPE_UNIFORM ? "uniform" : "buffer";
	const char*			layout			= m_blockType == BLOCKTYPE_UNIFORM ? "std140" : "std430";
	std::ostringstream	global, code;

	if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
		global << "#extension GL_EXT_gpu_shader5 : require\n";

	global <<
		"layout(" << layout << ", binding = " << binding << ") " << interfaceName << " " << blockName << "\n"
		"{\n"
		"	uint value;\n"
		"} " << instanceName << "[" << numInstances << "];\n";

	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
	{
		for (int readNdx = 0; readNdx < numReads; readNdx++)
		{
			const string varName = indicesPrefix + de::toString(readNdx);
			spec->inputs.push_back(Symbol(varName, VarType(TYPE_INT, PRECISION_HIGHP)));
		}
	}
	else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
		declareUniformIndexVars(global, indicesPrefix, numReads);

	for (int readNdx = 0; readNdx < numReads; readNdx++)
	{
		const string varName = resultPrefix + de::toString(readNdx);
		spec->outputs.push_back(Symbol(varName, VarType(TYPE_UINT, PRECISION_HIGHP)));
	}

	for (int readNdx = 0; readNdx < numReads; readNdx++)
	{
		code << resultPrefix << readNdx << " = " << instanceName << "[";

		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
			code << readIndices[readNdx];
		else
			code << indicesPrefix << readNdx;

		code << "].value;\n";
	}

	spec->version				= GLSL_VERSION_310_ES;
	spec->globalDeclarations	= global.str();
	spec->source				= code.str();
}

BlockArrayIndexingCase::IterateResult BlockArrayIndexingCase::iterate (void)
{
	const int			numInvocations		= 32;
	const int			numInstances		= 4;
	const int			numReads			= 4;
	vector<int>			readIndices			(numReads);
	vector<deUint32>	inValues			(numInstances);
	vector<deUint32>	outValues			(numInvocations*numReads);
	ShaderSpec			shaderSpec;
	de::Random			rnd					(deInt32Hash(m_shaderType) ^ deInt32Hash(m_blockType) ^ deInt32Hash(m_indexExprType));

	for (int readNdx = 0; readNdx < numReads; readNdx++)
		readIndices[readNdx] = rnd.getInt(0, numInstances-1);

	for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
		inValues[instanceNdx] = rnd.getUint32();

	getShaderSpec(&shaderSpec, numInstances, numReads, &readIndices[0]);

	{
		const RenderContext&	renderCtx		= m_context.getRenderContext();
		const glw::Functions&	gl				= renderCtx.getFunctions();
		const int				baseBinding		= 2;
		const BufferVector		buffers			(renderCtx, numInstances);
		const deUint32			bufTarget		= m_blockType == BLOCKTYPE_BUFFER ? GL_SHADER_STORAGE_BUFFER : GL_UNIFORM_BUFFER;
		ShaderExecutorPtr		shaderExecutor	(createExecutor(renderCtx, m_shaderType, shaderSpec));
		vector<int>				expandedIndices;
		vector<void*>			inputs;
		vector<void*>			outputs;

		m_testCtx.getLog() << *shaderExecutor;

		if (!shaderExecutor->isOk())
			TCU_FAIL("Compile failed");

		shaderExecutor->useProgram();

		for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
		{
			gl.bindBuffer(bufTarget, buffers[instanceNdx]);
			gl.bufferData(bufTarget, (glw::GLsizeiptr)sizeof(deUint32), &inValues[instanceNdx], GL_STATIC_DRAW);
			gl.bindBufferBase(bufTarget, baseBinding+instanceNdx, buffers[instanceNdx]);
		}

		if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
		{
			expandedIndices.resize(numInvocations * readIndices.size());

			for (int readNdx = 0; readNdx < numReads; readNdx++)
			{
				int* dst = &expandedIndices[numInvocations*readNdx];
				std::fill(dst, dst+numInvocations, readIndices[readNdx]);
			}

			for (int readNdx = 0; readNdx < numReads; readNdx++)
				inputs.push_back(&expandedIndices[readNdx*numInvocations]);
		}
		else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
			uploadUniformIndices(gl, shaderExecutor->getProgram(), "index", numReads, &readIndices[0]);

		for (int readNdx = 0; readNdx < numReads; readNdx++)
			outputs.push_back(&outValues[readNdx*numInvocations]);

		GLU_EXPECT_NO_ERROR(gl.getError(), "Setup failed");

		shaderExecutor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0]);
	}

	m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");

	for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
	{
		for (int readNdx = 0; readNdx < numReads; readNdx++)
		{
			const deUint32	refValue	= inValues[readIndices[readNdx]];
			const deUint32	resValue	= outValues[readNdx*numInvocations + invocationNdx];

			if (refValue != resValue)
			{
				m_testCtx.getLog() << TestLog::Message << "ERROR: at invocation " << invocationNdx
													   << ", read " << readNdx << ": expected "
													   << tcu::toHex(refValue) << ", got " << tcu::toHex(resValue)
								   << TestLog::EndMessage;

				if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
					m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid result value");
			}
		}
	}

	return STOP;
}

class AtomicCounterIndexingCase : public TestCase
{
public:
								AtomicCounterIndexingCase		(Context& context, const char* name, const char* description, IndexExprType indexExprType, ShaderType shaderType);
								~AtomicCounterIndexingCase		(void);

	void						init							(void);
	IterateResult				iterate							(void);

private:
								AtomicCounterIndexingCase		(const AtomicCounterIndexingCase&);
	AtomicCounterIndexingCase&	operator=						(const AtomicCounterIndexingCase&);

	void						getShaderSpec					(ShaderSpec* spec, int numCounters, int numOps, const int* opIndices) const;

	const IndexExprType			m_indexExprType;
	const glu::ShaderType		m_shaderType;
};

AtomicCounterIndexingCase::AtomicCounterIndexingCase (Context& context, const char* name, const char* description, IndexExprType indexExprType, ShaderType shaderType)
	: TestCase			(context, name, description)
	, m_indexExprType	(indexExprType)
	, m_shaderType		(shaderType)
{
}

AtomicCounterIndexingCase::~AtomicCounterIndexingCase (void)
{
}

void AtomicCounterIndexingCase::init (void)
{
	const char* extName = "GL_EXT_gpu_shader5";

	if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL &&
		!m_context.getContextInfo().isExtensionSupported(extName))
		throw tcu::NotSupportedError(string(extName) + " extension is required for dynamic indexing of atomic counters");

	if (m_shaderType == glu::SHADERTYPE_VERTEX || m_shaderType == glu::SHADERTYPE_FRAGMENT)
	{
		int numAtomicCounterBuffers = 0;
		m_context.getRenderContext().getFunctions().getIntegerv(m_shaderType == glu::SHADERTYPE_VERTEX ? GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS
																									   : GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS,
																&numAtomicCounterBuffers);

		if (numAtomicCounterBuffers == 0)
			throw tcu::NotSupportedError(string("Atomic counters not supported in ") + glu::getShaderTypeName(m_shaderType) + " shader");
	}
}

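// A rough sketch of what getShaderSpec() generates for numCounters = 4, numOps = 2 with
// uniform indexing (illustrative values only):
//
//	layout(binding = 0) uniform atomic_uint counter[4];
//	uniform highp int index0;
//	uniform highp int index1;
//	...
//	result0 = atomicCounterIncrement(counter[index0]);
//	result1 = atomicCounterIncrement(counter[index1]);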
void AtomicCounterIndexingCase::getShaderSpec (ShaderSpec* spec, int numCounters, int numOps, const int* opIndices) const
{
	const char*			indicesPrefix	= "index";
	const char*			resultPrefix	= "result";
	std::ostringstream	global, code;

	if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
		global << "#extension GL_EXT_gpu_shader5 : require\n";

	global <<
		"layout(binding = 0) uniform atomic_uint counter[" << numCounters << "];\n";

	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
	{
		for (int opNdx = 0; opNdx < numOps; opNdx++)
		{
			const string varName = indicesPrefix + de::toString(opNdx);
			spec->inputs.push_back(Symbol(varName, VarType(TYPE_INT, PRECISION_HIGHP)));
		}
	}
	else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
		declareUniformIndexVars(global, indicesPrefix, numOps);

	for (int opNdx = 0; opNdx < numOps; opNdx++)
	{
		const string varName = resultPrefix + de::toString(opNdx);
		spec->outputs.push_back(Symbol(varName, VarType(TYPE_UINT, PRECISION_HIGHP)));
	}

	for (int opNdx = 0; opNdx < numOps; opNdx++)
	{
		code << resultPrefix << opNdx << " = atomicCounterIncrement(counter[";

		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
			code << opIndices[opNdx];
		else
			code << indicesPrefix << opNdx;

		code << "]);\n";
	}

	spec->version				= GLSL_VERSION_310_ES;
	spec->globalDeclarations	= global.str();
	spec->source				= code.str();
}

AtomicCounterIndexingCase::IterateResult AtomicCounterIndexingCase::iterate (void)
{
	const RenderContext&	renderCtx			= m_context.getRenderContext();
	const glw::Functions&	gl					= renderCtx.getFunctions();
	const Buffer			counterBuffer		(renderCtx);

	const int				numInvocations		= 32;
	const int				numCounters			= 4;
	const int				numOps				= 4;
	vector<int>				opIndices			(numOps);
	vector<deUint32>		outValues			(numInvocations*numOps);
	ShaderSpec				shaderSpec;
	de::Random				rnd					(deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));

	for (int opNdx = 0; opNdx < numOps; opNdx++)
		opIndices[opNdx] = rnd.getInt(0, numCounters-1); // pick a counter index in [0, numCounters)

	getShaderSpec(&shaderSpec, numCounters, numOps, &opIndices[0]);

	{
		const BufferVector		buffers			(renderCtx, numCounters);
		ShaderExecutorPtr		shaderExecutor	(createExecutor(renderCtx, m_shaderType, shaderSpec));
		vector<int>				expandedIndices;
		vector<void*>			inputs;
		vector<void*>			outputs;

		m_testCtx.getLog() << *shaderExecutor;

		if (!shaderExecutor->isOk())
			TCU_FAIL("Compile failed");

		{
			const int				bufSize		= getProgramResourceInt(gl, shaderExecutor->getProgram(), GL_ATOMIC_COUNTER_BUFFER, 0, GL_BUFFER_DATA_SIZE);
			const int				maxNdx		= maxElement(opIndices);
			std::vector<deUint8>	emptyData	(numCounters*4, 0);

			if (bufSize < (maxNdx+1)*4)
				TCU_FAIL((string("GL reported invalid buffer size " + de::toString(bufSize)).c_str()));

			gl.bindBuffer(GL_ATOMIC_COUNTER_BUFFER, *counterBuffer);
			gl.bufferData(GL_ATOMIC_COUNTER_BUFFER, (glw::GLsizeiptr)emptyData.size(), &emptyData[0], GL_STATIC_DRAW);
			gl.bindBufferBase(GL_ATOMIC_COUNTER_BUFFER, 0, *counterBuffer);
			GLU_EXPECT_NO_ERROR(gl.getError(), "Atomic counter buffer initialization failed");
		}

		shaderExecutor->useProgram();

		if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
		{
			expandedIndices.resize(numInvocations * opIndices.size());

			for (int opNdx = 0; opNdx < numOps; opNdx++)
			{
				int* dst = &expandedIndices[numInvocations*opNdx];
				std::fill(dst, dst+numInvocations, opIndices[opNdx]);
			}

			for (int opNdx = 0; opNdx < numOps; opNdx++)
				inputs.push_back(&expandedIndices[opNdx*numInvocations]);
		}
		else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
			uploadUniformIndices(gl, shaderExecutor->getProgram(), "index", numOps, &opIndices[0]);

		for (int opNdx = 0; opNdx < numOps; opNdx++)
			outputs.push_back(&outValues[opNdx*numInvocations]);

		GLU_EXPECT_NO_ERROR(gl.getError(), "Setup failed");

		shaderExecutor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0]);
	}

	m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");

	{
		vector<int>				numHits			(numCounters, 0);	// Number of hits per counter.
		vector<deUint32>		counterValues	(numCounters);
		vector<vector<bool> >	counterMasks	(numCounters);

		for (int opNdx = 0; opNdx < numOps; opNdx++)
			numHits[opIndices[opNdx]] += 1;

		// Read counter values
		{
			const void* mapPtr = DE_NULL;

			try
			{
				mapPtr = gl.mapBufferRange(GL_ATOMIC_COUNTER_BUFFER, 0, numCounters*4, GL_MAP_READ_BIT);
				GLU_EXPECT_NO_ERROR(gl.getError(), "glMapBufferRange(GL_ATOMIC_COUNTER_BUFFER)");
				TCU_CHECK(mapPtr);
				std::copy((const deUint32*)mapPtr, (const deUint32*)mapPtr + numCounters, &counterValues[0]);
				gl.unmapBuffer(GL_ATOMIC_COUNTER_BUFFER);
			}
			catch (...)
			{
				if (mapPtr)
					gl.unmapBuffer(GL_ATOMIC_COUNTER_BUFFER);
				throw;
			}
		}

		// Verify counter values
		for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
		{
			const deUint32		refCount	= (deUint32)(numHits[counterNdx]*numInvocations);
			const deUint32		resCount	= counterValues[counterNdx];

			if (refCount != resCount)
			{
				m_testCtx.getLog() << TestLog::Message << "ERROR: atomic counter " << counterNdx << " has value " << resCount
													   << ", expected " << refCount
								   << TestLog::EndMessage;

				if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
					m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid atomic counter value");
			}
		}

		// Allocate bitmasks - one bit per each valid result value
		for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
		{
			const int	counterValue	= numHits[counterNdx]*numInvocations;
			counterMasks[counterNdx].resize(counterValue, false);
		}

		// Verify result values from shaders
		for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
		{
			for (int opNdx = 0; opNdx < numOps; opNdx++)
			{
				const int		counterNdx	= opIndices[opNdx];
				const deUint32	resValue	= outValues[opNdx*numInvocations + invocationNdx];
				const bool		rangeOk		= de::inBounds(resValue, 0u, (deUint32)counterMasks[counterNdx].size());
				const bool		notSeen		= rangeOk && !counterMasks[counterNdx][resValue];
				const bool		isOk		= rangeOk && notSeen;

				if (!isOk)
				{
					m_testCtx.getLog() << TestLog::Message << "ERROR: at invocation " << invocationNdx
														   << ", op " << opNdx << ": got invalid result value "
														   << resValue
									   << TestLog::EndMessage;

					if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
						m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid result value");
				}
				else
				{
					// Mark as used - no other invocation should see this value from same counter.
					counterMasks[counterNdx][resValue] = true;
				}
			}
		}

		if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
		{
			// Consistency check - all masks should be 1 now
			for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
			{
				for (vector<bool>::const_iterator i = counterMasks[counterNdx].begin(); i != counterMasks[counterNdx].end(); i++)
					TCU_CHECK_INTERNAL(*i);
			}
		}
	}

	return STOP;
}

} // anonymous

OpaqueTypeIndexingTests::OpaqueTypeIndexingTests (Context& context)
	: TestCaseGroup(context, "opaque_type_indexing", "Opaque Type Indexing Tests")
{
}

OpaqueTypeIndexingTests::~OpaqueTypeIndexingTests (void)
{
}

void OpaqueTypeIndexingTests::init (void)
{
	static const struct
	{
		IndexExprType	type;
		const char*		name;
		const char*		description;
	} indexingTypes[] =
	{
		{ INDEX_EXPR_TYPE_CONST_LITERAL,	"const_literal",		"Indexing by constant literal expression"		},
		{ INDEX_EXPR_TYPE_UNIFORM,			"uniform",				"Indexing by uniform value"						},
		{ INDEX_EXPR_TYPE_DYNAMIC_UNIFORM,	"dynamically_uniform",	"Indexing by dynamically uniform expression"	}
	};

	static const struct
	{
		ShaderType		type;
		const char*		name;
	} shaderTypes[] =
	{
		{ SHADERTYPE_VERTEX,		"vertex"	},
		{ SHADERTYPE_FRAGMENT,		"fragment"	},
		{ SHADERTYPE_COMPUTE,		"compute"	}
	};

	// .sampler
	{
		static const DataType samplerTypes[] =
		{
			// \note 1D images will be added by a later extension.
//			TYPE_SAMPLER_1D,
			TYPE_SAMPLER_2D,
			TYPE_SAMPLER_CUBE,
			TYPE_SAMPLER_2D_ARRAY,
			TYPE_SAMPLER_3D,
//			TYPE_SAMPLER_1D_SHADOW,
			TYPE_SAMPLER_2D_SHADOW,
			TYPE_SAMPLER_CUBE_SHADOW,
			TYPE_SAMPLER_2D_ARRAY_SHADOW,
//			TYPE_INT_SAMPLER_1D,
			TYPE_INT_SAMPLER_2D,
			TYPE_INT_SAMPLER_CUBE,
			TYPE_INT_SAMPLER_2D_ARRAY,
			TYPE_INT_SAMPLER_3D,
//			TYPE_UINT_SAMPLER_1D,
			TYPE_UINT_SAMPLER_2D,
			TYPE_UINT_SAMPLER_CUBE,
			TYPE_UINT_SAMPLER_2D_ARRAY,
			TYPE_UINT_SAMPLER_3D,
		};

		tcu::TestCaseGroup* const samplerGroup = new tcu::TestCaseGroup(m_testCtx, "sampler", "Sampler Array Indexing Tests");
		addChild(samplerGroup);

		for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
		{
			const IndexExprType			indexExprType	= indexingTypes[indexTypeNdx].type;
			tcu::TestCaseGroup* const	indexGroup		= new tcu::TestCaseGroup(m_testCtx, indexingTypes[indexTypeNdx].name, indexingTypes[indexTypeNdx].description);
			samplerGroup->addChild(indexGroup);

			for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
			{
				const ShaderType			shaderType		= shaderTypes[shaderTypeNdx].type;
				tcu::TestCaseGroup* const	shaderGroup		= new tcu::TestCaseGroup(m_testCtx, shaderTypes[shaderTypeNdx].name, "");
				indexGroup->addChild(shaderGroup);

				for (int samplerTypeNdx = 0; samplerTypeNdx < DE_LENGTH_OF_ARRAY(samplerTypes); samplerTypeNdx++)
				{
					const DataType	samplerType	= samplerTypes[samplerTypeNdx];
					const char*		samplerName	= getDataTypeName(samplerType);
					const string	caseName	= de::toLower(samplerName);

					shaderGroup->addChild(new SamplerIndexingCase(m_context, caseName.c_str(), "", shaderType, samplerType, indexExprType));
				}
			}
		}
	}

	// .ubo / .ssbo / .atomic_counter
	{
		tcu::TestCaseGroup* const	uboGroup	= new tcu::TestCaseGroup(m_testCtx, "ubo",				"Uniform Block Instance Array Indexing Tests");
		tcu::TestCaseGroup* const	ssboGroup	= new tcu::TestCaseGroup(m_testCtx, "ssbo",				"Buffer Block Instance Array Indexing Tests");
		tcu::TestCaseGroup* const	acGroup		= new tcu::TestCaseGroup(m_testCtx, "atomic_counter",	"Atomic Counter Array Indexing Tests");
		addChild(uboGroup);
		addChild(ssboGroup);
		addChild(acGroup);

		for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
		{
			const IndexExprType		indexExprType		= indexingTypes[indexTypeNdx].type;
			const char*				indexExprName		= indexingTypes[indexTypeNdx].name;
			const char*				indexExprDesc		= indexingTypes[indexTypeNdx].description;

			for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
			{
				const ShaderType		shaderType		= shaderTypes[shaderTypeNdx].type;
				const string			name			= string(indexExprName) + "_" + shaderTypes[shaderTypeNdx].name;

				uboGroup->addChild	(new BlockArrayIndexingCase		(m_context, name.c_str(), indexExprDesc, BlockArrayIndexingCase::BLOCKTYPE_UNIFORM,	indexExprType, shaderType));
				ssboGroup->addChild	(new BlockArrayIndexingCase		(m_context, name.c_str(), indexExprDesc, BlockArrayIndexingCase::BLOCKTYPE_BUFFER,	indexExprType, shaderType));
				acGroup->addChild	(new AtomicCounterIndexingCase	(m_context, name.c_str(), indexExprDesc, indexExprType, shaderType));
			}
		}
	}
}

} // Functional
} // gles31
} // deqp