/*-------------------------------------------------------------------------
 * drawElements Quality Program OpenGL ES 3.1 Module
 * -------------------------------------------------
 *
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Common built-in function tests.
 *//*--------------------------------------------------------------------*/

#include "es31fShaderCommonFunctionTests.hpp"
#include "gluContextInfo.hpp"
#include "glsShaderExecUtil.hpp"
#include "tcuTestLog.hpp"
#include "tcuFormatUtil.hpp"
#include "tcuFloat.hpp"
#include "deRandom.hpp"
#include "deMath.h"
#include "deString.h"

namespace deqp
{
namespace gles31
{
namespace Functional
{

using std::vector;
using std::string;
using tcu::TestLog;
using namespace gls::ShaderExecUtil;

using tcu::Vec2;
using tcu::Vec3;
using tcu::Vec4;
using tcu::IVec2;
using tcu::IVec3;
using tcu::IVec4;

// Utilities

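// Thin accessor that lets an untyped input/output buffer be indexed as an array of tcu::Vector values.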
template<typename T, int Size>
struct VecArrayAccess
{
public:
									VecArrayAccess	(const void* ptr) : m_array((tcu::Vector<T, Size>*)ptr) {}
									~VecArrayAccess	(void) {}

	const tcu::Vector<T, Size>&		operator[]		(size_t offset) const	{ return m_array[offset];	}
	tcu::Vector<T, Size>&			operator[]		(size_t offset)			{ return m_array[offset];	}

private:
	tcu::Vector<T, Size>*			m_array;
};

template<typename T>	T			randomScalar	(de::Random& rnd, T minValue, T maxValue);
template<> inline		float		randomScalar	(de::Random& rnd, float minValue, float maxValue)		{ return rnd.getFloat(minValue, maxValue);	}
template<> inline		deInt32		randomScalar	(de::Random& rnd, deInt32 minValue, deInt32 maxValue)	{ return rnd.getInt(minValue, maxValue);	}
template<> inline		deUint32	randomScalar	(de::Random& rnd, deUint32 minValue, deUint32 maxValue)	{ return minValue + rnd.getUint32() % (maxValue - minValue + 1); }

template<typename T, int Size>
inline tcu::Vector<T, Size> randomVector (de::Random& rnd, const tcu::Vector<T, Size>& minValue, const tcu::Vector<T, Size>& maxValue)
{
	tcu::Vector<T, Size> res;
	for (int ndx = 0; ndx < Size; ndx++)
		res[ndx] = randomScalar<T>(rnd, minValue[ndx], maxValue[ndx]);
	return res;
}

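// The fill helpers below write random vector- or scalar-typed data straight into an
// untyped buffer. Typical use from a getInputValues() implementation (illustrative only):
//   fillRandomScalars(rnd, -1.0f, 1.0f, values[0], numValues*scalarSize);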
template<typename T, int Size>
static void fillRandomVectors (de::Random& rnd, const tcu::Vector<T, Size>& minValue, const tcu::Vector<T, Size>& maxValue, void* dst, int numValues, int offset = 0)
{
	VecArrayAccess<T, Size> access(dst);
	for (int ndx = 0; ndx < numValues; ndx++)
		access[offset + ndx] = randomVector<T, Size>(rnd, minValue, maxValue);
}

template<typename T>
static void fillRandomScalars (de::Random& rnd, T minValue, T maxValue, void* dst, int numValues, int offset = 0)
{
	T* typedPtr = (T*)dst;
	for (int ndx = 0; ndx < numValues; ndx++)
		typedPtr[offset + ndx] = randomScalar<T>(rnd, minValue, maxValue);
}

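// Number of low mantissa bits that can no longer be exact when an operation shifts a value
// down to a smaller exponent (e.g. the fractional part produced by modf() on a large input);
// used to relax the allowed ULP threshold accordingly.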
inline int numBitsLostInOp (float input, float output)
{
	const int	inExp		= tcu::Float32(input).exponent();
	const int	outExp		= tcu::Float32(output).exponent();

	return de::max(0, inExp-outExp); // Lost due to mantissa shift.
}

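// getUlpDiff() measures the error between two floats as the absolute difference of their
// raw IEEE-754 bit patterns; getUlpDiffIgnoreZeroSign() additionally treats +0 and -0 as
// equal by forcing the zero operand to carry the other operand's sign before comparing.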
inline deUint32 getUlpDiff (float a, float b)
{
	const deUint32	aBits	= tcu::Float32(a).bits();
	const deUint32	bBits	= tcu::Float32(b).bits();
	return aBits > bBits ? aBits - bBits : bBits - aBits;
}

inline deUint32 getUlpDiffIgnoreZeroSign (float a, float b)
{
	if (tcu::Float32(a).isZero())
		return getUlpDiff(tcu::Float32::construct(tcu::Float32(b).sign(), 0, 0).asFloat(), b);
	else if (tcu::Float32(b).isZero())
		return getUlpDiff(a, tcu::Float32::construct(tcu::Float32(a).sign(), 0, 0).asFloat());
	else
		return getUlpDiff(a, b);
}

inline bool supportsSignedZero (glu::Precision precision)
{
	// \note GLSL ES 3.1 doesn't really require support for -0, but we require it for highp
	//		 as it is very widely supported.
	return precision == glu::PRECISION_HIGHP;
}

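// The helpers below convert an accuracy requirement expressed as a number of accurate
// mantissa bits into an allowed ULP difference (all less significant bits may differ),
// and further into an absolute epsilon at the magnitude of a given value.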
inline float getEpsFromMaxUlpDiff (float value, deUint32 ulpDiff)
{
	const int exp = tcu::Float32(value).exponent();
	return tcu::Float32::construct(+1, exp, (1u<<23) | ulpDiff).asFloat() - tcu::Float32::construct(+1, exp, 1u<<23).asFloat();
}

inline deUint32 getMaxUlpDiffFromBits (int numAccurateBits)
{
	const int		numGarbageBits	= 23-numAccurateBits;
	const deUint32	mask			= (1u<<numGarbageBits)-1u;

	return mask;
}

inline float getEpsFromBits (float value, int numAccurateBits)
{
	return getEpsFromMaxUlpDiff(value, getMaxUlpDiffFromBits(numAccurateBits));
}

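// Minimum number of accurate mantissa bits assumed for each precision qualifier.
// These are conservative bounds; the comparison routines derive their ULP and epsilon
// thresholds from them.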
static int getMinMantissaBits (glu::Precision precision)
{
	const int bits[] =
	{
		7,		// lowp
		10,		// mediump
		23		// highp
	};
	DE_STATIC_ASSERT(DE_LENGTH_OF_ARRAY(bits) == glu::PRECISION_LAST);
	DE_ASSERT(de::inBounds<int>(precision, 0, DE_LENGTH_OF_ARRAY(bits)));
	return bits[precision];
}

static int getMaxNormalizedValueExponent (glu::Precision precision)
{
	const int exponent[] =
	{
		0,		// lowp
		13,		// mediump
		127		// highp
	};
	DE_STATIC_ASSERT(DE_LENGTH_OF_ARRAY(exponent) == glu::PRECISION_LAST);
	DE_ASSERT(de::inBounds<int>(precision, 0, DE_LENGTH_OF_ARRAY(exponent)));
	return exponent[precision];
}

static int getMinNormalizedValueExponent (glu::Precision precision)
{
	const int exponent[] =
	{
		-7,		// lowp
		-13,	// mediump
		-126	// highp
	};
	DE_STATIC_ASSERT(DE_LENGTH_OF_ARRAY(exponent) == glu::PRECISION_LAST);
	DE_ASSERT(de::inBounds<int>(precision, 0, DE_LENGTH_OF_ARRAY(exponent)));
	return exponent[precision];
}

// CommonFunctionCase

class CommonFunctionCase : public TestCase
{
public:
							CommonFunctionCase		(Context& context, const char* name, const char* description, glu::ShaderType shaderType);
							~CommonFunctionCase		(void);

	void					init					(void);
	void					deinit					(void);
	IterateResult			iterate					(void);

protected:
							CommonFunctionCase		(const CommonFunctionCase& other);
	CommonFunctionCase&		operator=				(const CommonFunctionCase& other);

	virtual void			getInputValues			(int numValues, void* const* values) const = 0;
	virtual bool			compare					(const void* const* inputs, const void* const* outputs) = 0;

	glu::ShaderType			m_shaderType;
	ShaderSpec				m_spec;
	int						m_numValues;

	std::ostringstream		m_failMsg;				//!< Comparison failure help message.

private:
	ShaderExecutor*			m_executor;
};

CommonFunctionCase::CommonFunctionCase (Context& context, const char* name, const char* description, glu::ShaderType shaderType)
	: TestCase		(context, name, description)
	, m_shaderType	(shaderType)
	, m_numValues	(100)
	, m_executor	(DE_NULL)
{
	m_spec.version = glu::GLSL_VERSION_310_ES;
}

CommonFunctionCase::~CommonFunctionCase (void)
{
	CommonFunctionCase::deinit();
}

void CommonFunctionCase::init (void)
{
	DE_ASSERT(!m_executor);

	m_executor = createExecutor(m_context.getRenderContext(), m_shaderType, m_spec);
	m_testCtx.getLog() << m_executor;

	if (!m_executor->isOk())
		throw tcu::TestError("Compile failed");
}

void CommonFunctionCase::deinit (void)
{
	delete m_executor;
	m_executor = DE_NULL;
}

static vector<int> getScalarSizes (const vector<Symbol>& symbols)
{
	vector<int> sizes(symbols.size());
	for (int ndx = 0; ndx < (int)symbols.size(); ++ndx)
		sizes[ndx] = symbols[ndx].varType.getScalarSize();
	return sizes;
}

static int computeTotalScalarSize (const vector<Symbol>& symbols)
{
	int totalSize = 0;
	for (vector<Symbol>::const_iterator sym = symbols.begin(); sym != symbols.end(); ++sym)
		totalSize += sym->varType.getScalarSize();
	return totalSize;
}

static vector<void*> getInputOutputPointers (const vector<Symbol>& symbols, vector<deUint32>& data, const int numValues)
{
	vector<void*>	pointers		(symbols.size());
	int				curScalarOffset	= 0;

	for (int varNdx = 0; varNdx < (int)symbols.size(); ++varNdx)
	{
		const Symbol&	var				= symbols[varNdx];
		const int		scalarSize		= var.varType.getScalarSize();

		// Uses planar layout as input/output specs do not support strides.
		pointers[varNdx] = &data[curScalarOffset];
		curScalarOffset += scalarSize*numValues;
	}

	DE_ASSERT(curScalarOffset == (int)data.size());

	return pointers;
}

// \todo [2013-08-08 pyry] Make generic utility and move to glu?

struct HexFloat
{
	const float value;
	HexFloat (const float value_) : value(value_) {}
};

std::ostream& operator<< (std::ostream& str, const HexFloat& v)
{
	return str << v.value << " / " << tcu::toHex(tcu::Float32(v.value).bits());
}

struct HexBool
{
	const deUint32 value;
	HexBool (const deUint32 value_) : value(value_) {}
};

std::ostream& operator<< (std::ostream& str, const HexBool& v)
{
	return str << (v.value ? "true" : "false") << " / " << tcu::toHex(v.value);
}

struct VarValue
{
	const glu::VarType&	type;
	const void*			value;

	VarValue (const glu::VarType& type_, const void* value_) : type(type_), value(value_) {}
};

std::ostream& operator<< (std::ostream& str, const VarValue& varValue)
{
	DE_ASSERT(varValue.type.isBasicType());

	const glu::DataType		basicType		= varValue.type.getBasicType();
	const glu::DataType		scalarType		= glu::getDataTypeScalarType(basicType);
	const int				numComponents	= glu::getDataTypeScalarSize(basicType);

	if (numComponents > 1)
		str << glu::getDataTypeName(basicType) << "(";

	for (int compNdx = 0; compNdx < numComponents; compNdx++)
	{
		if (compNdx != 0)
			str << ", ";

		switch (scalarType)
		{
			case glu::TYPE_FLOAT:	str << HexFloat(((const float*)varValue.value)[compNdx]);			break;
			case glu::TYPE_INT:		str << ((const deInt32*)varValue.value)[compNdx];					break;
			case glu::TYPE_UINT:	str << tcu::toHex(((const deUint32*)varValue.value)[compNdx]);		break;
			case glu::TYPE_BOOL:	str << HexBool(((const deUint32*)varValue.value)[compNdx]);			break;

			default:
				DE_ASSERT(false);
		}
	}

	if (numComponents > 1)
		str << ")";

	return str;
}

CommonFunctionCase::IterateResult CommonFunctionCase::iterate (void)
{
	const int				numInputScalars			= computeTotalScalarSize(m_spec.inputs);
	const int				numOutputScalars		= computeTotalScalarSize(m_spec.outputs);
	vector<deUint32>		inputData				(numInputScalars * m_numValues);
	vector<deUint32>		outputData				(numOutputScalars * m_numValues);
	const vector<void*>		inputPointers			= getInputOutputPointers(m_spec.inputs, inputData, m_numValues);
	const vector<void*>		outputPointers			= getInputOutputPointers(m_spec.outputs, outputData, m_numValues);

	// Initialize input data.
	getInputValues(m_numValues, &inputPointers[0]);

	// Execute shader.
	m_executor->useProgram();
	m_executor->execute(m_numValues, &inputPointers[0], &outputPointers[0]);

	// Compare results.
	{
		const vector<int>		inScalarSizes		= getScalarSizes(m_spec.inputs);
		const vector<int>		outScalarSizes		= getScalarSizes(m_spec.outputs);
		vector<void*>			curInputPtr			(inputPointers.size());
		vector<void*>			curOutputPtr		(outputPointers.size());
		int						numFailed			= 0;

		for (int valNdx = 0; valNdx < m_numValues; valNdx++)
		{
			// Set up pointers for comparison.
			for (int inNdx = 0; inNdx < (int)curInputPtr.size(); ++inNdx)
				curInputPtr[inNdx] = (deUint32*)inputPointers[inNdx] + inScalarSizes[inNdx]*valNdx;

			for (int outNdx = 0; outNdx < (int)curOutputPtr.size(); ++outNdx)
				curOutputPtr[outNdx] = (deUint32*)outputPointers[outNdx] + outScalarSizes[outNdx]*valNdx;

			if (!compare(&curInputPtr[0], &curOutputPtr[0]))
			{
				// \todo [2013-08-08 pyry] We probably want to log reference value as well?

				m_testCtx.getLog() << TestLog::Message << "ERROR: comparison failed for value " << valNdx << ":\n  " << m_failMsg.str() << TestLog::EndMessage;

				m_testCtx.getLog() << TestLog::Message << "  inputs:" << TestLog::EndMessage;
				for (int inNdx = 0; inNdx < (int)curInputPtr.size(); inNdx++)
					m_testCtx.getLog() << TestLog::Message << "    " << m_spec.inputs[inNdx].name << " = "
														   << VarValue(m_spec.inputs[inNdx].varType, curInputPtr[inNdx])
									   << TestLog::EndMessage;

				m_testCtx.getLog() << TestLog::Message << "  outputs:" << TestLog::EndMessage;
				for (int outNdx = 0; outNdx < (int)curOutputPtr.size(); outNdx++)
					m_testCtx.getLog() << TestLog::Message << "    " << m_spec.outputs[outNdx].name << " = "
														   << VarValue(m_spec.outputs[outNdx].varType, curOutputPtr[outNdx])
									   << TestLog::EndMessage;

				m_failMsg.str("");
				m_failMsg.clear();
				numFailed += 1;
			}
		}

		m_testCtx.getLog() << TestLog::Message << (m_numValues - numFailed) << " / " << m_numValues << " values passed" << TestLog::EndMessage;

		m_testCtx.setTestResult(numFailed == 0 ? QP_TEST_RESULT_PASS	: QP_TEST_RESULT_FAIL,
								numFailed == 0 ? "Pass"					: "Result comparison failed");
	}

	return STOP;
}

static const char* getPrecisionPostfix (glu::Precision precision)
{
	static const char* s_postfix[] =
	{
		"_lowp",
		"_mediump",
		"_highp"
	};
	DE_STATIC_ASSERT(DE_LENGTH_OF_ARRAY(s_postfix) == glu::PRECISION_LAST);
	DE_ASSERT(de::inBounds<int>(precision, 0, DE_LENGTH_OF_ARRAY(s_postfix)));
	return s_postfix[precision];
}

static const char* getShaderTypePostfix (glu::ShaderType shaderType)
{
	static const char* s_postfix[] =
	{
		"_vertex",
		"_fragment",
		"_geometry",
		"_tess_control",
		"_tess_eval",
		"_compute"
	};
	DE_ASSERT(de::inBounds<int>(shaderType, 0, DE_LENGTH_OF_ARRAY(s_postfix)));
	return s_postfix[shaderType];
}

static std::string getCommonFuncCaseName (glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
{
	return string(glu::getDataTypeName(baseType)) + getPrecisionPostfix(precision) + getShaderTypePostfix(shaderType);
}

class AbsCase : public CommonFunctionCase
{
public:
	AbsCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "abs", shaderType)
	{
		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
		m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
		m_spec.source = "out0 = abs(in0);";
	}

	void getInputValues (int numValues, void* const* values) const
	{
		const Vec2 floatRanges[] =
		{
			Vec2(-2.0f,		2.0f),	// lowp
			Vec2(-1e3f,		1e3f),	// mediump
			Vec2(-1e7f,		1e7f)	// highp
		};
		const IVec2 intRanges[] =
		{
			IVec2(-(1<<7)+1,	(1<<7)-1),
			IVec2(-(1<<15)+1,	(1<<15)-1),
			IVec2(0x80000001,	0x7fffffff)
		};

		de::Random				rnd			(deStringHash(getName()) ^ 0x235facu);
		const glu::DataType		type		= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision	= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize	= glu::getDataTypeScalarSize(type);

		if (glu::isDataTypeFloatOrVec(type))
			fillRandomScalars(rnd, floatRanges[precision].x(), floatRanges[precision].y(), values[0], numValues*scalarSize);
		else
			fillRandomScalars(rnd, intRanges[precision].x(), intRanges[precision].y(), values[0], numValues*scalarSize);
	}

	bool compare (const void* const* inputs, const void* const* outputs)
	{
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize		= glu::getDataTypeScalarSize(type);

		if (glu::isDataTypeFloatOrVec(type))
		{
			const int		mantissaBits	= getMinMantissaBits(precision);
			const deUint32	maxUlpDiff		= (1u<<(23-mantissaBits))-1u;

			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float		in0			= ((const float*)inputs[0])[compNdx];
				const float		out0		= ((const float*)outputs[0])[compNdx];
				const float		ref0		= de::abs(in0);
				const deUint32	ulpDiff0	= getUlpDiff(out0, ref0);

				if (ulpDiff0 > maxUlpDiff)
				{
					m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref0) << " with ULP threshold " << maxUlpDiff << ", got ULP diff " << ulpDiff0;
					return false;
				}
			}
		}
		else
		{
			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const int	in0		= ((const int*)inputs[0])[compNdx];
				const int	out0	= ((const int*)outputs[0])[compNdx];
				const int	ref0	= de::abs(in0);

				if (out0 != ref0)
				{
					m_failMsg << "Expected [" << compNdx << "] = " << ref0;
					return false;
				}
			}
		}

		return true;
	}
};

class SignCase : public CommonFunctionCase
{
public:
	SignCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "sign", shaderType)
	{
		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
		m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
		m_spec.source = "out0 = sign(in0);";
	}

	void getInputValues (int numValues, void* const* values) const
	{
		const Vec2 floatRanges[] =
		{
			Vec2(-2.0f,		2.0f),	// lowp
			Vec2(-1e4f,		1e4f),	// mediump	- note: may end up as inf
			Vec2(-1e8f,		1e8f)	// highp	- note: may end up as inf
		};
		const IVec2 intRanges[] =
		{
			IVec2(-(1<<7),		(1<<7)-1),
			IVec2(-(1<<15),		(1<<15)-1),
			IVec2(0x80000000,	0x7fffffff)
		};

		de::Random				rnd			(deStringHash(getName()) ^ 0x324u);
		const glu::DataType		type		= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision	= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize	= glu::getDataTypeScalarSize(type);

		if (glu::isDataTypeFloatOrVec(type))
		{
			// Special cases.
			std::fill((float*)values[0],				(float*)values[0] + scalarSize*1,	+1.0f);
			std::fill((float*)values[0] + scalarSize*1,	(float*)values[0] + scalarSize*2,	-1.0f);
			std::fill((float*)values[0] + scalarSize*2,	(float*)values[0] + scalarSize*3,	 0.0f);
			fillRandomScalars(rnd, floatRanges[precision].x(), floatRanges[precision].y(), (float*)values[0] + scalarSize*3, (numValues-3)*scalarSize);
		}
		else
		{
			std::fill((int*)values[0],					(int*)values[0] + scalarSize*1,	+1);
			std::fill((int*)values[0] + scalarSize*1,	(int*)values[0] + scalarSize*2,	-1);
			std::fill((int*)values[0] + scalarSize*2,	(int*)values[0] + scalarSize*3,	 0);
			fillRandomScalars(rnd, intRanges[precision].x(), intRanges[precision].y(), (int*)values[0] + scalarSize*3, (numValues-3)*scalarSize);
		}
	}

	bool compare (const void* const* inputs, const void* const* outputs)
	{
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize		= glu::getDataTypeScalarSize(type);

		if (glu::isDataTypeFloatOrVec(type))
		{
			// Both highp and mediump should be able to represent -1, 0, and +1 exactly
			const deUint32 maxUlpDiff = precision == glu::PRECISION_LOWP ? getMaxUlpDiffFromBits(getMinMantissaBits(precision)) : 0;

			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float		in0			= ((const float*)inputs[0])[compNdx];
				const float		out0		= ((const float*)outputs[0])[compNdx];
				const float		ref0		= in0 < 0.0f ? -1.0f :
											  in0 > 0.0f ? +1.0f : 0.0f;
				const deUint32	ulpDiff0	= getUlpDiff(out0, ref0);

				if (ulpDiff0 > maxUlpDiff)
				{
					m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref0) << " with ULP threshold " << maxUlpDiff << ", got ULP diff " << ulpDiff0;
					return false;
				}
			}
		}
		else
		{
			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const int	in0		= ((const int*)inputs[0])[compNdx];
				const int	out0	= ((const int*)outputs[0])[compNdx];
				const int	ref0	= in0 < 0 ? -1 :
									  in0 > 0 ? +1 : 0;

				if (out0 != ref0)
				{
					m_failMsg << "Expected [" << compNdx << "] = " << ref0;
					return false;
				}
			}
		}

		return true;
	}
};

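// Reference implementation of GLSL roundEven(): round to the nearest integer, choosing
// the nearest even integer when the fraction is exactly 0.5.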
static float roundEven (float v)
{
	const float		q			= deFloatFrac(v);
	const int		truncated	= int(v-q);
	const int		rounded		= (q > 0.5f)							? (truncated + 1) :	// Rounded up
									(q == 0.5f && (truncated % 2 != 0))	? (truncated + 1) :	// Round to nearest even at 0.5
									truncated;												// Rounded down

	return float(rounded);
}

class RoundEvenCase : public CommonFunctionCase
{
public:
	RoundEvenCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "roundEven", shaderType)
	{
		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
		m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
		m_spec.source = "out0 = roundEven(in0);";
	}

	void getInputValues (int numValues, void* const* values) const
	{
		const Vec2 ranges[] =
		{
			Vec2(-2.0f,		2.0f),	// lowp
			Vec2(-1e3f,		1e3f),	// mediump
			Vec2(-1e7f,		1e7f)	// highp
		};

		de::Random				rnd				(deStringHash(getName()) ^ 0xac23fu);
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize		= glu::getDataTypeScalarSize(type);
		int						numSpecialCases	= 0;

		// Special cases.
		if (precision != glu::PRECISION_LOWP)
		{
			DE_ASSERT(numValues >= 20);
			for (int ndx = 0; ndx < 20; ndx++)
			{
				const float v = de::clamp(float(ndx) - 10.5f, ranges[precision].x(), ranges[precision].y());
				std::fill((float*)values[0] + ndx*scalarSize, (float*)values[0] + (ndx+1)*scalarSize, v);
				numSpecialCases += 1;
			}
		}

		// Random cases.
		fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), (float*)values[0] + numSpecialCases*scalarSize, (numValues-numSpecialCases)*scalarSize);

		// If precision is mediump, make sure values can be represented in fp16 exactly
		if (precision == glu::PRECISION_MEDIUMP)
		{
			for (int ndx = 0; ndx < numValues*scalarSize; ndx++)
				((float*)values[0])[ndx] = tcu::Float16(((float*)values[0])[ndx]).asFloat();
		}
	}

	bool compare (const void* const* inputs, const void* const* outputs)
	{
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const bool				hasSignedZero	= supportsSignedZero(precision);
		const int				scalarSize		= glu::getDataTypeScalarSize(type);

		if (precision == glu::PRECISION_HIGHP || precision == glu::PRECISION_MEDIUMP)
		{
			// Require exact rounding result.
			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float		in0			= ((const float*)inputs[0])[compNdx];
				const float		out0		= ((const float*)outputs[0])[compNdx];
				const float		ref			= roundEven(in0);

				const deUint32	ulpDiff		= hasSignedZero ? getUlpDiff(out0, ref) : getUlpDiffIgnoreZeroSign(out0, ref);

				if (ulpDiff > 0)
				{
					m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref) << ", got ULP diff " << tcu::toHex(ulpDiff);
					return false;
				}
			}
		}
		else
		{
			const int		mantissaBits	= getMinMantissaBits(precision);
			const deUint32	maxUlpDiff		= getMaxUlpDiffFromBits(mantissaBits);	// ULP diff for rounded integer value.
			const float		eps				= getEpsFromBits(1.0f, mantissaBits);	// epsilon for rounding bounds

			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float		in0			= ((const float*)inputs[0])[compNdx];
				const float		out0		= ((const float*)outputs[0])[compNdx];
				const int		minRes		= int(roundEven(in0-eps));
				const int		maxRes		= int(roundEven(in0+eps));
				bool			anyOk		= false;

				for (int roundedVal = minRes; roundedVal <= maxRes; roundedVal++)
				{
					const deUint32 ulpDiff = getUlpDiffIgnoreZeroSign(out0, float(roundedVal));

					if (ulpDiff <= maxUlpDiff)
					{
						anyOk = true;
						break;
					}
				}

				if (!anyOk)
				{
					m_failMsg << "Expected [" << compNdx << "] = [" << minRes << ", " << maxRes << "] with ULP threshold " << tcu::toHex(maxUlpDiff);
					return false;
				}
			}
		}

		return true;
	}
};

class ModfCase : public CommonFunctionCase
{
public:
	ModfCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "modf", shaderType)
	{
		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
		m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
		m_spec.outputs.push_back(Symbol("out1", glu::VarType(baseType, precision)));
		m_spec.source = "out0 = modf(in0, out1);";
	}

	void getInputValues (int numValues, void* const* values) const
	{
		const Vec2 ranges[] =
		{
			Vec2(-2.0f,		2.0f),	// lowp
			Vec2(-1e3f,		1e3f),	// mediump
			Vec2(-1e7f,		1e7f)	// highp
		};

		de::Random				rnd			(deStringHash(getName()) ^ 0xac23fu);
		const glu::DataType		type		= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision	= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize	= glu::getDataTypeScalarSize(type);

		fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), values[0], numValues*scalarSize);
	}

	bool compare (const void* const* inputs, const void* const* outputs)
	{
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const bool				hasZeroSign		= supportsSignedZero(precision);
		const int				scalarSize		= glu::getDataTypeScalarSize(type);

		const int				mantissaBits	= getMinMantissaBits(precision);

		for (int compNdx = 0; compNdx < scalarSize; compNdx++)
		{
			const float		in0			= ((const float*)inputs[0])[compNdx];
			const float		out0		= ((const float*)outputs[0])[compNdx];
			const float		out1		= ((const float*)outputs[1])[compNdx];

			const float		refOut1		= float(int(in0));
			const float		refOut0		= in0 - refOut1;

			const int		bitsLost	= precision != glu::PRECISION_HIGHP ? numBitsLostInOp(in0, refOut0) : 0;
			const deUint32	maxUlpDiff	= getMaxUlpDiffFromBits(de::max(mantissaBits - bitsLost, 0));

			const float		resSum		= out0 + out1;

			const deUint32	ulpDiff		= hasZeroSign ? getUlpDiff(resSum, in0) : getUlpDiffIgnoreZeroSign(resSum, in0);

			if (ulpDiff > maxUlpDiff)
			{
				m_failMsg << "Expected [" << compNdx << "] = (" << HexFloat(refOut0) << ") + (" << HexFloat(refOut1) << ") = " << HexFloat(in0) << " with ULP threshold "
							<< tcu::toHex(maxUlpDiff) << ", got ULP diff " << tcu::toHex(ulpDiff);
				return false;
			}
		}

		return true;
	}
};

class IsnanCase : public CommonFunctionCase
{
public:
	IsnanCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "isnan", shaderType)
	{
		DE_ASSERT(glu::isDataTypeFloatOrVec(baseType));

		const int			vecSize		= glu::getDataTypeScalarSize(baseType);
		const glu::DataType	boolType	= vecSize > 1 ? glu::getDataTypeBoolVec(vecSize) : glu::TYPE_BOOL;

		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
		m_spec.outputs.push_back(Symbol("out0", glu::VarType(boolType, glu::PRECISION_LAST)));
		m_spec.source = "out0 = isnan(in0);";
	}

	void getInputValues (int numValues, void* const* values) const
	{
		de::Random				rnd				(deStringHash(getName()) ^ 0xc2a39fu);
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize		= glu::getDataTypeScalarSize(type);
		const int				mantissaBits	= getMinMantissaBits(precision);
		const deUint32			mantissaMask	= ~getMaxUlpDiffFromBits(mantissaBits) & ((1u<<23)-1u);

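		// Build each input bit pattern explicitly: NaNs get an all-ones exponent with a
		// non-zero mantissa, Infs an all-ones exponent with a zero mantissa, and ordinary
		// values a restricted exponent with the low mantissa bits cleared so the value
		// stays representable at the tested precision.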
		for (int valNdx = 0; valNdx < numValues*scalarSize; valNdx++)
		{
			const bool		isNan		= rnd.getFloat() > 0.3f;
			const bool		isInf		= !isNan && rnd.getFloat() > 0.4f;
			const deUint32	mantissa	= !isInf ? ((1u<<22) | (rnd.getUint32() & mantissaMask)) : 0;
			const deUint32	exp			= !isNan && !isInf ? (rnd.getUint32() & 0x7fu) : 0xffu;
			const deUint32	sign		= rnd.getUint32() & 0x1u;
			const deUint32	value		= (sign << 31) | (exp << 23) | mantissa;

			DE_ASSERT(tcu::Float32(value).isInf() == isInf && tcu::Float32(value).isNaN() == isNan);

			((deUint32*)values[0])[valNdx] = value;
		}
	}

	bool compare (const void* const* inputs, const void* const* outputs)
	{
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize		= glu::getDataTypeScalarSize(type);

		if (precision == glu::PRECISION_HIGHP)
		{
			// Only highp is required to support inf/nan
			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float		in0		= ((const float*)inputs[0])[compNdx];
				const bool		out0	= ((const deUint32*)outputs[0])[compNdx] != 0;
				const bool		ref		= tcu::Float32(in0).isNaN();

				if (out0 != ref)
				{
					m_failMsg << "Expected [" << compNdx << "] = " << (ref ? "true" : "false");
					return false;
				}
			}
		}
		else if (precision == glu::PRECISION_MEDIUMP || precision == glu::PRECISION_LOWP)
		{
			// NaN support is optional, check that inputs that are not NaN don't result in true.
			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float		in0		= ((const float*)inputs[0])[compNdx];
				const bool		out0	= ((const deUint32*)outputs[0])[compNdx] != 0;
				const bool		ref		= tcu::Float32(in0).isNaN();

				if (!ref && out0)
				{
					m_failMsg << "Expected [" << compNdx << "] = " << (ref ? "true" : "false");
					return false;
				}
			}
		}

		return true;
	}
};

class IsinfCase : public CommonFunctionCase
{
public:
	IsinfCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "isinf", shaderType)
	{
		DE_ASSERT(glu::isDataTypeFloatOrVec(baseType));

		const int			vecSize		= glu::getDataTypeScalarSize(baseType);
		const glu::DataType	boolType	= vecSize > 1 ? glu::getDataTypeBoolVec(vecSize) : glu::TYPE_BOOL;

		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
		m_spec.outputs.push_back(Symbol("out0", glu::VarType(boolType, glu::PRECISION_LAST)));
		m_spec.source = "out0 = isinf(in0);";
	}

	void getInputValues (int numValues, void* const* values) const
	{
		de::Random				rnd				(deStringHash(getName()) ^ 0xc2a39fu);
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize		= glu::getDataTypeScalarSize(type);
		const int				mantissaBits	= getMinMantissaBits(precision);
		const deUint32			mantissaMask	= ~getMaxUlpDiffFromBits(mantissaBits) & ((1u<<23)-1u);

		for (int valNdx = 0; valNdx < numValues*scalarSize; valNdx++)
		{
			const bool		isInf		= rnd.getFloat() > 0.3f;
			const bool		isNan		= !isInf && rnd.getFloat() > 0.4f;
			const deUint32	mantissa	= !isInf ? ((1u<<22) | (rnd.getUint32() & mantissaMask)) : 0;
			const deUint32	exp			= !isNan && !isInf ? (rnd.getUint32() & 0x7fu) : 0xffu;
			const deUint32	sign		= rnd.getUint32() & 0x1u;
			const deUint32	value		= (sign << 31) | (exp << 23) | mantissa;

			DE_ASSERT(tcu::Float32(value).isInf() == isInf && tcu::Float32(value).isNaN() == isNan);

			((deUint32*)values[0])[valNdx] = value;
		}
	}

	bool compare (const void* const* inputs, const void* const* outputs)
	{
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize		= glu::getDataTypeScalarSize(type);

		if (precision == glu::PRECISION_HIGHP)
		{
			// Only highp is required to support inf/nan
			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float		in0		= ((const float*)inputs[0])[compNdx];
				const bool		out0	= ((const deUint32*)outputs[0])[compNdx] != 0;
				const bool		ref		= tcu::Float32(in0).isInf();

				if (out0 != ref)
				{
					m_failMsg << "Expected [" << compNdx << "] = " << HexBool(ref);
					return false;
				}
			}
		}
		else if (precision == glu::PRECISION_MEDIUMP)
		{
			// Inf support is optional, check that inputs that are not Inf in mediump don't result in true.
			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float		in0		= ((const float*)inputs[0])[compNdx];
				const bool		out0	= ((const deUint32*)outputs[0])[compNdx] != 0;
				const bool		ref		= tcu::Float16(in0).isInf();

				if (!ref && out0)
				{
					m_failMsg << "Expected [" << compNdx << "] = " << (ref ? "true" : "false");
					return false;
				}
			}
		}
		// else: no verification can be performed

		return true;
	}
};

class FloatBitsToUintIntCase : public CommonFunctionCase
{
public:
	FloatBitsToUintIntCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType, bool outIsSigned)
		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), outIsSigned ? "floatBitsToInt" : "floatBitsToUint", shaderType)
	{
		const int			vecSize		= glu::getDataTypeScalarSize(baseType);
		const glu::DataType	intType		= outIsSigned ? (vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT)
													  : (vecSize > 1 ? glu::getDataTypeUintVec(vecSize) : glu::TYPE_UINT);

		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
		m_spec.outputs.push_back(Symbol("out0", glu::VarType(intType, glu::PRECISION_HIGHP)));
		m_spec.source = outIsSigned ? "out0 = floatBitsToInt(in0);" : "out0 = floatBitsToUint(in0);";
	}

	void getInputValues (int numValues, void* const* values) const
	{
		const Vec2 ranges[] =
		{
			Vec2(-2.0f,		2.0f),	// lowp
			Vec2(-1e3f,		1e3f),	// mediump
			Vec2(-1e7f,		1e7f)	// highp
		};

		de::Random				rnd			(deStringHash(getName()) ^ 0x2790au);
		const glu::DataType		type		= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision	= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize	= glu::getDataTypeScalarSize(type);

		fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), values[0], numValues*scalarSize);
	}

	bool compare (const void* const* inputs, const void* const* outputs)
	{
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize		= glu::getDataTypeScalarSize(type);

		const int				mantissaBits	= getMinMantissaBits(precision);
		const int				maxUlpDiff		= getMaxUlpDiffFromBits(mantissaBits);

		for (int compNdx = 0; compNdx < scalarSize; compNdx++)
		{
			const float		in0			= ((const float*)inputs[0])[compNdx];
			const deUint32	out0		= ((const deUint32*)outputs[0])[compNdx];
			const deUint32	refOut0		= tcu::Float32(in0).bits();
			const int		ulpDiff		= de::abs((int)out0 - (int)refOut0);

			if (ulpDiff > maxUlpDiff)
			{
				m_failMsg << "Expected [" << compNdx << "] = " << tcu::toHex(refOut0) << " with threshold "
							<< tcu::toHex(maxUlpDiff) << ", got diff " << tcu::toHex(ulpDiff);
				return false;
			}
		}

		return true;
	}
};

class FloatBitsToIntCase : public FloatBitsToUintIntCase
{
public:
	FloatBitsToIntCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
		: FloatBitsToUintIntCase(context, baseType, precision, shaderType, true)
	{
	}
};

class FloatBitsToUintCase : public FloatBitsToUintIntCase
{
public:
	FloatBitsToUintCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
		: FloatBitsToUintIntCase(context, baseType, precision, shaderType, false)
	{
	}
};

class BitsToFloatCase : public CommonFunctionCase
{
public:
	BitsToFloatCase (Context& context, glu::DataType baseType, glu::ShaderType shaderType)
		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, glu::PRECISION_HIGHP, shaderType).c_str(), glu::isDataTypeIntOrIVec(baseType) ? "intBitsToFloat" : "uintBitsToFloat", shaderType)
	{
		const bool			inIsSigned	= glu::isDataTypeIntOrIVec(baseType);
		const int			vecSize		= glu::getDataTypeScalarSize(baseType);
		const glu::DataType	floatType	= vecSize > 1 ? glu::getDataTypeFloatVec(vecSize) : glu::TYPE_FLOAT;

		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, glu::PRECISION_HIGHP)));
		m_spec.outputs.push_back(Symbol("out0", glu::VarType(floatType, glu::PRECISION_HIGHP)));
		m_spec.source = inIsSigned ? "out0 = intBitsToFloat(in0);" : "out0 = uintBitsToFloat(in0);";
	}

	void getInputValues (int numValues, void* const* values) const
	{
		de::Random				rnd			(deStringHash(getName()) ^ 0xbbb225u);
		const glu::DataType		type		= m_spec.inputs[0].varType.getBasicType();
		const int				scalarSize	= glu::getDataTypeScalarSize(type);
		const Vec2				range		(-1e8f, +1e8f);

		// \note Filled as floats.
		fillRandomScalars(rnd, range.x(), range.y(), values[0], numValues*scalarSize);
	}

	bool compare (const void* const* inputs, const void* const* outputs)
	{
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const int				scalarSize		= glu::getDataTypeScalarSize(type);
		const deUint32			maxUlpDiff		= 0;

		for (int compNdx = 0; compNdx < scalarSize; compNdx++)
		{
			const float		in0			= ((const float*)inputs[0])[compNdx];
			const float		out0		= ((const float*)outputs[0])[compNdx];
			const deUint32	ulpDiff		= getUlpDiff(in0, out0);

			if (ulpDiff > maxUlpDiff)
			{
				m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(in0) << " with ULP threshold "
							<< tcu::toHex(maxUlpDiff) << ", got ULP diff " << tcu::toHex(ulpDiff);
				return false;
			}
		}

		return true;
	}
};

class FloorCase : public CommonFunctionCase
{
public:
	FloorCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "floor", shaderType)
	{
		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
		m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
		m_spec.source = "out0 = floor(in0);";
	}

	void getInputValues (int numValues, void* const* values) const
	{
		const Vec2 ranges[] =
		{
			Vec2(-2.0f,		2.0f),	// lowp
			Vec2(-1e3f,		1e3f),	// mediump
			Vec2(-1e7f,		1e7f)	// highp
		};

		de::Random				rnd			(deStringHash(getName()) ^ 0xac23fu);
		const glu::DataType		type		= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision	= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize	= glu::getDataTypeScalarSize(type);
		// Random cases.
		fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), (float*)values[0], numValues*scalarSize);

		// If precision is mediump, make sure values can be represented in fp16 exactly
		if (precision == glu::PRECISION_MEDIUMP)
		{
			for (int ndx = 0; ndx < numValues*scalarSize; ndx++)
				((float*)values[0])[ndx] = tcu::Float16(((float*)values[0])[ndx]).asFloat();
		}
	}

	bool compare (const void* const* inputs, const void* const* outputs)
	{
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize		= glu::getDataTypeScalarSize(type);

		if (precision == glu::PRECISION_HIGHP || precision == glu::PRECISION_MEDIUMP)
		{
			// Require exact result.
			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float		in0			= ((const float*)inputs[0])[compNdx];
				const float		out0		= ((const float*)outputs[0])[compNdx];
				const float		ref			= deFloatFloor(in0);

				const deUint32	ulpDiff		= getUlpDiff(out0, ref);

				if (ulpDiff > 0)
				{
					m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref) << ", got ULP diff " << tcu::toHex(ulpDiff);
					return false;
				}
			}
		}
		else
		{
			const int		mantissaBits	= getMinMantissaBits(precision);
			const deUint32	maxUlpDiff		= getMaxUlpDiffFromBits(mantissaBits);	// ULP diff for rounded integer value.
			const float		eps				= getEpsFromBits(1.0f, mantissaBits);	// epsilon for rounding bounds

			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float		in0			= ((const float*)inputs[0])[compNdx];
				const float		out0		= ((const float*)outputs[0])[compNdx];
				const int		minRes		= int(deFloatFloor(in0-eps));
				const int		maxRes		= int(deFloatFloor(in0+eps));
				bool			anyOk		= false;

				for (int roundedVal = minRes; roundedVal <= maxRes; roundedVal++)
				{
					const deUint32 ulpDiff = getUlpDiff(out0, float(roundedVal));

					if (ulpDiff <= maxUlpDiff)
					{
						anyOk = true;
						break;
					}
				}

				if (!anyOk)
				{
					m_failMsg << "Expected [" << compNdx << "] = [" << minRes << ", " << maxRes << "] with ULP threshold " << tcu::toHex(maxUlpDiff);
					return false;
				}
			}
		}

		return true;
	}
};

class TruncCase : public CommonFunctionCase
{
public:
	TruncCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "trunc", shaderType)
	{
		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
		m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
		m_spec.source = "out0 = trunc(in0);";
	}

	void getInputValues (int numValues, void* const* values) const
	{
		const Vec2 ranges[] =
		{
			Vec2(-2.0f,		2.0f),	// lowp
			Vec2(-1e3f,		1e3f),	// mediump
			Vec2(-1e7f,		1e7f)	// highp
		};

		de::Random				rnd				(deStringHash(getName()) ^ 0xac23fu);
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize		= glu::getDataTypeScalarSize(type);
		const float				specialCases[]	= { 0.0f, -0.0f, -0.9f, 0.9f, 1.0f, -1.0f };
		const int				numSpecialCases	= DE_LENGTH_OF_ARRAY(specialCases);

		// Special cases
		for (int caseNdx = 0; caseNdx < numSpecialCases; caseNdx++)
		{
			for (int scalarNdx = 0; scalarNdx < scalarSize; scalarNdx++)
				((float*)values[0])[caseNdx*scalarSize + scalarNdx] = specialCases[caseNdx];
		}

		// Random cases.
		fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), (float*)values[0] + scalarSize*numSpecialCases, (numValues-numSpecialCases)*scalarSize);

		// If precision is mediump, make sure values can be represented in fp16 exactly
		if (precision == glu::PRECISION_MEDIUMP)
		{
			for (int ndx = 0; ndx < numValues*scalarSize; ndx++)
				((float*)values[0])[ndx] = tcu::Float16(((float*)values[0])[ndx]).asFloat();
		}
	}

	bool compare (const void* const* inputs, const void* const* outputs)
	{
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize		= glu::getDataTypeScalarSize(type);

		if (precision == glu::PRECISION_HIGHP || precision == glu::PRECISION_MEDIUMP)
		{
			// Require exact result.
			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float		in0			= ((const float*)inputs[0])[compNdx];
				const float		out0		= ((const float*)outputs[0])[compNdx];
				const bool		isNeg		= tcu::Float32(in0).sign() < 0;
				const float		ref			= isNeg ? (-float(int(-in0))) : float(int(in0));

				// \note: trunc() function definition is a bit broad on negative zeros. Ignore result sign if zero.
				const deUint32	ulpDiff		= getUlpDiffIgnoreZeroSign(out0, ref);

				if (ulpDiff > 0)
				{
					m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref) << ", got ULP diff " << tcu::toHex(ulpDiff);
					return false;
				}
			}
		}
		else
		{
			const int		mantissaBits	= getMinMantissaBits(precision);
			const deUint32	maxUlpDiff		= getMaxUlpDiffFromBits(mantissaBits);	// ULP diff for rounded integer value.
			const float		eps				= getEpsFromBits(1.0f, mantissaBits);	// epsilon for rounding bounds

			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float		in0			= ((const float*)inputs[0])[compNdx];
				const float		out0		= ((const float*)outputs[0])[compNdx];
				const int		minRes		= int(in0-eps);
				const int		maxRes		= int(in0+eps);
				bool			anyOk		= false;

				for (int roundedVal = minRes; roundedVal <= maxRes; roundedVal++)
				{
					const deUint32 ulpDiff = getUlpDiffIgnoreZeroSign(out0, float(roundedVal));

					if (ulpDiff <= maxUlpDiff)
					{
						anyOk = true;
						break;
					}
				}

				if (!anyOk)
				{
					m_failMsg << "Expected [" << compNdx << "] = [" << minRes << ", " << maxRes << "] with ULP threshold " << tcu::toHex(maxUlpDiff);
					return false;
				}
			}
		}

		return true;
	}
};

class RoundCase : public CommonFunctionCase
{
public:
	RoundCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "round", shaderType)
	{
		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
		m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
		m_spec.source = "out0 = round(in0);";
	}

	void getInputValues (int numValues, void* const* values) const
	{
		const Vec2 ranges[] =
		{
			Vec2(-2.0f,		2.0f),	// lowp
			Vec2(-1e3f,		1e3f),	// mediump
			Vec2(-1e7f,		1e7f)	// highp
		};

		de::Random				rnd				(deStringHash(getName()) ^ 0xac23fu);
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize		= glu::getDataTypeScalarSize(type);
		int						numSpecialCases	= 0;

		// Special cases.
		if (precision != glu::PRECISION_LOWP)
		{
			DE_ASSERT(numValues >= 10);
			for (int ndx = 0; ndx < 10; ndx++)
			{
				const float v = de::clamp(float(ndx) - 5.5f, ranges[precision].x(), ranges[precision].y());
				std::fill((float*)values[0] + ndx*scalarSize, (float*)values[0] + (ndx+1)*scalarSize, v);
1348				numSpecialCases += 1;
1349			}
1350		}
1351
1352		// Random cases.
1353		fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), (float*)values[0] + numSpecialCases*scalarSize, (numValues-numSpecialCases)*scalarSize);
1354
1355		// If precision is mediump, make sure values can be represented in fp16 exactly
1356		if (precision == glu::PRECISION_MEDIUMP)
1357		{
1358			for (int ndx = 0; ndx < numValues*scalarSize; ndx++)
1359				((float*)values[0])[ndx] = tcu::Float16(((float*)values[0])[ndx]).asFloat();
1360		}
1361	}
1362
1363	bool compare (const void* const* inputs, const void* const* outputs)
1364	{
1365		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
1366		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
1367		const bool				hasZeroSign		= supportsSignedZero(precision);
1368		const int				scalarSize		= glu::getDataTypeScalarSize(type);
1369
1370		if (precision == glu::PRECISION_HIGHP || precision == glu::PRECISION_MEDIUMP)
1371		{
1372			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
1373			{
1374				const float		in0			= ((const float*)inputs[0])[compNdx];
1375				const float		out0		= ((const float*)outputs[0])[compNdx];
1376
1377				if (deFloatFrac(in0) == 0.5f)
1378				{
1379					// Allow both ceil(in) and floor(in)
1380					const float		ref0		= deFloatFloor(in0);
1381					const float		ref1		= deFloatCeil(in0);
1382					const deUint32	ulpDiff0	= hasZeroSign ? getUlpDiff(out0, ref0) : getUlpDiffIgnoreZeroSign(out0, ref0);
1383					const deUint32	ulpDiff1	= hasZeroSign ? getUlpDiff(out0, ref1) : getUlpDiffIgnoreZeroSign(out0, ref1);
1384
1385					if (ulpDiff0 > 0 && ulpDiff1 > 0)
1386					{
1387						m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref0) << " or " << HexFloat(ref1) << ", got ULP diff " << tcu::toHex(de::min(ulpDiff0, ulpDiff1));
1388						return false;
1389					}
1390				}
1391				else
1392				{
1393					// Require exact result
1394					const float		ref		= roundEven(in0);
1395					const deUint32	ulpDiff	= hasZeroSign ? getUlpDiff(out0, ref) : getUlpDiffIgnoreZeroSign(out0, ref);
1396
1397					if (ulpDiff > 0)
1398					{
1399						m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref) << ", got ULP diff " << tcu::toHex(ulpDiff);
1400						return false;
1401					}
1402				}
1403			}
1404		}
1405		else
1406		{
1407			const int		mantissaBits	= getMinMantissaBits(precision);
1408			const deUint32	maxUlpDiff		= getMaxUlpDiffFromBits(mantissaBits);	// ULP diff for rounded integer value.
1409			const float		eps				= getEpsFromBits(1.0f, mantissaBits);	// epsilon for rounding bounds
1410
1411			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
1412			{
1413				const float		in0			= ((const float*)inputs[0])[compNdx];
1414				const float		out0		= ((const float*)outputs[0])[compNdx];
1415				const int		minRes		= int(roundEven(in0-eps));
1416				const int		maxRes		= int(roundEven(in0+eps));
1417				bool			anyOk		= false;
1418
1419				for (int roundedVal = minRes; roundedVal <= maxRes; roundedVal++)
1420				{
1421					const deUint32 ulpDiff = getUlpDiffIgnoreZeroSign(out0, float(roundedVal));
1422
1423					if (ulpDiff <= maxUlpDiff)
1424					{
1425						anyOk = true;
1426						break;
1427					}
1428				}
1429
1430				if (!anyOk)
1431				{
1432					m_failMsg << "Expected [" << compNdx << "] = [" << minRes << ", " << maxRes << "] with ULP threshold " << tcu::toHex(maxUlpDiff);
1433					return false;
1434				}
1435			}
1436		}
1437
1438		return true;
1439	}
1440};
1441
1442class CeilCase : public CommonFunctionCase
1443{
1444public:
1445	CeilCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
1446		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "ceil", shaderType)
1447	{
1448		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
1449		m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
1450		m_spec.source = "out0 = ceil(in0);";
1451	}
1452
1453	void getInputValues (int numValues, void* const* values) const
1454	{
1455		const Vec2 ranges[] =
1456		{
1457			Vec2(-2.0f,		2.0f),	// lowp
1458			Vec2(-1e3f,		1e3f),	// mediump
1459			Vec2(-1e7f,		1e7f)	// highp
1460		};
1461
1462		de::Random				rnd			(deStringHash(getName()) ^ 0xac23fu);
1463		const glu::DataType		type		= m_spec.inputs[0].varType.getBasicType();
1464		const glu::Precision	precision	= m_spec.inputs[0].varType.getPrecision();
1465		const int				scalarSize	= glu::getDataTypeScalarSize(type);
1466
1467		// Random cases.
1468		fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), (float*)values[0], numValues*scalarSize);
1469
1470		// If precision is mediump, make sure values can be represented in fp16 exactly
1471		if (precision == glu::PRECISION_MEDIUMP)
1472		{
1473			for (int ndx = 0; ndx < numValues*scalarSize; ndx++)
1474				((float*)values[0])[ndx] = tcu::Float16(((float*)values[0])[ndx]).asFloat();
1475		}
1476	}
1477
1478	bool compare (const void* const* inputs, const void* const* outputs)
1479	{
1480		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
1481		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
1482		const bool				hasZeroSign		= supportsSignedZero(precision);
1483		const int				scalarSize		= glu::getDataTypeScalarSize(type);
1484
1485		if (precision == glu::PRECISION_HIGHP || precision == glu::PRECISION_MEDIUMP)
1486		{
1487			// Require exact result.
1488			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
1489			{
1490				const float		in0			= ((const float*)inputs[0])[compNdx];
1491				const float		out0		= ((const float*)outputs[0])[compNdx];
1492				const float		ref			= deFloatCeil(in0);
1493
1494				const deUint32	ulpDiff		= hasZeroSign ? getUlpDiff(out0, ref) : getUlpDiffIgnoreZeroSign(out0, ref);
1495
1496				if (ulpDiff > 0)
1497				{
1498					m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref) << ", got ULP diff " << tcu::toHex(ulpDiff);
1499					return false;
1500				}
1501			}
1502		}
1503		else
1504		{
1505			const int		mantissaBits	= getMinMantissaBits(precision);
1506			const deUint32	maxUlpDiff		= getMaxUlpDiffFromBits(mantissaBits);	// ULP diff for rounded integer value.
1507			const float		eps				= getEpsFromBits(1.0f, mantissaBits);	// epsilon for rounding bounds
1508
1509			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
1510			{
1511				const float		in0			= ((const float*)inputs[0])[compNdx];
1512				const float		out0		= ((const float*)outputs[0])[compNdx];
1513				const int		minRes		= int(deFloatCeil(in0-eps));
1514				const int		maxRes		= int(deFloatCeil(in0+eps));
1515				bool			anyOk		= false;
1516
1517				for (int roundedVal = minRes; roundedVal <= maxRes; roundedVal++)
1518				{
1519					const deUint32 ulpDiff = getUlpDiffIgnoreZeroSign(out0, float(roundedVal));
1520
1521					if (ulpDiff <= maxUlpDiff)
1522					{
1523						anyOk = true;
1524						break;
1525					}
1526				}
1527
1528				if (!anyOk && de::inRange(0, minRes, maxRes))
1529				{
1530					// Allow -0 as well.
1531					const int ulpDiff = de::abs((int)tcu::Float32(out0).bits() - (int)0x80000000u);
1532					anyOk = ((deUint32)ulpDiff <= maxUlpDiff);
1533				}
1534
1535				if (!anyOk)
1536				{
1537					m_failMsg << "Expected [" << compNdx << "] = [" << minRes << ", " << maxRes << "] with ULP threshold " << tcu::toHex(maxUlpDiff);
1538					return false;
1539				}
1540			}
1541		}
1542
1543		return true;
1544	}
1545};
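
// \note In the ceil case the relaxed path additionally accepts outputs within maxUlpDiff ULPs of
//       the negative-zero bit pattern (0x80000000) whenever 0 lies inside [minRes, maxRes], since
//       e.g. ceil() of a small negative input such as -0.25f may well produce -0.0.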
1546
1547class FractCase : public CommonFunctionCase
1548{
1549public:
1550	FractCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
1551		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "fract", shaderType)
1552	{
1553		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
1554		m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
1555		m_spec.source = "out0 = fract(in0);";
1556	}
1557
1558	void getInputValues (int numValues, void* const* values) const
1559	{
1560		const Vec2 ranges[] =
1561		{
1562			Vec2(-2.0f,		2.0f),	// lowp
1563			Vec2(-1e3f,		1e3f),	// mediump
1564			Vec2(-1e7f,		1e7f)	// highp
1565		};
1566
1567		de::Random				rnd				(deStringHash(getName()) ^ 0xac23fu);
1568		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
1569		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
1570		const int				scalarSize		= glu::getDataTypeScalarSize(type);
1571		int						numSpecialCases	= 0;
1572
1573		// Special cases.
1574		if (precision != glu::PRECISION_LOWP)
1575		{
1576			DE_ASSERT(numValues >= 10);
1577			for (int ndx = 0; ndx < 10; ndx++)
1578			{
1579				const float v = de::clamp(float(ndx) - 5.5f, ranges[precision].x(), ranges[precision].y());
1580				std::fill((float*)values[0] + ndx*scalarSize, (float*)values[0] + (ndx+1)*scalarSize, v);
1581				numSpecialCases += 1;
1582			}
1583		}
1584
1585		// Random cases.
1586		fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), (float*)values[0] + numSpecialCases*scalarSize, (numValues-numSpecialCases)*scalarSize);
1587
1588		// If precision is mediump, make sure values can be represented in fp16 exactly
1589		if (precision == glu::PRECISION_MEDIUMP)
1590		{
1591			for (int ndx = 0; ndx < numValues*scalarSize; ndx++)
1592				((float*)values[0])[ndx] = tcu::Float16(((float*)values[0])[ndx]).asFloat();
1593		}
1594	}
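
	// \note For mediump and highp the first ten inputs above are the half-integers -5.5 .. +4.5
	//       (clamped to the precision's range), which exercise fract() exactly at the x.5
	//       boundaries; the remaining inputs are random.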
1595
1596	bool compare (const void* const* inputs, const void* const* outputs)
1597	{
1598		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
1599		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
1600		const bool				hasZeroSign		= supportsSignedZero(precision);
1601		const int				scalarSize		= glu::getDataTypeScalarSize(type);
1602
1603		if (precision == glu::PRECISION_HIGHP || precision == glu::PRECISION_MEDIUMP)
1604		{
1605			// Require exact result.
1606			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
1607			{
1608				const float		in0			= ((const float*)inputs[0])[compNdx];
1609				const float		out0		= ((const float*)outputs[0])[compNdx];
1610				const float		ref			= deFloatFrac(in0);
1611
1612				const deUint32	ulpDiff		= hasZeroSign ? getUlpDiff(out0, ref) : getUlpDiffIgnoreZeroSign(out0, ref);
1613
1614				if (ulpDiff > 0)
1615				{
1616					m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref) << ", got ULP diff " << tcu::toHex(ulpDiff);
1617					return false;
1618				}
1619			}
1620		}
1621		else
1622		{
1623			const int		mantissaBits	= getMinMantissaBits(precision);
1624			const float		eps				= getEpsFromBits(1.0f, mantissaBits);	// epsilon for rounding bounds
1625
1626			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
1627			{
1628				const float		in0			= ((const float*)inputs[0])[compNdx];
1629				const float		out0		= ((const float*)outputs[0])[compNdx];
1630
1631				if (int(deFloatFloor(in0-eps)) == int(deFloatFloor(in0+eps)))
1632				{
1633					const float		ref			= deFloatFrac(in0);
1634					const int		bitsLost	= numBitsLostInOp(in0, ref);
1635					const deUint32	maxUlpDiff	= getMaxUlpDiffFromBits(de::max(0, mantissaBits-bitsLost));	// ULP diff for rounded integer value.
1636					const deUint32	ulpDiff		= getUlpDiffIgnoreZeroSign(out0, ref);
1637
1638					if (ulpDiff > maxUlpDiff)
1639					{
1640						m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref) << " with ULP threshold " << tcu::toHex(maxUlpDiff) << ", got diff " << tcu::toHex(ulpDiff);
1641						return false;
1642					}
1643				}
1644				else
1645				{
1646					if (out0 >= 1.0f)
1647					{
1648						m_failMsg << "Expected [" << compNdx << "] < 1.0";
1649						return false;
1650					}
1651				}
1652			}
1653		}
1654
1655		return true;
1656	}
1657};
1658
1659static inline void frexp (float in, float* significand, int* exponent)
1660{
1661	const tcu::Float32 fpValue(in);
1662
1663	if (!fpValue.isZero())
1664	{
1665		// Construct a float that has the same mantissa but an exponent of -1, i.e. a significand whose magnitude is in [0.5, 1).
1666		*significand	= tcu::Float32::construct(fpValue.sign(), -1, fpValue.mantissa()).asFloat();
1667		*exponent		= fpValue.exponent()+1;
1668	}
1669	else
1670	{
1671		*significand	= fpValue.sign() < 0 ? -0.0f : 0.0f;
1672		*exponent		= 0;
1673	}
1674}
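
// For instance, with the helper above frexp(-2.0f, &s, &e) yields s = -0.5f, e = 2 (since
// -2.0 == -0.5 * 2^2) and frexp(0.75f, &s, &e) yields s = 0.75f, e = 0; a zero input produces a
// zero significand of matching sign and a zero exponent.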
1675
1676static inline float ldexp (float significand, int exponent)
1677{
1678	const tcu::Float32 mant(significand);
1679
1680	if (exponent == 0 && mant.isZero())
1681	{
1682		return mant.sign() < 0 ? -0.0f : 0.0f;
1683	}
1684	else
1685	{
1686		return tcu::Float32::construct(mant.sign(), exponent+mant.exponent(), mant.mantissa()).asFloat();
1687	}
1688}
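
// Correspondingly ldexp(-0.5f, 2) == -2.0f, and ldexp(+/-0.0f, 0) returns a zero of the same sign.
// The result is built directly from the sign, exponent and mantissa bits, so keeping the combined
// exponent in the normal range is the caller's responsibility (the hard ldexp cases below are
// constructed to guarantee this).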
1689
1690class FrexpCase : public CommonFunctionCase
1691{
1692public:
1693	FrexpCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
1694		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "frexp", shaderType)
1695	{
1696		const int			vecSize		= glu::getDataTypeScalarSize(baseType);
1697		const glu::DataType	intType		= vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT;
1698
1699		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
1700		m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, glu::PRECISION_HIGHP)));
1701		m_spec.outputs.push_back(Symbol("out1", glu::VarType(intType, glu::PRECISION_HIGHP)));
1702		m_spec.source = "out0 = frexp(in0, out1);";
1703	}
1704
1705	void getInputValues (int numValues, void* const* values) const
1706	{
1707		const Vec2 ranges[] =
1708		{
1709			Vec2(-2.0f,		2.0f),	// lowp
1710			Vec2(-1e3f,		1e3f),	// mediump
1711			Vec2(-1e7f,		1e7f)	// highp
1712		};
1713
1714		de::Random				rnd			(deStringHash(getName()) ^ 0x2790au);
1715		const glu::DataType		type		= m_spec.inputs[0].varType.getBasicType();
1716		const glu::Precision	precision	= m_spec.inputs[0].varType.getPrecision();
1717		const int				scalarSize	= glu::getDataTypeScalarSize(type);
1718
1719		// Special cases
1720		for (int compNdx = 0; compNdx < scalarSize; compNdx++)
1721		{
1722			((float*)values[0])[scalarSize*0 + compNdx] = 0.0f;
1723			((float*)values[0])[scalarSize*1 + compNdx] = -0.0f;
1724			((float*)values[0])[scalarSize*2 + compNdx] = 0.5f;
1725			((float*)values[0])[scalarSize*3 + compNdx] = -0.5f;
1726			((float*)values[0])[scalarSize*4 + compNdx] = 1.0f;
1727			((float*)values[0])[scalarSize*5 + compNdx] = -1.0f;
1728			((float*)values[0])[scalarSize*6 + compNdx] = 2.0f;
1729			((float*)values[0])[scalarSize*7 + compNdx] = -2.0f;
1730		}
1731
1732		fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), (float*)values[0] + 8*scalarSize, (numValues-8)*scalarSize);
1733	}
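
	// \note The first eight values above are the exactly representable special cases 0, -0,
	//       +/-0.5, +/-1 and +/-2, splatted across all vector components; the rest are random
	//       values from the precision-dependent range.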
1734
1735	bool compare (const void* const* inputs, const void* const* outputs)
1736	{
1737		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
1738		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
1739		const int				scalarSize		= glu::getDataTypeScalarSize(type);
1740		const bool				signedZero		= supportsSignedZero(precision);
1741
1742		const int				mantissaBits	= getMinMantissaBits(precision);
1743		const deUint32			maxUlpDiff		= getMaxUlpDiffFromBits(mantissaBits);
1744
1745		for (int compNdx = 0; compNdx < scalarSize; compNdx++)
1746		{
1747			const float		in0			= ((const float*)inputs[0])[compNdx];
1748			const float		out0		= ((const float*)outputs[0])[compNdx];
1749			const int		out1		= ((const int*)outputs[1])[compNdx];
1750
1751			float			refOut0;
1752			int				refOut1;
1753
1754			frexp(in0, &refOut0, &refOut1);
1755
1756			const deUint32	ulpDiff0	= signedZero ? getUlpDiff(out0, refOut0) : getUlpDiffIgnoreZeroSign(out0, refOut0);
1757
1758			if (ulpDiff0 > maxUlpDiff || out1 != refOut1)
1759			{
1760				m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(refOut0) << ", " << refOut1 << " with ULP threshold "
1761						  << tcu::toHex(maxUlpDiff) << ", got ULP diff " << tcu::toHex(ulpDiff0);
1762				return false;
1763			}
1764		}
1765
1766		return true;
1767	}
1768};
1769
1770class LdexpCase : public CommonFunctionCase
1771{
1772public:
1773	LdexpCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
1774		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "ldexp", shaderType)
1775	{
1776		const int			vecSize		= glu::getDataTypeScalarSize(baseType);
1777		const glu::DataType	intType		= vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT;
1778
1779		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
1780		m_spec.inputs.push_back(Symbol("in1", glu::VarType(intType, glu::PRECISION_HIGHP)));
1781		m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, glu::PRECISION_HIGHP)));
1782		m_spec.source = "out0 = ldexp(in0, in1);";
1783	}
1784
1785	void getInputValues (int numValues, void* const* values) const
1786	{
1787		const Vec2 ranges[] =
1788		{
1789			Vec2(-2.0f,		2.0f),	// lowp
1790			Vec2(-1e3f,		1e3f),	// mediump
1791			Vec2(-1e7f,		1e7f)	// highp
1792		};
1793
1794		de::Random				rnd					(deStringHash(getName()) ^ 0x2790au);
1795		const glu::DataType		type				= m_spec.inputs[0].varType.getBasicType();
1796		const glu::Precision	precision			= m_spec.inputs[0].varType.getPrecision();
1797		const int				scalarSize			= glu::getDataTypeScalarSize(type);
1798		int						valueNdx			= 0;
1799
1800		{
1801			const float easySpecialCases[] = { 0.0f, -0.0f, 0.5f, -0.5f, 1.0f, -1.0f, 2.0f, -2.0f };
1802
1803			DE_ASSERT(valueNdx + DE_LENGTH_OF_ARRAY(easySpecialCases) <= numValues);
1804			for (int caseNdx = 0; caseNdx < DE_LENGTH_OF_ARRAY(easySpecialCases); caseNdx++)
1805			{
1806				float	in0;
1807				int		in1;
1808
1809				frexp(easySpecialCases[caseNdx], &in0, &in1);
1810
1811				for (int compNdx = 0; compNdx < scalarSize; compNdx++)
1812				{
1813					((float*)values[0])[valueNdx*scalarSize + compNdx] = in0;
1814					((int*)values[1])[valueNdx*scalarSize + compNdx] = in1;
1815				}
1816
1817				valueNdx += 1;
1818			}
1819		}
1820
1821		{
1822			// \note lowp and mediump cannot necessarily represent the values used in the hard cases, so only easy cases are generated for those precisions.
1823			const int numEasyRandomCases = precision == glu::PRECISION_HIGHP ? 50 : (numValues-valueNdx);
1824
1825			DE_ASSERT(valueNdx + numEasyRandomCases <= numValues);
1826			for (int caseNdx = 0; caseNdx < numEasyRandomCases; caseNdx++)
1827			{
1828				for (int compNdx = 0; compNdx < scalarSize; compNdx++)
1829				{
1830					const float	in	= rnd.getFloat(ranges[precision].x(), ranges[precision].y());
1831					float		in0;
1832					int			in1;
1833
1834					frexp(in, &in0, &in1);
1835
1836					((float*)values[0])[valueNdx*scalarSize + compNdx] = in0;
1837					((int*)values[1])[valueNdx*scalarSize + compNdx] = in1;
1838				}
1839
1840				valueNdx += 1;
1841			}
1842		}
1843
1844		{
1845			const int numHardRandomCases = numValues-valueNdx;
1846			DE_ASSERT(numHardRandomCases >= 0 && valueNdx + numHardRandomCases <= numValues);
1847
1848			for (int caseNdx = 0; caseNdx < numHardRandomCases; caseNdx++)
1849			{
1850				for (int compNdx = 0; compNdx < scalarSize; compNdx++)
1851				{
1852					const int		fpExp		= rnd.getInt(-126, 127);
1853					const int		sign		= rnd.getBool() ? -1 : +1;
1854					const deUint32	mantissa	= (1u<<23) | (rnd.getUint32() & ((1u<<23)-1));
1855					const int		in1			= rnd.getInt(de::max(-126, -126-fpExp), de::min(127, 127-fpExp));
1856					const float		in0			= tcu::Float32::construct(sign, fpExp, mantissa).asFloat();
1857
1858					DE_ASSERT(de::inRange(in1, -126, 127)); // See Khronos bug 11180
1859					DE_ASSERT(de::inRange(in1+fpExp, -126, 127));
1860
1861					const float		out			= ldexp(in0, in1);
1862
1863					DE_ASSERT(!tcu::Float32(out).isInf() && !tcu::Float32(out).isDenorm());
1864					DE_UNREF(out);
1865
1866					((float*)values[0])[valueNdx*scalarSize + compNdx] = in0;
1867					((int*)values[1])[valueNdx*scalarSize + compNdx] = in1;
1868				}
1869
1870				valueNdx += 1;
1871			}
1872		}
1873	}
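
	// \note In the "hard" random cases above, in0 is built from a random sign, a full random
	//       mantissa and a normal exponent fpExp in [-126, 127], and in1 is then drawn so that
	//       both in1 and in1+fpExp stay in [-126, 127]; e.g. for fpExp == 100 this gives
	//       in1 in [-126, 27], keeping ldexp(in0, in1) a finite, normal float (as the
	//       assertions verify).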
1874
1875	bool compare (const void* const* inputs, const void* const* outputs)
1876	{
1877		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
1878		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
1879		const int				scalarSize		= glu::getDataTypeScalarSize(type);
1880
1881		const int				mantissaBits	= getMinMantissaBits(precision);
1882		const deUint32			maxUlpDiff		= getMaxUlpDiffFromBits(mantissaBits);
1883
1884		for (int compNdx = 0; compNdx < scalarSize; compNdx++)
1885		{
1886			const float		in0			= ((const float*)inputs[0])[compNdx];
1887			const int		in1			= ((const int*)inputs[1])[compNdx];
1888			const float		out0		= ((const float*)outputs[0])[compNdx];
1889			const float		refOut0		= ldexp(in0, in1);
1890			const deUint32	ulpDiff		= getUlpDiffIgnoreZeroSign(out0, refOut0);
1891
1892			const int		inExp		= tcu::Float32(in0).exponent();
1893
1894			if (ulpDiff > maxUlpDiff)
1895			{
1896				m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(refOut0) << ", (exp = " << inExp << ") with ULP threshold "
1897						  << tcu::toHex(maxUlpDiff) << ", got ULP diff " << tcu::toHex(ulpDiff);
1898				return false;
1899			}
1900		}
1901
1902		return true;
1903	}
1904};
1905
1906class FmaCase : public CommonFunctionCase
1907{
1908public:
1909	FmaCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
1910		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "fma", shaderType)
1911	{
1912		m_spec.inputs.push_back(Symbol("a", glu::VarType(baseType, precision)));
1913		m_spec.inputs.push_back(Symbol("b", glu::VarType(baseType, precision)));
1914		m_spec.inputs.push_back(Symbol("c", glu::VarType(baseType, precision)));
1915		m_spec.outputs.push_back(Symbol("res", glu::VarType(baseType, precision)));
1916		m_spec.source = "res = fma(a, b, c);";
1917		m_spec.globalDeclarations = "#extension GL_EXT_gpu_shader5 : require\n";
1918	}
1919
1920	void init (void)
1921	{
1922		if (!m_context.getContextInfo().isExtensionSupported("GL_EXT_gpu_shader5"))
1923			throw tcu::NotSupportedError("GL_EXT_gpu_shader5 not supported");
1924
1925		CommonFunctionCase::init();
1926	}
1927
1928	void getInputValues (int numValues, void* const* values) const
1929	{
1930		const Vec2 ranges[] =
1931		{
1932			Vec2(-2.0f,		2.0f),	// lowp
1933			Vec2(-127.f,	127.f),	// mediump
1934			Vec2(-1e7f,		1e7f)	// highp
1935		};
1936
1937		de::Random				rnd							(deStringHash(getName()) ^ 0xac23fu);
1938		const glu::DataType		type						= m_spec.inputs[0].varType.getBasicType();
1939		const glu::Precision	precision					= m_spec.inputs[0].varType.getPrecision();
1940		const int				scalarSize					= glu::getDataTypeScalarSize(type);
1941		const int				numMantissaBits				= getMinMantissaBits(precision);
1942		const int				maxNormalizedValueExponent	= getMaxNormalizedValueExponent(precision);
1943		const int				minNormalizedValueExponent	= getMinNormalizedValueExponent(precision);
1944		const deUint32			representableMantissaMask	= ((deUint32(1) << numMantissaBits) - 1) << (23 - (deUint32)numMantissaBits);
1945		const float				specialCases[][3]			=
1946		{
1947			// a		b		c
1948			{ 0.0f,		0.0f,	0.0f },
1949			{ 0.0f,		1.0f,	0.0f },
1950			{ 0.0f,		0.0f,	-1.0f },
1951			{ 1.0f,		1.0f,	0.0f },
1952			{ 1.0f,		1.0f,	1.0f },
1953			{ -1.0f,	1.0f,	0.0f },
1954			{ 1.0f,		-1.0f,	0.0f },
1955			{ -1.0f,	-1.0f,	0.0f },
1956			{ -0.0f,	1.0f,	0.0f },
1957			{ 1.0f,		-0.0f,	0.0f }
1958		};
1959		const int				numSpecialCases				= DE_LENGTH_OF_ARRAY(specialCases);
1960
1961		// Special cases
1962		for (int caseNdx = 0; caseNdx < numSpecialCases; caseNdx++)
1963		{
1964			for (int inputNdx = 0; inputNdx < 3; inputNdx++)
1965			{
1966				for (int scalarNdx = 0; scalarNdx < scalarSize; scalarNdx++)
1967					((float*)values[inputNdx])[caseNdx*scalarSize + scalarNdx] = specialCases[caseNdx][inputNdx];
1968			}
1969		}
1970
1971		// Random cases.
1972		{
1973			const int	numScalars	= (numValues-numSpecialCases)*scalarSize;
1974			const int	offs		= scalarSize*numSpecialCases;
1975
1976			for (int inputNdx = 0; inputNdx < 3; inputNdx++)
1977				fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), (float*)values[inputNdx] + offs, numScalars);
1978		}
1979
1980		// Make sure the values are representable in the target format
1981		if (precision != glu::PRECISION_HIGHP)
1982		{
1983			const float	largestRepresentableValue	= tcu::Float32::constructBits(+1, maxNormalizedValueExponent, ((1u << numMantissaBits) - 1u) << (23u - (deUint32)numMantissaBits)).asFloat();
1984
1985			// Zero is not required to be representable in the target format; replace it with a tiny positive normal value instead.
1986			const float zeroReplacement				= tcu::Float32::constructBits(+1, minNormalizedValueExponent, 1).asFloat();
1987
1988			for (int inputNdx = 0; inputNdx < 3; inputNdx++)
1989			{
1990				for (int caseNdx = 0; caseNdx < numValues; ++caseNdx)
1991				{
1992					for (int scalarNdx = 0; scalarNdx < scalarSize; scalarNdx++)
1993					{
1994						float&				value					= ((float*)values[inputNdx])[caseNdx * scalarSize + scalarNdx];
1995						const tcu::Float32	float32Representation	(value);
1996
1997						// replace values too small to be normal in the target format with the zero replacement above
1998						if (float32Representation.exponent() < minNormalizedValueExponent)
1999						{
2000							value = zeroReplacement;
2001						}
2002						// clamp too large values
2003						else if (float32Representation.exponent() > maxNormalizedValueExponent)
2004						{
2005							value = (float32Representation.sign() == +1) ? (largestRepresentableValue) : (-largestRepresentableValue);
2006						}
2007						// remove unrepresentable mantissa bits
2008						else
2009						{
2010							const tcu::Float32	targetRepresentation	(tcu::Float32::constructBits(float32Representation.sign(),
2011																									 float32Representation.exponent(),
2012																									 float32Representation.mantissaBits() & representableMantissaMask));
2013
2014							value = targetRepresentation.asFloat();
2015						}
2016					}
2017				}
2018			}
2019		}
2020	}
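
	// \note The quantization step above keeps every generated input representable in the target
	//       precision: exponents are clamped to the normalized range and the mantissa is masked
	//       down to numMantissaBits bits. For example, with numMantissaBits == 10 the mask
	//       ((1u<<10)-1) << (23-10) == 0x7FE000 keeps only the ten most significant mantissa
	//       bits, so the a*b + c reference in compare() is computed from exactly representable
	//       values.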
2021
2022	bool compare (const void* const* inputs, const void* const* outputs)
2023	{
2024		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
2025		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
2026		const int				scalarSize		= glu::getDataTypeScalarSize(type);
2027		const bool				signedZero		= supportsSignedZero(precision);
2028
2029		const int				mantissaBits	= getMinMantissaBits(precision);
2030
2031		for (int compNdx = 0; compNdx < scalarSize; compNdx++)
2032		{
2033			const float		a			= ((const float*)inputs[0])[compNdx];
2034			const float		b			= ((const float*)inputs[1])[compNdx];
2035			const float		c			= ((const float*)inputs[2])[compNdx];
2036			const float		res			= ((const float*)outputs[0])[compNdx];
2037			const float		ref			= a*b + c;
2038
2039			const int		numBitsLost	= 1; // allow last bit to vary
2040			const deUint32	maxUlpDiff	= getMaxUlpDiffFromBits(de::max(0, mantissaBits-numBitsLost));
2041
2042			const deUint32	ulpDiff		= signedZero ? getUlpDiff(res, ref) : getUlpDiffIgnoreZeroSign(res, ref);
2043
2044			if (ulpDiff > maxUlpDiff)
2045			{
2046				m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref) << " with ULP threshold "
2047						  << tcu::toHex(maxUlpDiff) << ", got ULP diff " << tcu::toHex(ulpDiff);
2048				return false;
2049			}
2050		}
2051
2052		return true;
2053	}
2054};
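
// \note The fma comparison above checks against an unfused single-precision a*b + c and allows
//       one extra bit of error (numBitsLost = 1) on top of the precision's minimum mantissa bits,
//       presumably so that both fused and unfused implementations can pass.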
2055
2056ShaderCommonFunctionTests::ShaderCommonFunctionTests (Context& context)
2057	: TestCaseGroup(context, "common", "Common function tests")
2058{
2059}
2060
2061ShaderCommonFunctionTests::~ShaderCommonFunctionTests (void)
2062{
2063}
2064
2065template<class TestClass>
2066static void addFunctionCases (TestCaseGroup* parent, const char* functionName, bool floatTypes, bool intTypes, bool uintTypes, deUint32 shaderBits)
2067{
2068	tcu::TestCaseGroup* group = new tcu::TestCaseGroup(parent->getTestContext(), functionName, functionName);
2069	parent->addChild(group);
2070
2071	const glu::DataType scalarTypes[] =
2072	{
2073		glu::TYPE_FLOAT,
2074		glu::TYPE_INT,
2075		glu::TYPE_UINT
2076	};
2077
2078	for (int scalarTypeNdx = 0; scalarTypeNdx < DE_LENGTH_OF_ARRAY(scalarTypes); scalarTypeNdx++)
2079	{
2080		const glu::DataType scalarType = scalarTypes[scalarTypeNdx];
2081
2082		if ((!floatTypes && scalarType == glu::TYPE_FLOAT)	||
2083			(!intTypes && scalarType == glu::TYPE_INT)		||
2084			(!uintTypes && scalarType == glu::TYPE_UINT))
2085			continue;
2086
2087		for (int vecSize = 1; vecSize <= 4; vecSize++)
2088		{
2089			for (int prec = glu::PRECISION_LOWP; prec <= glu::PRECISION_HIGHP; prec++)
2090			{
2091				for (int shaderTypeNdx = 0; shaderTypeNdx < glu::SHADERTYPE_LAST; shaderTypeNdx++)
2092				{
2093					if (shaderBits & (1<<shaderTypeNdx))
2094						group->addChild(new TestClass(parent->getContext(), glu::DataType(scalarType + vecSize - 1), glu::Precision(prec), glu::ShaderType(shaderTypeNdx)));
2095				}
2096			}
2097		}
2098	}
2099}
2100
2101void ShaderCommonFunctionTests::init (void)
2102{
2103	enum
2104	{
2105		VS = (1<<glu::SHADERTYPE_VERTEX),
2106		TC = (1<<glu::SHADERTYPE_TESSELLATION_CONTROL),
2107		TE = (1<<glu::SHADERTYPE_TESSELLATION_EVALUATION),
2108		GS = (1<<glu::SHADERTYPE_GEOMETRY),
2109		FS = (1<<glu::SHADERTYPE_FRAGMENT),
2110		CS = (1<<glu::SHADERTYPE_COMPUTE),
2111
2112		ALL_SHADERS = VS|TC|TE|GS|FS|CS,
2113		NEW_SHADERS = TC|TE|GS|CS,
2114	};
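
	// \note Most of these functions already have vertex and fragment shader coverage in the
	//       ES 3.0 module, which is presumably why the cases below are added only for the newer
	//       shader stages (NEW_SHADERS); frexp(), ldexp() and fma() are new built-ins and get
	//       ALL_SHADERS coverage.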
2115
2116	//																	Float?	Int?	Uint?	Shaders
2117	addFunctionCases<AbsCase>				(this,	"abs",				true,	true,	false,	NEW_SHADERS);
2118	addFunctionCases<SignCase>				(this,	"sign",				true,	true,	false,	NEW_SHADERS);
2119	addFunctionCases<FloorCase>				(this,	"floor",			true,	false,	false,	NEW_SHADERS);
2120	addFunctionCases<TruncCase>				(this,	"trunc",			true,	false,	false,	NEW_SHADERS);
2121	addFunctionCases<RoundCase>				(this,	"round",			true,	false,	false,	NEW_SHADERS);
2122	addFunctionCases<RoundEvenCase>			(this,	"roundeven",		true,	false,	false,	NEW_SHADERS);
2123	addFunctionCases<CeilCase>				(this,	"ceil",				true,	false,	false,	NEW_SHADERS);
2124	addFunctionCases<FractCase>				(this,	"fract",			true,	false,	false,	NEW_SHADERS);
2125	// mod
2126	addFunctionCases<ModfCase>				(this,	"modf",				true,	false,	false,	NEW_SHADERS);
2127	// min
2128	// max
2129	// clamp
2130	// mix
2131	// step
2132	// smoothstep
2133	addFunctionCases<IsnanCase>				(this,	"isnan",			true,	false,	false,	NEW_SHADERS);
2134	addFunctionCases<IsinfCase>				(this,	"isinf",			true,	false,	false,	NEW_SHADERS);
2135	addFunctionCases<FloatBitsToIntCase>	(this,	"floatbitstoint",	true,	false,	false,	NEW_SHADERS);
2136	addFunctionCases<FloatBitsToUintCase>	(this,	"floatbitstouint",	true,	false,	false,	NEW_SHADERS);
2137
2138	addFunctionCases<FrexpCase>				(this,	"frexp",			true,	false,	false,	ALL_SHADERS);
2139	addFunctionCases<LdexpCase>				(this,	"ldexp",			true,	false,	false,	ALL_SHADERS);
2140	addFunctionCases<FmaCase>				(this,	"fma",				true,	false,	false,	ALL_SHADERS);
2141
2142	// (u)intBitsToFloat()
2143	{
2144		const deUint32		shaderBits	= NEW_SHADERS;
2145		tcu::TestCaseGroup* intGroup	= new tcu::TestCaseGroup(m_testCtx, "intbitstofloat",	"intBitsToFloat() Tests");
2146		tcu::TestCaseGroup* uintGroup	= new tcu::TestCaseGroup(m_testCtx, "uintbitstofloat",	"uintBitsToFloat() Tests");
2147
2148		addChild(intGroup);
2149		addChild(uintGroup);
2150
2151		for (int vecSize = 1; vecSize < 4; vecSize++)
2152		{
2153			const glu::DataType		intType		= vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT;
2154			const glu::DataType		uintType	= vecSize > 1 ? glu::getDataTypeUintVec(vecSize) : glu::TYPE_UINT;
2155
2156			for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++)
2157			{
2158				if (shaderBits & (1<<shaderType))
2159				{
2160					intGroup->addChild(new BitsToFloatCase(m_context, intType, glu::ShaderType(shaderType)));
2161					uintGroup->addChild(new BitsToFloatCase(m_context, uintType, glu::ShaderType(shaderType)));
2162				}
2163			}
2164		}
2165	}
2166}
2167
2168} // Functional
2169} // gles31
2170} // deqp
2171