// es31fShaderCommonFunctionTests.cpp revision e5fc2a804322422a6d50270e9e00fb1c193b6f90
/*-------------------------------------------------------------------------
 * drawElements Quality Program OpenGL ES 3.1 Module
 * -------------------------------------------------
 *
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Common built-in function tests.
 *//*--------------------------------------------------------------------*/

#include "es31fShaderCommonFunctionTests.hpp"
#include "gluContextInfo.hpp"
#include "glsShaderExecUtil.hpp"
#include "tcuTestLog.hpp"
#include "tcuFormatUtil.hpp"
#include "tcuFloat.hpp"
#include "tcuInterval.hpp"
#include "tcuFloatFormat.hpp"
#include "deRandom.hpp"
#include "deMath.h"
#include "deString.h"

namespace deqp
{
namespace gles31
{
namespace Functional
{

using std::vector;
using std::string;
using tcu::TestLog;
using namespace gls::ShaderExecUtil;

using tcu::Vec2;
using tcu::Vec3;
using tcu::Vec4;
using tcu::IVec2;
using tcu::IVec3;
using tcu::IVec4;

// Utilities

//! Typed, non-owning view over a raw buffer, exposing it as an array of vectors.
template<typename T, int Size>
struct VecArrayAccess
{
public:
    VecArrayAccess (const void* ptr) : m_array((tcu::Vector<T, Size>*)ptr) {}
    ~VecArrayAccess (void) {}

    const tcu::Vector<T, Size>& operator[] (size_t offset) const { return m_array[offset]; }
    tcu::Vector<T, Size>&       operator[] (size_t offset)       { return m_array[offset]; }

private:
    tcu::Vector<T, Size>* m_array; // Non-owning pointer into caller-provided storage.
};

//! Uniformly distributed random scalar in [minValue, maxValue]; specialized per scalar type.
template<typename T> T randomScalar (de::Random& rnd, T minValue, T maxValue);
template<> inline float    randomScalar (de::Random& rnd, float minValue, float maxValue)       { return rnd.getFloat(minValue, maxValue); }
template<> inline deInt32  randomScalar (de::Random& rnd, deInt32 minValue, deInt32 maxValue)   { return rnd.getInt(minValue, maxValue); }
template<> inline deUint32 randomScalar (de::Random& rnd, deUint32 minValue, deUint32 maxValue) { return minValue + rnd.getUint32() % (maxValue - minValue + 1); }

//! Component-wise random vector with per-component bounds.
template<typename T, int Size>
inline tcu::Vector<T, Size> randomVector (de::Random& rnd, const tcu::Vector<T, Size>& minValue, const tcu::Vector<T, Size>& maxValue)
{
    tcu::Vector<T, Size> res;
    for (int ndx = 0; ndx < Size; ndx++)
        res[ndx] = randomScalar<T>(rnd, minValue[ndx], maxValue[ndx]);
    return res;
}

//! Fill dst (viewed as an array of vectors) with numValues random vectors, starting at 'offset' vectors in.
template<typename T, int Size>
static void fillRandomVectors (de::Random& rnd, const tcu::Vector<T, Size>& minValue, const tcu::Vector<T, Size>& maxValue, void* dst, int numValues, int offset = 0)
{
    VecArrayAccess<T, Size> access(dst);
    for (int ndx = 0; ndx < numValues; ndx++)
        access[offset + ndx] = randomVector<T, Size>(rnd, minValue, maxValue);
}

//! Fill dst (viewed as a scalar array) with numValues random scalars, starting at 'offset' scalars in.
template<typename T>
static void fillRandomScalars (de::Random& rnd, T minValue, T maxValue, void* dst, int numValues, int offset = 0)
{
    T* typedPtr = (T*)dst;
    for (int ndx = 0; ndx < numValues; ndx++)
        typedPtr[offset + ndx] = randomScalar<T>(rnd, minValue, maxValue);
}

//! Number of mantissa bits that cannot survive an operation whose result has a
//! smaller exponent than its input (the mantissa is shifted right by the difference).
inline int numBitsLostInOp (float input, float output)
{
    const int inExp  = tcu::Float32(input).exponent();
    const int outExp = tcu::Float32(output).exponent();

    return de::max(0, inExp-outExp); // Lost due to mantissa shift.
}

//! Distance between two floats expressed as the difference of their raw bit patterns (ULPs).
inline deUint32 getUlpDiff (float a, float b)
{
    const deUint32 aBits = tcu::Float32(a).bits();
    const deUint32 bBits = tcu::Float32(b).bits();
    return aBits > bBits ? aBits - bBits : bBits - aBits;
}

//! Like getUlpDiff(), but treats +0 and -0 as equal by matching a zero operand's
//! sign to the other operand before comparing.
inline deUint32 getUlpDiffIgnoreZeroSign (float a, float b)
{
    if (tcu::Float32(a).isZero())
        return getUlpDiff(tcu::Float32::construct(tcu::Float32(b).sign(), 0, 0).asFloat(), b);
    else if (tcu::Float32(b).isZero())
        return getUlpDiff(a, tcu::Float32::construct(tcu::Float32(a).sign(), 0, 0).asFloat());
    else
        return getUlpDiff(a, b);
}

inline bool supportsSignedZero (glu::Precision precision)
{
    // \note GLSL ES 3.1 doesn't really require support for -0, but we require it for highp
    //       as it is very widely supported.
    return precision == glu::PRECISION_HIGHP;
}

//! Absolute error bound corresponding to a ULP threshold at the magnitude of 'value'.
inline float getEpsFromMaxUlpDiff (float value, deUint32 ulpDiff)
{
    const int exp = tcu::Float32(value).exponent();
    return tcu::Float32::construct(+1, exp, (1u<<23) | ulpDiff).asFloat() - tcu::Float32::construct(+1, exp, 1u<<23).asFloat();
}

//! Maximum ULP error when only numAccurateBits of the 23-bit mantissa are reliable.
inline deUint32 getMaxUlpDiffFromBits (int numAccurateBits)
{
    const int      numGarbageBits = 23-numAccurateBits;
    const deUint32 mask           = (1u<<numGarbageBits)-1u;

    return mask;
}

//! Absolute error bound for 'value' when only numAccurateBits are reliable.
inline float getEpsFromBits (float value, int numAccurateBits)
{
    return getEpsFromMaxUlpDiff(value, getMaxUlpDiffFromBits(numAccurateBits));
}

//! Minimum mantissa bits guaranteed for each precision qualifier (indexed by glu::Precision).
static int getMinMantissaBits (glu::Precision precision)
{
    const int bits[] =
    {
        7,      // lowp
        10,     // mediump
        23      // highp
    };
    DE_STATIC_ASSERT(DE_LENGTH_OF_ARRAY(bits) == glu::PRECISION_LAST);
    DE_ASSERT(de::inBounds<int>(precision, 0, DE_LENGTH_OF_ARRAY(bits)));
    return bits[precision];
}

//! Largest exponent of a normalized value representable at the given precision.
static int getMaxNormalizedValueExponent (glu::Precision precision)
{
    const int exponent[] =
    {
        0,      // lowp
        13,     // mediump
        127     // highp
    };
    DE_STATIC_ASSERT(DE_LENGTH_OF_ARRAY(exponent) == glu::PRECISION_LAST);
    DE_ASSERT(de::inBounds<int>(precision, 0, DE_LENGTH_OF_ARRAY(exponent)));
    return exponent[precision];
}

//! Smallest exponent of a normalized value representable at the given precision.
static int
getMinNormalizedValueExponent (glu::Precision precision)
{
    const int exponent[] =
    {
        -7,     // lowp
        -13,    // mediump
        -126    // highp
    };
    DE_STATIC_ASSERT(DE_LENGTH_OF_ARRAY(exponent) == glu::PRECISION_LAST);
    DE_ASSERT(de::inBounds<int>(precision, 0, DE_LENGTH_OF_ARRAY(exponent)));
    return exponent[precision];
}

// CommonFunctionCase

//! Base class for built-in common-function cases: owns the shader spec,
//! generates inputs, executes the shader, and compares outputs per value.
class CommonFunctionCase : public TestCase
{
public:
                    CommonFunctionCase  (Context& context, const char* name, const char* description, glu::ShaderType shaderType);
                    ~CommonFunctionCase (void);

    void            init                (void);
    void            deinit              (void);
    IterateResult   iterate             (void);

protected:
                        CommonFunctionCase  (const CommonFunctionCase& other);
    CommonFunctionCase& operator=           (const CommonFunctionCase& other);

    //! Generate numValues input values into the per-symbol buffers in 'values'.
    virtual void    getInputValues      (int numValues, void* const* values) const = 0;
    //! Compare outputs against inputs for a single value; on failure describe it in m_failMsg.
    virtual bool    compare             (const void* const* inputs, const void* const* outputs) = 0;

    glu::ShaderType m_shaderType;
    ShaderSpec      m_spec;
    int             m_numValues;

    std::ostringstream m_failMsg;       //!< Comparison failure help message.

private:
    ShaderExecutor* m_executor;
};

CommonFunctionCase::CommonFunctionCase (Context& context, const char* name, const char* description, glu::ShaderType shaderType)
    : TestCase     (context, name, description)
    , m_shaderType (shaderType)
    , m_numValues  (100)
    , m_executor   (DE_NULL)
{
    m_spec.version = glu::GLSL_VERSION_310_ES;
}

CommonFunctionCase::~CommonFunctionCase (void)
{
    CommonFunctionCase::deinit();
}

void CommonFunctionCase::init (void)
{
    DE_ASSERT(!m_executor);

    m_executor = createExecutor(m_context.getRenderContext(), m_shaderType, m_spec);
    m_testCtx.getLog() << m_executor;

    if (!m_executor->isOk())
        throw tcu::TestError("Compile failed");
}

void CommonFunctionCase::deinit (void)
{
    delete m_executor;
    m_executor = DE_NULL;
}

//! Scalar size of each symbol's type, in declaration order.
static vector<int> getScalarSizes (const vector<Symbol>& symbols)
{
    vector<int> sizes(symbols.size());
    for (int ndx = 0; ndx < (int)symbols.size(); ++ndx)
        sizes[ndx] = symbols[ndx].varType.getScalarSize();
    return sizes;
}

//! Total number of scalars across all symbols.
static int computeTotalScalarSize (const vector<Symbol>& symbols)
{
    int totalSize = 0;
    for (vector<Symbol>::const_iterator sym = symbols.begin(); sym != symbols.end(); ++sym)
        totalSize += sym->varType.getScalarSize();
    return totalSize;
}

//! Compute per-symbol base pointers into 'data' for numValues values per symbol.
static vector<void*> getInputOutputPointers (const vector<Symbol>& symbols, vector<deUint32>& data, const int numValues)
{
    vector<void*> pointers        (symbols.size());
    int           curScalarOffset = 0;

    for (int varNdx = 0; varNdx < (int)symbols.size(); ++varNdx)
    {
        const Symbol& var        = symbols[varNdx];
        const int     scalarSize = var.varType.getScalarSize();

        // Uses planar layout as input/output specs do not support strides.
        pointers[varNdx] = &data[curScalarOffset];
        curScalarOffset += scalarSize*numValues;
    }

    DE_ASSERT(curScalarOffset == (int)data.size());

    return pointers;
}

// \todo [2013-08-08 pyry] Make generic utility and move to glu?

//! Log helper: prints a float as a decimal value plus its raw bit pattern.
struct HexFloat
{
    const float value;
    HexFloat (const float value_) : value(value_) {}
};

std::ostream& operator<< (std::ostream& str, const HexFloat& v)
{
    return str << v.value << " / " << tcu::toHex(tcu::Float32(v.value).bits());
}

//! Log helper: prints a boolean as true/false plus its raw 32-bit value.
struct HexBool
{
    const deUint32 value;
    HexBool (const deUint32 value_) : value(value_) {}
};

std::ostream& operator<< (std::ostream& str, const HexBool& v)
{
    return str << (v.value ? "true" : "false") << " / " << tcu::toHex(v.value);
}

//! Log helper: typed view of a single (possibly vector) value.
struct VarValue
{
    const glu::VarType& type;
    const void*         value;

    VarValue (const glu::VarType& type_, const void* value_) : type(type_), value(value_) {}
};

std::ostream& operator<< (std::ostream& str, const VarValue& varValue)
{
    DE_ASSERT(varValue.type.isBasicType());

    const glu::DataType basicType     = varValue.type.getBasicType();
    const glu::DataType scalarType    = glu::getDataTypeScalarType(basicType);
    const int           numComponents = glu::getDataTypeScalarSize(basicType);

    if (numComponents > 1)
        str << glu::getDataTypeName(basicType) << "(";

    for (int compNdx = 0; compNdx < numComponents; compNdx++)
    {
        if (compNdx != 0)
            str << ", ";

        switch (scalarType)
        {
            case glu::TYPE_FLOAT:   str << HexFloat(((const float*)varValue.value)[compNdx]);        break;
            case glu::TYPE_INT:     str << ((const deInt32*)varValue.value)[compNdx];                break;
            case glu::TYPE_UINT:    str << tcu::toHex(((const deUint32*)varValue.value)[compNdx]);   break;
            case glu::TYPE_BOOL:    str << HexBool(((const deUint32*)varValue.value)[compNdx]);      break;

            default:
                DE_ASSERT(false);
        }
    }

    if (numComponents > 1)
        str << ")";

    return str;
}

CommonFunctionCase::IterateResult CommonFunctionCase::iterate (void)
{
    const int           numInputScalars  = computeTotalScalarSize(m_spec.inputs);
    const int           numOutputScalars = computeTotalScalarSize(m_spec.outputs);
    vector<deUint32>    inputData        (numInputScalars * m_numValues);
    vector<deUint32>    outputData       (numOutputScalars * m_numValues);
    const vector<void*> inputPointers    = getInputOutputPointers(m_spec.inputs, inputData, m_numValues);
    const vector<void*> outputPointers   = getInputOutputPointers(m_spec.outputs, outputData, m_numValues);

    // Initialize input data.
    getInputValues(m_numValues, &inputPointers[0]);

    // Execute shader.
    m_executor->useProgram();
    m_executor->execute(m_numValues, &inputPointers[0], &outputPointers[0]);

    // Compare results.
    {
        const vector<int> inScalarSizes  = getScalarSizes(m_spec.inputs);
        const vector<int> outScalarSizes = getScalarSizes(m_spec.outputs);
        vector<void*>     curInputPtr    (inputPointers.size());
        vector<void*>     curOutputPtr   (outputPointers.size());
        int               numFailed      = 0;

        for (int valNdx = 0; valNdx < m_numValues; valNdx++)
        {
            // Set up pointers for comparison.
            for (int inNdx = 0; inNdx < (int)curInputPtr.size(); ++inNdx)
                curInputPtr[inNdx] = (deUint32*)inputPointers[inNdx] + inScalarSizes[inNdx]*valNdx;

            for (int outNdx = 0; outNdx < (int)curOutputPtr.size(); ++outNdx)
                curOutputPtr[outNdx] = (deUint32*)outputPointers[outNdx] + outScalarSizes[outNdx]*valNdx;

            if (!compare(&curInputPtr[0], &curOutputPtr[0]))
            {
                // \todo [2013-08-08 pyry] We probably want to log reference value as well?

                m_testCtx.getLog() << TestLog::Message << "ERROR: comparison failed for value " << valNdx << ":\n " << m_failMsg.str() << TestLog::EndMessage;

                m_testCtx.getLog() << TestLog::Message << " inputs:" << TestLog::EndMessage;
                for (int inNdx = 0; inNdx < (int)curInputPtr.size(); inNdx++)
                    m_testCtx.getLog() << TestLog::Message << " " << m_spec.inputs[inNdx].name << " = "
                                       << VarValue(m_spec.inputs[inNdx].varType, curInputPtr[inNdx])
                                       << TestLog::EndMessage;

                m_testCtx.getLog() << TestLog::Message << " outputs:" << TestLog::EndMessage;
                for (int outNdx = 0; outNdx < (int)curOutputPtr.size(); outNdx++)
                    m_testCtx.getLog() << TestLog::Message << " " << m_spec.outputs[outNdx].name << " = "
                                       << VarValue(m_spec.outputs[outNdx].varType, curOutputPtr[outNdx])
                                       << TestLog::EndMessage;

                // Reset accumulated failure message for the next value.
                m_failMsg.str("");
                m_failMsg.clear();
                numFailed += 1;
            }
        }

        m_testCtx.getLog() << TestLog::Message << (m_numValues - numFailed) << " / " << m_numValues << " values passed" << TestLog::EndMessage;

        m_testCtx.setTestResult(numFailed == 0 ? QP_TEST_RESULT_PASS : QP_TEST_RESULT_FAIL,
                                numFailed == 0 ? "Pass" : "Result comparison failed");
    }

    return STOP;
}

//! Case name postfix for a precision qualifier (indexed by glu::Precision).
static const char* getPrecisionPostfix (glu::Precision precision)
{
    static const char* s_postfix[] =
    {
        "_lowp",
        "_mediump",
        "_highp"
    };
    DE_STATIC_ASSERT(DE_LENGTH_OF_ARRAY(s_postfix) == glu::PRECISION_LAST);
    DE_ASSERT(de::inBounds<int>(precision, 0, DE_LENGTH_OF_ARRAY(s_postfix)));
    return s_postfix[precision];
}

//! Case name postfix for a shader stage (indexed by glu::ShaderType).
static const char* getShaderTypePostfix (glu::ShaderType shaderType)
{
    static const char* s_postfix[] =
    {
        "_vertex",
        "_fragment",
        "_geometry",
        "_tess_control",
        "_tess_eval",
        "_compute"
    };
    DE_ASSERT(de::inBounds<int>(shaderType, 0, DE_LENGTH_OF_ARRAY(s_postfix)));
    return s_postfix[shaderType];
}

//! Full case name: <dataType><precisionPostfix><shaderTypePostfix>.
static std::string getCommonFuncCaseName (glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
{
    return string(glu::getDataTypeName(baseType)) + getPrecisionPostfix(precision) + getShaderTypePostfix(shaderType);
}

//! Test case for the abs() built-in.
class AbsCase : public CommonFunctionCase
{
public:
    AbsCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
        : CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "abs", shaderType)
    {
        m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
        m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
        m_spec.source = "out0 = abs(in0);";
    }

    void getInputValues (int numValues, void* const* values) const
    {
        const Vec2 floatRanges[] =
        {
            Vec2(-2.0f, 2.0f),  // lowp
            Vec2(-1e3f, 1e3f),  // mediump
            Vec2(-1e7f, 1e7f)   // highp
        };
        // \note Ranges avoid INT_MIN for lowp/mediump-style bounds; abs(INT_MIN) is undefined.
        const IVec2 intRanges[] =
        {
            IVec2(-(1<<7)+1,  (1<<7)-1),
            IVec2(-(1<<15)+1, (1<<15)-1),
            IVec2(0x80000001, 0x7fffffff)
        };

        de::Random          rnd  (deStringHash(getName()) ^ 0x235facu);
        const glu::DataType type =
m_spec.inputs[0].varType.getBasicType(); 480 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 481 const int scalarSize = glu::getDataTypeScalarSize(type); 482 483 if (glu::isDataTypeFloatOrVec(type)) 484 fillRandomScalars(rnd, floatRanges[precision].x(), floatRanges[precision].y(), values[0], numValues*scalarSize); 485 else 486 fillRandomScalars(rnd, intRanges[precision].x(), intRanges[precision].y(), values[0], numValues*scalarSize); 487 } 488 489 bool compare (const void* const* inputs, const void* const* outputs) 490 { 491 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 492 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 493 const int scalarSize = glu::getDataTypeScalarSize(type); 494 495 if (glu::isDataTypeFloatOrVec(type)) 496 { 497 const int mantissaBits = getMinMantissaBits(precision); 498 const deUint32 maxUlpDiff = (1u<<(23-mantissaBits))-1u; 499 500 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 501 { 502 const float in0 = ((const float*)inputs[0])[compNdx]; 503 const float out0 = ((const float*)outputs[0])[compNdx]; 504 const float ref0 = de::abs(in0); 505 const deUint32 ulpDiff0 = getUlpDiff(out0, ref0); 506 507 if (ulpDiff0 > maxUlpDiff) 508 { 509 m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref0) << " with ULP threshold " << maxUlpDiff << ", got ULP diff " << ulpDiff0; 510 return false; 511 } 512 } 513 } 514 else 515 { 516 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 517 { 518 const int in0 = ((const int*)inputs[0])[compNdx]; 519 const int out0 = ((const int*)outputs[0])[compNdx]; 520 const int ref0 = de::abs(in0); 521 522 if (out0 != ref0) 523 { 524 m_failMsg << "Expected [" << compNdx << "] = " << ref0; 525 return false; 526 } 527 } 528 } 529 530 return true; 531 } 532}; 533 534class SignCase : public CommonFunctionCase 535{ 536public: 537 SignCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType) 538 
: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "sign", shaderType) 539 { 540 m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision))); 541 m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision))); 542 m_spec.source = "out0 = sign(in0);"; 543 } 544 545 void getInputValues (int numValues, void* const* values) const 546 { 547 const Vec2 floatRanges[] = 548 { 549 Vec2(-2.0f, 2.0f), // lowp 550 Vec2(-1e4f, 1e4f), // mediump - note: may end up as inf 551 Vec2(-1e8f, 1e8f) // highp - note: may end up as inf 552 }; 553 const IVec2 intRanges[] = 554 { 555 IVec2(-(1<<7), (1<<7)-1), 556 IVec2(-(1<<15), (1<<15)-1), 557 IVec2(0x80000000, 0x7fffffff) 558 }; 559 560 de::Random rnd (deStringHash(getName()) ^ 0x324u); 561 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 562 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 563 const int scalarSize = glu::getDataTypeScalarSize(type); 564 565 if (glu::isDataTypeFloatOrVec(type)) 566 { 567 // Special cases. 
568 std::fill((float*)values[0], (float*)values[0] + scalarSize, +1.0f); 569 std::fill((float*)values[0], (float*)values[0] + scalarSize, -1.0f); 570 std::fill((float*)values[0], (float*)values[0] + scalarSize, 0.0f); 571 fillRandomScalars(rnd, floatRanges[precision].x(), floatRanges[precision].y(), (float*)values[0] + scalarSize*3, (numValues-3)*scalarSize); 572 } 573 else 574 { 575 std::fill((int*)values[0], (int*)values[0] + scalarSize, +1); 576 std::fill((int*)values[0], (int*)values[0] + scalarSize, -1); 577 std::fill((int*)values[0], (int*)values[0] + scalarSize, 0); 578 fillRandomScalars(rnd, intRanges[precision].x(), intRanges[precision].y(), (int*)values[0] + scalarSize*3, (numValues-3)*scalarSize); 579 } 580 } 581 582 bool compare (const void* const* inputs, const void* const* outputs) 583 { 584 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 585 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 586 const int scalarSize = glu::getDataTypeScalarSize(type); 587 588 if (glu::isDataTypeFloatOrVec(type)) 589 { 590 // Both highp and mediump should be able to represent -1, 0, and +1 exactly 591 const deUint32 maxUlpDiff = precision == glu::PRECISION_LOWP ? getMaxUlpDiffFromBits(getMinMantissaBits(precision)) : 0; 592 593 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 594 { 595 const float in0 = ((const float*)inputs[0])[compNdx]; 596 const float out0 = ((const float*)outputs[0])[compNdx]; 597 const float ref0 = in0 < 0.0f ? -1.0f : 598 in0 > 0.0f ? 
+1.0f : 0.0f; 599 const deUint32 ulpDiff0 = getUlpDiff(out0, ref0); 600 601 if (ulpDiff0 > maxUlpDiff) 602 { 603 m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref0) << " with ULP threshold " << maxUlpDiff << ", got ULP diff " << ulpDiff0; 604 return false; 605 } 606 } 607 } 608 else 609 { 610 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 611 { 612 const int in0 = ((const int*)inputs[0])[compNdx]; 613 const int out0 = ((const int*)outputs[0])[compNdx]; 614 const int ref0 = in0 < 0 ? -1 : 615 in0 > 0 ? +1 : 0; 616 617 if (out0 != ref0) 618 { 619 m_failMsg << "Expected [" << compNdx << "] = " << ref0; 620 return false; 621 } 622 } 623 } 624 625 return true; 626 } 627}; 628 629static float roundEven (float v) 630{ 631 const float q = deFloatFrac(v); 632 const int truncated = int(v-q); 633 const int rounded = (q > 0.5f) ? (truncated + 1) : // Rounded up 634 (q == 0.5f && (truncated % 2 != 0)) ? (truncated + 1) : // Round to nearest even at 0.5 635 truncated; // Rounded down 636 637 return float(rounded); 638} 639 640class RoundEvenCase : public CommonFunctionCase 641{ 642public: 643 RoundEvenCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType) 644 : CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "roundEven", shaderType) 645 { 646 m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision))); 647 m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision))); 648 m_spec.source = "out0 = roundEven(in0);"; 649 } 650 651 void getInputValues (int numValues, void* const* values) const 652 { 653 const Vec2 ranges[] = 654 { 655 Vec2(-2.0f, 2.0f), // lowp 656 Vec2(-1e3f, 1e3f), // mediump 657 Vec2(-1e7f, 1e7f) // highp 658 }; 659 660 de::Random rnd (deStringHash(getName()) ^ 0xac23fu); 661 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 662 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 663 
const int scalarSize = glu::getDataTypeScalarSize(type); 664 int numSpecialCases = 0; 665 666 // Special cases. 667 if (precision != glu::PRECISION_LOWP) 668 { 669 DE_ASSERT(numValues >= 20); 670 for (int ndx = 0; ndx < 20; ndx++) 671 { 672 const float v = de::clamp(float(ndx) - 10.5f, ranges[precision].x(), ranges[precision].y()); 673 std::fill((float*)values[0], (float*)values[0] + scalarSize, v); 674 numSpecialCases += 1; 675 } 676 } 677 678 // Random cases. 679 fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), (float*)values[0] + numSpecialCases*scalarSize, (numValues-numSpecialCases)*scalarSize); 680 681 // If precision is mediump, make sure values can be represented in fp16 exactly 682 if (precision == glu::PRECISION_MEDIUMP) 683 { 684 for (int ndx = 0; ndx < numValues*scalarSize; ndx++) 685 ((float*)values[0])[ndx] = tcu::Float16(((float*)values[0])[ndx]).asFloat(); 686 } 687 } 688 689 bool compare (const void* const* inputs, const void* const* outputs) 690 { 691 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 692 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 693 const bool hasSignedZero = supportsSignedZero(precision); 694 const int scalarSize = glu::getDataTypeScalarSize(type); 695 696 if (precision == glu::PRECISION_HIGHP || precision == glu::PRECISION_MEDIUMP) 697 { 698 // Require exact rounding result. 699 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 700 { 701 const float in0 = ((const float*)inputs[0])[compNdx]; 702 const float out0 = ((const float*)outputs[0])[compNdx]; 703 const float ref = roundEven(in0); 704 705 const deUint32 ulpDiff = hasSignedZero ? 
getUlpDiff(out0, ref) : getUlpDiffIgnoreZeroSign(out0, ref); 706 707 if (ulpDiff > 0) 708 { 709 m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref) << ", got ULP diff " << tcu::toHex(ulpDiff); 710 return false; 711 } 712 } 713 } 714 else 715 { 716 const int mantissaBits = getMinMantissaBits(precision); 717 const deUint32 maxUlpDiff = getMaxUlpDiffFromBits(mantissaBits); // ULP diff for rounded integer value. 718 const float eps = getEpsFromBits(1.0f, mantissaBits); // epsilon for rounding bounds 719 720 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 721 { 722 const float in0 = ((const float*)inputs[0])[compNdx]; 723 const float out0 = ((const float*)outputs[0])[compNdx]; 724 const int minRes = int(roundEven(in0-eps)); 725 const int maxRes = int(roundEven(in0+eps)); 726 bool anyOk = false; 727 728 for (int roundedVal = minRes; roundedVal <= maxRes; roundedVal++) 729 { 730 const deUint32 ulpDiff = getUlpDiffIgnoreZeroSign(out0, float(roundedVal)); 731 732 if (ulpDiff <= maxUlpDiff) 733 { 734 anyOk = true; 735 break; 736 } 737 } 738 739 if (!anyOk) 740 { 741 m_failMsg << "Expected [" << compNdx << "] = [" << minRes << ", " << maxRes << "] with ULP threshold " << tcu::toHex(maxUlpDiff); 742 return false; 743 } 744 } 745 } 746 747 return true; 748 } 749}; 750 751class ModfCase : public CommonFunctionCase 752{ 753public: 754 ModfCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType) 755 : CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "modf", shaderType) 756 { 757 m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision))); 758 m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision))); 759 m_spec.outputs.push_back(Symbol("out1", glu::VarType(baseType, precision))); 760 m_spec.source = "out0 = modf(in0, out1);"; 761 } 762 763 void getInputValues (int numValues, void* const* values) const 764 { 765 const Vec2 ranges[] = 766 { 767 
            Vec2(-2.0f, 2.0f),  // lowp
            Vec2(-1e3f, 1e3f),  // mediump
            Vec2(-1e7f, 1e7f)   // highp
        };

        de::Random           rnd        (deStringHash(getName()) ^ 0xac23fu);
        const glu::DataType  type       = m_spec.inputs[0].varType.getBasicType();
        const glu::Precision precision  = m_spec.inputs[0].varType.getPrecision();
        const int            scalarSize = glu::getDataTypeScalarSize(type);

        fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), values[0], numValues*scalarSize);
    }

    //! Verify that out0 (fractional part) + out1 (integer part) reconstructs in0
    //! within a precision-dependent ULP bound.
    bool compare (const void* const* inputs, const void* const* outputs)
    {
        const glu::DataType  type        = m_spec.inputs[0].varType.getBasicType();
        const glu::Precision precision   = m_spec.inputs[0].varType.getPrecision();
        const bool           hasZeroSign = supportsSignedZero(precision);
        const int            scalarSize  = glu::getDataTypeScalarSize(type);

        const int mantissaBits = getMinMantissaBits(precision);

        for (int compNdx = 0; compNdx < scalarSize; compNdx++)
        {
            const float in0  = ((const float*)inputs[0])[compNdx];
            const float out0 = ((const float*)outputs[0])[compNdx];
            const float out1 = ((const float*)outputs[1])[compNdx];

            const float refOut1 = float(int(in0));
            const float refOut0 = in0 - refOut1;

            // Splitting off the integer part may shift mantissa bits out of the
            // fractional part; relax the ULP bound accordingly (except for highp).
            const int      bitsLost   = precision != glu::PRECISION_HIGHP ? numBitsLostInOp(in0, refOut0) : 0;
            const deUint32 maxUlpDiff = getMaxUlpDiffFromBits(de::max(mantissaBits - bitsLost, 0));

            const float resSum = out0 + out1;

            const deUint32 ulpDiff = hasZeroSign ? getUlpDiff(resSum, in0) : getUlpDiffIgnoreZeroSign(resSum, in0);

            if (ulpDiff > maxUlpDiff)
            {
                m_failMsg << "Expected [" << compNdx << "] = (" << HexFloat(refOut0) << ") + (" << HexFloat(refOut1) << ") = " << HexFloat(in0) << " with ULP threshold "
                          << tcu::toHex(maxUlpDiff) << ", got ULP diff " << tcu::toHex(ulpDiff);
                return false;
            }
        }

        return true;
    }
};

//! Test case for the isnan() built-in.
class IsnanCase : public CommonFunctionCase
{
public:
    IsnanCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
        : CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "isnan", shaderType)
    {
        DE_ASSERT(glu::isDataTypeFloatOrVec(baseType));

        const int           vecSize  = glu::getDataTypeScalarSize(baseType);
        const glu::DataType boolType = vecSize > 1 ? glu::getDataTypeBoolVec(vecSize) : glu::TYPE_BOOL;

        m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
        m_spec.outputs.push_back(Symbol("out0", glu::VarType(boolType, glu::PRECISION_LAST)));
        m_spec.source = "out0 = isnan(in0);";
    }

    //! Build float bit patterns directly so NaNs, infinities and finite values
    //! all occur with reasonable frequency.
    void getInputValues (int numValues, void* const* values) const
    {
        de::Random           rnd          (deStringHash(getName()) ^ 0xc2a39fu);
        const glu::DataType  type         = m_spec.inputs[0].varType.getBasicType();
        const glu::Precision precision    = m_spec.inputs[0].varType.getPrecision();
        const int            scalarSize   = glu::getDataTypeScalarSize(type);
        const int            mantissaBits = getMinMantissaBits(precision);
        const deUint32       mantissaMask = ~getMaxUlpDiffFromBits(mantissaBits) & ((1u<<23)-1u);

        for (int valNdx = 0; valNdx < numValues*scalarSize; valNdx++)
        {
            const bool     isNan    = rnd.getFloat() > 0.3f;
            const bool     isInf    = !isNan && rnd.getFloat() > 0.4f;
            // NaN/finite: non-zero mantissa (top bit set); Inf: mantissa must be zero.
            const deUint32 mantissa = !isInf ? ((1u<<22) | (rnd.getUint32() & mantissaMask)) : 0;
            // NaN/Inf: all-ones exponent; finite: random (biased-low) exponent.
            const deUint32 exp      = !isNan && !isInf ? (rnd.getUint32() & 0x7fu) : 0xffu;
            const deUint32 sign     = rnd.getUint32() & 0x1u;
            const deUint32 value    = (sign << 31) | (exp << 23) | mantissa;

            DE_ASSERT(tcu::Float32(value).isInf() == isInf && tcu::Float32(value).isNaN() == isNan);

            ((deUint32*)values[0])[valNdx] = value;
        }
    }

    bool compare (const void* const* inputs, const void* const* outputs)
    {
        const glu::DataType  type       = m_spec.inputs[0].varType.getBasicType();
        const glu::Precision precision  = m_spec.inputs[0].varType.getPrecision();
        const int            scalarSize = glu::getDataTypeScalarSize(type);

        if (precision == glu::PRECISION_HIGHP)
        {
            // Only highp is required to support inf/nan
            for (int compNdx = 0; compNdx < scalarSize; compNdx++)
            {
                const float in0  = ((const float*)inputs[0])[compNdx];
                const bool  out0 = ((const deUint32*)outputs[0])[compNdx] != 0;
                const bool  ref  = tcu::Float32(in0).isNaN();

                if (out0 != ref)
                {
                    m_failMsg << "Expected [" << compNdx << "] = " << (ref ? "true" : "false");
                    return false;
                }
            }
        }
        else if (precision == glu::PRECISION_MEDIUMP || precision == glu::PRECISION_LOWP)
        {
            // NaN support is optional, check that inputs that are not NaN don't result in true.
            for (int compNdx = 0; compNdx < scalarSize; compNdx++)
            {
                const float in0  = ((const float*)inputs[0])[compNdx];
                const bool  out0 = ((const deUint32*)outputs[0])[compNdx] != 0;
                const bool  ref  = tcu::Float32(in0).isNaN();

                if (!ref && out0)
                {
                    m_failMsg << "Expected [" << compNdx << "] = " << (ref ?
"true" : "false");
                    return false;
                }
            }
        }

        return true;
    }
};

//! Test case for the isinf() built-in.
class IsinfCase : public CommonFunctionCase
{
public:
    IsinfCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
        : CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "isinf", shaderType)
    {
        DE_ASSERT(glu::isDataTypeFloatOrVec(baseType));

        const int           vecSize  = glu::getDataTypeScalarSize(baseType);
        const glu::DataType boolType = vecSize > 1 ? glu::getDataTypeBoolVec(vecSize) : glu::TYPE_BOOL;

        m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
        m_spec.outputs.push_back(Symbol("out0", glu::VarType(boolType, glu::PRECISION_LAST)));
        m_spec.source = "out0 = isinf(in0);";
    }

    //! Build float bit patterns directly so infinities, NaNs and finite values
    //! all occur with reasonable frequency.
    void getInputValues (int numValues, void* const* values) const
    {
        de::Random           rnd          (deStringHash(getName()) ^ 0xc2a39fu);
        const glu::DataType  type         = m_spec.inputs[0].varType.getBasicType();
        const glu::Precision precision    = m_spec.inputs[0].varType.getPrecision();
        const int            scalarSize   = glu::getDataTypeScalarSize(type);
        const int            mantissaBits = getMinMantissaBits(precision);
        const deUint32       mantissaMask = ~getMaxUlpDiffFromBits(mantissaBits) & ((1u<<23)-1u);

        for (int valNdx = 0; valNdx < numValues*scalarSize; valNdx++)
        {
            const bool     isInf    = rnd.getFloat() > 0.3f;
            const bool     isNan    = !isInf && rnd.getFloat() > 0.4f;
            // NaN/finite: non-zero mantissa (top bit set); Inf: mantissa must be zero.
            const deUint32 mantissa = !isInf ? ((1u<<22) | (rnd.getUint32() & mantissaMask)) : 0;
            // NaN/Inf: all-ones exponent; finite: random (biased-low) exponent.
            const deUint32 exp      = !isNan && !isInf ? (rnd.getUint32() & 0x7fu) : 0xffu;
            const deUint32 sign     = rnd.getUint32() & 0x1u;
            const deUint32 value    = (sign << 31) | (exp << 23) | mantissa;

            DE_ASSERT(tcu::Float32(value).isInf() == isInf && tcu::Float32(value).isNaN() == isNan);

            ((deUint32*)values[0])[valNdx] = value;
        }
    }

    bool compare (const void* const* inputs, const void* const* outputs)
    {
        const glu::DataType  type       = m_spec.inputs[0].varType.getBasicType();
        const glu::Precision precision  = m_spec.inputs[0].varType.getPrecision();
        const int            scalarSize = glu::getDataTypeScalarSize(type);

        if (precision == glu::PRECISION_HIGHP)
        {
            // Only highp is required to support inf/nan
            for (int compNdx = 0; compNdx < scalarSize; compNdx++)
            {
                const float in0  = ((const float*)inputs[0])[compNdx];
                const bool  out0 = ((const deUint32*)outputs[0])[compNdx] != 0;
                const bool  ref  = tcu::Float32(in0).isInf();

                if (out0 != ref)
                {
                    m_failMsg << "Expected [" << compNdx << "] = " << HexBool(ref);
                    return false;
                }
            }
        }
        else if (precision == glu::PRECISION_MEDIUMP)
        {
            // Inf support is optional, check that inputs that are not Inf in mediump don't result in true.
            for (int compNdx = 0; compNdx < scalarSize; compNdx++)
            {
                const float in0  = ((const float*)inputs[0])[compNdx];
                const bool  out0 = ((const deUint32*)outputs[0])[compNdx] != 0;
                // \note Inf is judged at fp16 range: values finite in fp32 may still be Inf in mediump.
                const bool  ref  = tcu::Float16(in0).isInf();

                if (!ref && out0)
                {
                    m_failMsg << "Expected [" << compNdx << "] = " << (ref ?
"true" : "false"); 974 return false; 975 } 976 } 977 } 978 // else: no verification can be performed 979 980 return true; 981 } 982}; 983 984class FloatBitsToUintIntCase : public CommonFunctionCase 985{ 986public: 987 FloatBitsToUintIntCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType, bool outIsSigned) 988 : CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), outIsSigned ? "floatBitsToInt" : "floatBitsToUint", shaderType) 989 { 990 const int vecSize = glu::getDataTypeScalarSize(baseType); 991 const glu::DataType intType = outIsSigned ? (vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT) 992 : (vecSize > 1 ? glu::getDataTypeUintVec(vecSize) : glu::TYPE_UINT); 993 994 m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision))); 995 m_spec.outputs.push_back(Symbol("out0", glu::VarType(intType, glu::PRECISION_HIGHP))); 996 m_spec.source = outIsSigned ? "out0 = floatBitsToInt(in0);" : "out0 = floatBitsToUint(in0);"; 997 } 998 999 void getInputValues (int numValues, void* const* values) const 1000 { 1001 const Vec2 ranges[] = 1002 { 1003 Vec2(-2.0f, 2.0f), // lowp 1004 Vec2(-1e3f, 1e3f), // mediump 1005 Vec2(-1e7f, 1e7f) // highp 1006 }; 1007 1008 de::Random rnd (deStringHash(getName()) ^ 0x2790au); 1009 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 1010 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 1011 const int scalarSize = glu::getDataTypeScalarSize(type); 1012 1013 fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), values[0], numValues*scalarSize); 1014 } 1015 1016 bool compare (const void* const* inputs, const void* const* outputs) 1017 { 1018 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 1019 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 1020 const int scalarSize = glu::getDataTypeScalarSize(type); 1021 1022 const int mantissaBits = 
getMinMantissaBits(precision);
		const int				maxUlpDiff		= getMaxUlpDiffFromBits(mantissaBits);

		for (int compNdx = 0; compNdx < scalarSize; compNdx++)
		{
			const float		in0		= ((const float*)inputs[0])[compNdx];
			const deUint32	out0	= ((const deUint32*)outputs[0])[compNdx];
			const deUint32	refOut0	= tcu::Float32(in0).bits();
			// Difference is computed on the raw bit patterns (ULP distance).
			const int		ulpDiff	= de::abs((int)out0 - (int)refOut0);

			if (ulpDiff > maxUlpDiff)
			{
				m_failMsg << "Expected [" << compNdx << "] = " << tcu::toHex(refOut0) << " with threshold "
						  << tcu::toHex(maxUlpDiff) << ", got diff " << tcu::toHex(ulpDiff);
				return false;
			}
		}

		return true;
	}
};

// floatBitsToInt(): signed integer result type.
class FloatBitsToIntCase : public FloatBitsToUintIntCase
{
public:
	FloatBitsToIntCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
		: FloatBitsToUintIntCase(context, baseType, precision, shaderType, true)
	{
	}
};

// floatBitsToUint(): unsigned integer result type.
class FloatBitsToUintCase : public FloatBitsToUintIntCase
{
public:
	FloatBitsToUintCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
		: FloatBitsToUintIntCase(context, baseType, precision, shaderType, false)
	{
	}
};

// Test case for intBitsToFloat() / uintBitsToFloat(). Always highp: the
// conversion must preserve the 32-bit pattern exactly.
class BitsToFloatCase : public CommonFunctionCase
{
public:
	BitsToFloatCase (Context& context, glu::DataType baseType, glu::ShaderType shaderType)
		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, glu::PRECISION_HIGHP, shaderType).c_str(), glu::isDataTypeIntOrIVec(baseType) ? "intBitsToFloat" : "uintBitsToFloat", shaderType)
	{
		const bool			inIsSigned	= glu::isDataTypeIntOrIVec(baseType);
		const int			vecSize		= glu::getDataTypeScalarSize(baseType);
		const glu::DataType	floatType	= vecSize > 1 ?
glu::getDataTypeFloatVec(vecSize) : glu::TYPE_FLOAT; 1071 1072 m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, glu::PRECISION_HIGHP))); 1073 m_spec.outputs.push_back(Symbol("out0", glu::VarType(floatType, glu::PRECISION_HIGHP))); 1074 m_spec.source = inIsSigned ? "out0 = intBitsToFloat(in0);" : "out0 = uintBitsToFloat(in0);"; 1075 } 1076 1077 void getInputValues (int numValues, void* const* values) const 1078 { 1079 de::Random rnd (deStringHash(getName()) ^ 0xbbb225u); 1080 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 1081 const int scalarSize = glu::getDataTypeScalarSize(type); 1082 const Vec2 range (-1e8f, +1e8f); 1083 1084 // \note Filled as floats. 1085 fillRandomScalars(rnd, range.x(), range.y(), values[0], numValues*scalarSize); 1086 } 1087 1088 bool compare (const void* const* inputs, const void* const* outputs) 1089 { 1090 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 1091 const int scalarSize = glu::getDataTypeScalarSize(type); 1092 const int maxUlpDiff = 0; 1093 1094 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 1095 { 1096 const float in0 = ((const float*)inputs[0])[compNdx]; 1097 const float out0 = ((const float*)outputs[0])[compNdx]; 1098 const int ulpDiff = de::abs((int)in0 - (int)out0); 1099 1100 if (ulpDiff > maxUlpDiff) 1101 { 1102 m_failMsg << "Expected [" << compNdx << "] = " << tcu::toHex(in0) << " with ULP threshold " 1103 << tcu::toHex(maxUlpDiff) << ", got ULP diff " << tcu::toHex(ulpDiff); 1104 return false; 1105 } 1106 } 1107 1108 return true; 1109 } 1110}; 1111 1112class FloorCase : public CommonFunctionCase 1113{ 1114public: 1115 FloorCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType) 1116 : CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "floor", shaderType) 1117 { 1118 m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision))); 1119 
m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision))); 1120 m_spec.source = "out0 = floor(in0);"; 1121 } 1122 1123 void getInputValues (int numValues, void* const* values) const 1124 { 1125 const Vec2 ranges[] = 1126 { 1127 Vec2(-2.0f, 2.0f), // lowp 1128 Vec2(-1e3f, 1e3f), // mediump 1129 Vec2(-1e7f, 1e7f) // highp 1130 }; 1131 1132 de::Random rnd (deStringHash(getName()) ^ 0xac23fu); 1133 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 1134 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 1135 const int scalarSize = glu::getDataTypeScalarSize(type); 1136 // Random cases. 1137 fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), (float*)values[0], numValues*scalarSize); 1138 1139 // If precision is mediump, make sure values can be represented in fp16 exactly 1140 if (precision == glu::PRECISION_MEDIUMP) 1141 { 1142 for (int ndx = 0; ndx < numValues*scalarSize; ndx++) 1143 ((float*)values[0])[ndx] = tcu::Float16(((float*)values[0])[ndx]).asFloat(); 1144 } 1145 } 1146 1147 bool compare (const void* const* inputs, const void* const* outputs) 1148 { 1149 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 1150 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 1151 const int scalarSize = glu::getDataTypeScalarSize(type); 1152 1153 if (precision == glu::PRECISION_HIGHP || precision == glu::PRECISION_MEDIUMP) 1154 { 1155 // Require exact result. 
1156 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 1157 { 1158 const float in0 = ((const float*)inputs[0])[compNdx]; 1159 const float out0 = ((const float*)outputs[0])[compNdx]; 1160 const float ref = deFloatFloor(in0); 1161 1162 const deUint32 ulpDiff = getUlpDiff(out0, ref); 1163 1164 if (ulpDiff > 0) 1165 { 1166 m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref) << ", got ULP diff " << tcu::toHex(ulpDiff); 1167 return false; 1168 } 1169 } 1170 } 1171 else 1172 { 1173 const int mantissaBits = getMinMantissaBits(precision); 1174 const deUint32 maxUlpDiff = getMaxUlpDiffFromBits(mantissaBits); // ULP diff for rounded integer value. 1175 const float eps = getEpsFromBits(1.0f, mantissaBits); // epsilon for rounding bounds 1176 1177 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 1178 { 1179 const float in0 = ((const float*)inputs[0])[compNdx]; 1180 const float out0 = ((const float*)outputs[0])[compNdx]; 1181 const int minRes = int(deFloatFloor(in0-eps)); 1182 const int maxRes = int(deFloatFloor(in0+eps)); 1183 bool anyOk = false; 1184 1185 for (int roundedVal = minRes; roundedVal <= maxRes; roundedVal++) 1186 { 1187 const deUint32 ulpDiff = getUlpDiff(out0, float(roundedVal)); 1188 1189 if (ulpDiff <= maxUlpDiff) 1190 { 1191 anyOk = true; 1192 break; 1193 } 1194 } 1195 1196 if (!anyOk) 1197 { 1198 m_failMsg << "Expected [" << compNdx << "] = [" << minRes << ", " << maxRes << "] with ULP threshold " << tcu::toHex(maxUlpDiff); 1199 return false; 1200 } 1201 } 1202 } 1203 1204 return true; 1205 } 1206}; 1207 1208class TruncCase : public CommonFunctionCase 1209{ 1210public: 1211 TruncCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType) 1212 : CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "trunc", shaderType) 1213 { 1214 m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision))); 1215 m_spec.outputs.push_back(Symbol("out0", 
glu::VarType(baseType, precision)));
		m_spec.source = "out0 = trunc(in0);";
	}

	// First six values are special cases (signed zeros and near-integer
	// values); the rest are random from the per-precision range.
	void getInputValues (int numValues, void* const* values) const
	{
		const Vec2 ranges[] =
		{
			Vec2(-2.0f,		2.0f),	// lowp
			Vec2(-1e3f,		1e3f),	// mediump
			Vec2(-1e7f,		1e7f)	// highp
		};

		de::Random				rnd				(deStringHash(getName()) ^ 0xac23fu);
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize		= glu::getDataTypeScalarSize(type);
		const float				specialCases[]	= { 0.0f, -0.0f, -0.9f, 0.9f, 1.0f, -1.0f };
		const int				numSpecialCases	= DE_LENGTH_OF_ARRAY(specialCases);

		// Special cases
		for (int caseNdx = 0; caseNdx < numSpecialCases; caseNdx++)
		{
			for (int scalarNdx = 0; scalarNdx < scalarSize; scalarNdx++)
				((float*)values[0])[caseNdx*scalarSize + scalarNdx] = specialCases[caseNdx];
		}

		// Random cases.
		fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), (float*)values[0] + scalarSize*numSpecialCases, (numValues-numSpecialCases)*scalarSize);

		// If precision is mediump, make sure values can be represented in fp16 exactly
		if (precision == glu::PRECISION_MEDIUMP)
		{
			for (int ndx = 0; ndx < numValues*scalarSize; ndx++)
				((float*)values[0])[ndx] = tcu::Float16(((float*)values[0])[ndx]).asFloat();
		}
	}

	// Highp/mediump require exact round-toward-zero (sign of a zero result is
	// ignored); lowp accepts any integer within the eps rounding window.
	bool compare (const void* const* inputs, const void* const* outputs)
	{
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize		= glu::getDataTypeScalarSize(type);

		if (precision == glu::PRECISION_HIGHP || precision == glu::PRECISION_MEDIUMP)
		{
			// Require exact result.
			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float	in0		= ((const float*)inputs[0])[compNdx];
				const float	out0	= ((const float*)outputs[0])[compNdx];
				const bool	isNeg	= tcu::Float32(in0).sign() < 0;
				const float	ref		= isNeg ? (-float(int(-in0))) : float(int(in0));

				// \note: trunc() function definition is a bit broad on negative zeros. Ignore result sign if zero.
				const deUint32 ulpDiff = getUlpDiffIgnoreZeroSign(out0, ref);

				if (ulpDiff > 0)
				{
					m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref) << ", got ULP diff " << tcu::toHex(ulpDiff);
					return false;
				}
			}
		}
		else
		{
			const int		mantissaBits	= getMinMantissaBits(precision);
			const deUint32	maxUlpDiff		= getMaxUlpDiffFromBits(mantissaBits);	// ULP diff for rounded integer value.
			const float		eps				= getEpsFromBits(1.0f, mantissaBits);	// epsilon for rounding bounds

			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float	in0		= ((const float*)inputs[0])[compNdx];
				const float	out0	= ((const float*)outputs[0])[compNdx];
				const int	minRes	= int(in0-eps);
				const int	maxRes	= int(in0+eps);
				bool		anyOk	= false;

				for (int roundedVal = minRes; roundedVal <= maxRes; roundedVal++)
				{
					const deUint32 ulpDiff = getUlpDiffIgnoreZeroSign(out0, float(roundedVal));

					if (ulpDiff <= maxUlpDiff)
					{
						anyOk = true;
						break;
					}
				}

				if (!anyOk)
				{
					m_failMsg << "Expected [" << compNdx << "] = [" << minRes << ", " << maxRes << "] with ULP threshold " << tcu::toHex(maxUlpDiff);
					return false;
				}
			}
		}

		return true;
	}
};

// Test case for the round() built-in function.
class RoundCase : public CommonFunctionCase
{
public:
	RoundCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
		: CommonFunctionCase(context, getCommonFuncCaseName(baseType,
precision, shaderType).c_str(), "round", shaderType) 1321 { 1322 m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision))); 1323 m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision))); 1324 m_spec.source = "out0 = round(in0);"; 1325 } 1326 1327 void getInputValues (int numValues, void* const* values) const 1328 { 1329 const Vec2 ranges[] = 1330 { 1331 Vec2(-2.0f, 2.0f), // lowp 1332 Vec2(-1e3f, 1e3f), // mediump 1333 Vec2(-1e7f, 1e7f) // highp 1334 }; 1335 1336 de::Random rnd (deStringHash(getName()) ^ 0xac23fu); 1337 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 1338 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 1339 const int scalarSize = glu::getDataTypeScalarSize(type); 1340 int numSpecialCases = 0; 1341 1342 // Special cases. 1343 if (precision != glu::PRECISION_LOWP) 1344 { 1345 DE_ASSERT(numValues >= 10); 1346 for (int ndx = 0; ndx < 10; ndx++) 1347 { 1348 const float v = de::clamp(float(ndx) - 5.5f, ranges[precision].x(), ranges[precision].y()); 1349 std::fill((float*)values[0], (float*)values[0] + scalarSize, v); 1350 numSpecialCases += 1; 1351 } 1352 } 1353 1354 // Random cases. 
1355 fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), (float*)values[0] + numSpecialCases*scalarSize, (numValues-numSpecialCases)*scalarSize); 1356 1357 // If precision is mediump, make sure values can be represented in fp16 exactly 1358 if (precision == glu::PRECISION_MEDIUMP) 1359 { 1360 for (int ndx = 0; ndx < numValues*scalarSize; ndx++) 1361 ((float*)values[0])[ndx] = tcu::Float16(((float*)values[0])[ndx]).asFloat(); 1362 } 1363 } 1364 1365 bool compare (const void* const* inputs, const void* const* outputs) 1366 { 1367 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 1368 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 1369 const bool hasZeroSign = supportsSignedZero(precision); 1370 const int scalarSize = glu::getDataTypeScalarSize(type); 1371 1372 if (precision == glu::PRECISION_HIGHP || precision == glu::PRECISION_MEDIUMP) 1373 { 1374 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 1375 { 1376 const float in0 = ((const float*)inputs[0])[compNdx]; 1377 const float out0 = ((const float*)outputs[0])[compNdx]; 1378 1379 if (deFloatFrac(in0) == 0.5f) 1380 { 1381 // Allow both ceil(in) and floor(in) 1382 const float ref0 = deFloatFloor(in0); 1383 const float ref1 = deFloatCeil(in0); 1384 const deUint32 ulpDiff0 = hasZeroSign ? getUlpDiff(out0, ref0) : getUlpDiffIgnoreZeroSign(out0, ref0); 1385 const deUint32 ulpDiff1 = hasZeroSign ? getUlpDiff(out0, ref1) : getUlpDiffIgnoreZeroSign(out0, ref1); 1386 1387 if (ulpDiff0 > 0 && ulpDiff1 > 0) 1388 { 1389 m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref0) << " or " << HexFloat(ref1) << ", got ULP diff " << tcu::toHex(de::min(ulpDiff0, ulpDiff1)); 1390 return false; 1391 } 1392 } 1393 else 1394 { 1395 // Require exact result 1396 const float ref = roundEven(in0); 1397 const deUint32 ulpDiff = hasZeroSign ? 
getUlpDiff(out0, ref) : getUlpDiffIgnoreZeroSign(out0, ref);

					if (ulpDiff > 0)
					{
						m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref) << ", got ULP diff " << tcu::toHex(ulpDiff);
						return false;
					}
				}
			}
		}
		else
		{
			const int		mantissaBits	= getMinMantissaBits(precision);
			const deUint32	maxUlpDiff		= getMaxUlpDiffFromBits(mantissaBits);	// ULP diff for rounded integer value.
			const float		eps				= getEpsFromBits(1.0f, mantissaBits);	// epsilon for rounding bounds

			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float	in0		= ((const float*)inputs[0])[compNdx];
				const float	out0	= ((const float*)outputs[0])[compNdx];
				const int	minRes	= int(roundEven(in0-eps));
				const int	maxRes	= int(roundEven(in0+eps));
				bool		anyOk	= false;

				for (int roundedVal = minRes; roundedVal <= maxRes; roundedVal++)
				{
					const deUint32 ulpDiff = getUlpDiffIgnoreZeroSign(out0, float(roundedVal));

					if (ulpDiff <= maxUlpDiff)
					{
						anyOk = true;
						break;
					}
				}

				if (!anyOk)
				{
					m_failMsg << "Expected [" << compNdx << "] = [" << minRes << ", " << maxRes << "] with ULP threshold " << tcu::toHex(maxUlpDiff);
					return false;
				}
			}
		}

		return true;
	}
};

// Test case for the ceil() built-in function.
class CeilCase : public CommonFunctionCase
{
public:
	CeilCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "ceil", shaderType)
	{
		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
		m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
		m_spec.source = "out0 = ceil(in0);";
	}

	// Uniform random inputs; mediump values are quantized to fp16 so results
	// stay exactly representable.
	void getInputValues (int numValues, void* const* values) const
	{
		const Vec2 ranges[] =
		{
			Vec2(-2.0f,		2.0f),	// lowp
			Vec2(-1e3f,		1e3f),	// mediump
			Vec2(-1e7f,		1e7f)	// highp
		};

		de::Random				rnd			(deStringHash(getName()) ^ 0xac23fu);
		const glu::DataType		type		= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision	= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize	= glu::getDataTypeScalarSize(type);

		// Random cases.
		fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), (float*)values[0], numValues*scalarSize);

		// If precision is mediump, make sure values can be represented in fp16 exactly
		if (precision == glu::PRECISION_MEDIUMP)
		{
			for (int ndx = 0; ndx < numValues*scalarSize; ndx++)
				((float*)values[0])[ndx] = tcu::Float16(((float*)values[0])[ndx]).asFloat();
		}
	}

	// Highp/mediump require an exact ceil(); lowp accepts any integer in the
	// eps window (and -0 when 0 is inside the window).
	bool compare (const void* const* inputs, const void* const* outputs)
	{
		const glu::DataType		type		= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision	= m_spec.inputs[0].varType.getPrecision();
		const bool				hasZeroSign	= supportsSignedZero(precision);
		const int				scalarSize	= glu::getDataTypeScalarSize(type);

		if (precision == glu::PRECISION_HIGHP || precision == glu::PRECISION_MEDIUMP)
		{
			// Require exact result.
			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float	in0		= ((const float*)inputs[0])[compNdx];
				const float	out0	= ((const float*)outputs[0])[compNdx];
				const float	ref		= deFloatCeil(in0);

				const deUint32 ulpDiff = hasZeroSign ? getUlpDiff(out0, ref) : getUlpDiffIgnoreZeroSign(out0, ref);

				if (ulpDiff > 0)
				{
					m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref) << ", got ULP diff " << tcu::toHex(ulpDiff);
					return false;
				}
			}
		}
		else
		{
			const int		mantissaBits	= getMinMantissaBits(precision);
			const deUint32	maxUlpDiff		= getMaxUlpDiffFromBits(mantissaBits);	// ULP diff for rounded integer value.
1509 const float eps = getEpsFromBits(1.0f, mantissaBits); // epsilon for rounding bounds 1510 1511 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 1512 { 1513 const float in0 = ((const float*)inputs[0])[compNdx]; 1514 const float out0 = ((const float*)outputs[0])[compNdx]; 1515 const int minRes = int(deFloatCeil(in0-eps)); 1516 const int maxRes = int(deFloatCeil(in0+eps)); 1517 bool anyOk = false; 1518 1519 for (int roundedVal = minRes; roundedVal <= maxRes; roundedVal++) 1520 { 1521 const deUint32 ulpDiff = getUlpDiffIgnoreZeroSign(out0, float(roundedVal)); 1522 1523 if (ulpDiff <= maxUlpDiff) 1524 { 1525 anyOk = true; 1526 break; 1527 } 1528 } 1529 1530 if (!anyOk && de::inRange(0, minRes, maxRes)) 1531 { 1532 // Allow -0 as well. 1533 const int ulpDiff = de::abs((int)tcu::Float32(out0).bits() - (int)0x80000000u); 1534 anyOk = ((deUint32)ulpDiff <= maxUlpDiff); 1535 } 1536 1537 if (!anyOk) 1538 { 1539 m_failMsg << "Expected [" << compNdx << "] = [" << minRes << ", " << maxRes << "] with ULP threshold " << tcu::toHex(maxUlpDiff); 1540 return false; 1541 } 1542 } 1543 } 1544 1545 return true; 1546 } 1547}; 1548 1549class FractCase : public CommonFunctionCase 1550{ 1551public: 1552 FractCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType) 1553 : CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "fract", shaderType) 1554 { 1555 m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision))); 1556 m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision))); 1557 m_spec.source = "out0 = fract(in0);"; 1558 } 1559 1560 void getInputValues (int numValues, void* const* values) const 1561 { 1562 const Vec2 ranges[] = 1563 { 1564 Vec2(-2.0f, 2.0f), // lowp 1565 Vec2(-1e3f, 1e3f), // mediump 1566 Vec2(-1e7f, 1e7f) // highp 1567 }; 1568 1569 de::Random rnd (deStringHash(getName()) ^ 0xac23fu); 1570 const glu::DataType type = 
m_spec.inputs[0].varType.getBasicType(); 1571 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 1572 const int scalarSize = glu::getDataTypeScalarSize(type); 1573 int numSpecialCases = 0; 1574 1575 // Special cases. 1576 if (precision != glu::PRECISION_LOWP) 1577 { 1578 DE_ASSERT(numValues >= 10); 1579 for (int ndx = 0; ndx < 10; ndx++) 1580 { 1581 const float v = de::clamp(float(ndx) - 5.5f, ranges[precision].x(), ranges[precision].y()); 1582 std::fill((float*)values[0], (float*)values[0] + scalarSize, v); 1583 numSpecialCases += 1; 1584 } 1585 } 1586 1587 // Random cases. 1588 fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), (float*)values[0] + numSpecialCases*scalarSize, (numValues-numSpecialCases)*scalarSize); 1589 1590 // If precision is mediump, make sure values can be represented in fp16 exactly 1591 if (precision == glu::PRECISION_MEDIUMP) 1592 { 1593 for (int ndx = 0; ndx < numValues*scalarSize; ndx++) 1594 ((float*)values[0])[ndx] = tcu::Float16(((float*)values[0])[ndx]).asFloat(); 1595 } 1596 } 1597 1598 bool compare (const void* const* inputs, const void* const* outputs) 1599 { 1600 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 1601 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 1602 const bool hasZeroSign = supportsSignedZero(precision); 1603 const int scalarSize = glu::getDataTypeScalarSize(type); 1604 1605 if (precision == glu::PRECISION_HIGHP || precision == glu::PRECISION_MEDIUMP) 1606 { 1607 // Require exact result. 1608 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 1609 { 1610 const float in0 = ((const float*)inputs[0])[compNdx]; 1611 const float out0 = ((const float*)outputs[0])[compNdx]; 1612 const float ref = deFloatFrac(in0); 1613 1614 const deUint32 ulpDiff = hasZeroSign ? 
getUlpDiff(out0, ref) : getUlpDiffIgnoreZeroSign(out0, ref);

				if (ulpDiff > 0)
				{
					m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref) << ", got ULP diff " << tcu::toHex(ulpDiff);
					return false;
				}
			}
		}
		else
		{
			const int	mantissaBits	= getMinMantissaBits(precision);
			const float	eps				= getEpsFromBits(1.0f, mantissaBits);	// epsilon for rounding bounds

			for (int compNdx = 0; compNdx < scalarSize; compNdx++)
			{
				const float	in0		= ((const float*)inputs[0])[compNdx];
				const float	out0	= ((const float*)outputs[0])[compNdx];

				// Only check exactly when the integer part is unambiguous even
				// with the eps slop; otherwise just require result < 1.0.
				if (int(deFloatFloor(in0-eps)) == int(deFloatFloor(in0+eps)))
				{
					const float		ref			= deFloatFrac(in0);
					const int		bitsLost	= numBitsLostInOp(in0, ref);
					const deUint32	maxUlpDiff	= getMaxUlpDiffFromBits(de::max(0, mantissaBits-bitsLost));	// ULP diff for rounded integer value.
					const deUint32	ulpDiff		= getUlpDiffIgnoreZeroSign(out0, ref);

					if (ulpDiff > maxUlpDiff)
					{
						m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(ref) << " with ULP threshold " << tcu::toHex(maxUlpDiff) << ", got diff " << tcu::toHex(ulpDiff);
						return false;
					}
				}
				else
				{
					if (out0 >= 1.0f)
					{
						m_failMsg << "Expected [" << compNdx << "] < 1.0";
						return false;
					}
				}
			}
		}

		return true;
	}
};

// Reference frexp(): splits a float into a significand (magnitude in
// [0.5, 1)) and a power-of-two exponent, via exact bit manipulation.
static inline void frexp (float in, float* significand, int* exponent)
{
	const tcu::Float32 fpValue(in);

	if (!fpValue.isZero())
	{
		// Construct float that has exactly the mantissa, and exponent of -1.
		*significand	= tcu::Float32::construct(fpValue.sign(), -1, fpValue.mantissa()).asFloat();
		*exponent		= fpValue.exponent()+1;
	}
	else
	{
		// Zero keeps its sign; exponent is defined as 0.
		*significand	= fpValue.sign() < 0 ?
-0.0f : 0.0f;
		*exponent		= 0;
	}
}

// Reference ldexp(): significand * 2^exponent via exact exponent arithmetic.
static inline float ldexp (float significand, int exponent)
{
	const tcu::Float32 mant(significand);

	if (exponent == 0 && mant.isZero())
	{
		// Preserve the sign of zero.
		return mant.sign() < 0 ? -0.0f : 0.0f;
	}
	else
	{
		return tcu::Float32::construct(mant.sign(), exponent+mant.exponent(), mant.mantissa()).asFloat();
	}
}

// Test case for the frexp() built-in function.
class FrexpCase : public CommonFunctionCase
{
public:
	FrexpCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
		: CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "frexp", shaderType)
	{
		const int			vecSize	= glu::getDataTypeScalarSize(baseType);
		const glu::DataType	intType	= vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT;

		// Outputs are highp: both results are exactly representable.
		m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
		m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, glu::PRECISION_HIGHP)));
		m_spec.outputs.push_back(Symbol("out1", glu::VarType(intType, glu::PRECISION_HIGHP)));
		m_spec.source = "out0 = frexp(in0, out1);";
	}

	// First eight values are special cases (signed zeros and small powers of
	// two); the rest are random from the per-precision range.
	void getInputValues (int numValues, void* const* values) const
	{
		const Vec2 ranges[] =
		{
			Vec2(-2.0f,		2.0f),	// lowp
			Vec2(-1e3f,		1e3f),	// mediump
			Vec2(-1e7f,		1e7f)	// highp
		};

		de::Random				rnd			(deStringHash(getName()) ^ 0x2790au);
		const glu::DataType		type		= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision	= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize	= glu::getDataTypeScalarSize(type);

		// Special cases
		for (int compNdx = 0; compNdx < scalarSize; compNdx++)
		{
			((float*)values[0])[scalarSize*0 + compNdx] = 0.0f;
			((float*)values[0])[scalarSize*1 + compNdx] = -0.0f;
			((float*)values[0])[scalarSize*2 + compNdx] = 0.5f;
			((float*)values[0])[scalarSize*3 + compNdx] = -0.5f;
			((float*)values[0])[scalarSize*4 + compNdx] = 1.0f;
			((float*)values[0])[scalarSize*5 + compNdx] = -1.0f;
			((float*)values[0])[scalarSize*6 + compNdx] = 2.0f;
			((float*)values[0])[scalarSize*7 + compNdx] = -2.0f;
		}

		fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), (float*)values[0] + 8*scalarSize, (numValues-8)*scalarSize);
	}

	// The significand is compared with a reduced-mantissa ULP threshold; the
	// exponent must match the reference exactly.
	bool compare (const void* const* inputs, const void* const* outputs)
	{
		const glu::DataType		type			= m_spec.inputs[0].varType.getBasicType();
		const glu::Precision	precision		= m_spec.inputs[0].varType.getPrecision();
		const int				scalarSize		= glu::getDataTypeScalarSize(type);
		const bool				signedZero		= supportsSignedZero(precision);

		const int				mantissaBits	= getMinMantissaBits(precision);
		const deUint32			maxUlpDiff		= getMaxUlpDiffFromBits(mantissaBits);

		for (int compNdx = 0; compNdx < scalarSize; compNdx++)
		{
			const float	in0		= ((const float*)inputs[0])[compNdx];
			const float	out0	= ((const float*)outputs[0])[compNdx];
			const int	out1	= ((const int*)outputs[1])[compNdx];

			float	refOut0;
			int		refOut1;

			frexp(in0, &refOut0, &refOut1);

			const deUint32 ulpDiff0 = signedZero ?
getUlpDiff(out0, refOut0) : getUlpDiffIgnoreZeroSign(out0, refOut0); 1759 1760 if (ulpDiff0 > maxUlpDiff || out1 != refOut1) 1761 { 1762 m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(refOut0) << ", " << refOut1 << " with ULP threshold " 1763 << tcu::toHex(maxUlpDiff) << ", got ULP diff " << tcu::toHex(ulpDiff0); 1764 return false; 1765 } 1766 } 1767 1768 return true; 1769 } 1770}; 1771 1772class LdexpCase : public CommonFunctionCase 1773{ 1774public: 1775 LdexpCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType) 1776 : CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "ldexp", shaderType) 1777 { 1778 const int vecSize = glu::getDataTypeScalarSize(baseType); 1779 const glu::DataType intType = vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT; 1780 1781 m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision))); 1782 m_spec.inputs.push_back(Symbol("in1", glu::VarType(intType, glu::PRECISION_HIGHP))); 1783 m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, glu::PRECISION_HIGHP))); 1784 m_spec.source = "out0 = ldexp(in0, in1);"; 1785 } 1786 1787 void getInputValues (int numValues, void* const* values) const 1788 { 1789 const Vec2 ranges[] = 1790 { 1791 Vec2(-2.0f, 2.0f), // lowp 1792 Vec2(-1e3f, 1e3f), // mediump 1793 Vec2(-1e7f, 1e7f) // highp 1794 }; 1795 1796 de::Random rnd (deStringHash(getName()) ^ 0x2790au); 1797 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 1798 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 1799 const int scalarSize = glu::getDataTypeScalarSize(type); 1800 int valueNdx = 0; 1801 1802 { 1803 const float easySpecialCases[] = { 0.0f, -0.0f, 0.5f, -0.5f, 1.0f, -1.0f, 2.0f, -2.0f }; 1804 1805 DE_ASSERT(valueNdx + DE_LENGTH_OF_ARRAY(easySpecialCases) <= numValues); 1806 for (int caseNdx = 0; caseNdx < DE_LENGTH_OF_ARRAY(easySpecialCases); caseNdx++) 1807 { 1808 
float in0; 1809 int in1; 1810 1811 frexp(easySpecialCases[caseNdx], &in0, &in1); 1812 1813 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 1814 { 1815 ((float*)values[0])[valueNdx*scalarSize + compNdx] = in0; 1816 ((int*)values[1])[valueNdx*scalarSize + compNdx] = in1; 1817 } 1818 1819 valueNdx += 1; 1820 } 1821 } 1822 1823 { 1824 // \note lowp and mediump can not necessarily fit the values in hard cases, so we'll use only easy ones. 1825 const int numEasyRandomCases = precision == glu::PRECISION_HIGHP ? 50 : (numValues-valueNdx); 1826 1827 DE_ASSERT(valueNdx + numEasyRandomCases <= numValues); 1828 for (int caseNdx = 0; caseNdx < numEasyRandomCases; caseNdx++) 1829 { 1830 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 1831 { 1832 const float in = rnd.getFloat(ranges[precision].x(), ranges[precision].y()); 1833 float in0; 1834 int in1; 1835 1836 frexp(in, &in0, &in1); 1837 1838 ((float*)values[0])[valueNdx*scalarSize + compNdx] = in0; 1839 ((int*)values[1])[valueNdx*scalarSize + compNdx] = in1; 1840 } 1841 1842 valueNdx += 1; 1843 } 1844 } 1845 1846 { 1847 const int numHardRandomCases = numValues-valueNdx; 1848 DE_ASSERT(numHardRandomCases >= 0 && valueNdx + numHardRandomCases <= numValues); 1849 1850 for (int caseNdx = 0; caseNdx < numHardRandomCases; caseNdx++) 1851 { 1852 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 1853 { 1854 const int fpExp = rnd.getInt(-126, 127); 1855 const int sign = rnd.getBool() ? 
-1 : +1; 1856 const deUint32 mantissa = (1u<<23) | (rnd.getUint32() & ((1u<<23)-1)); 1857 const int in1 = rnd.getInt(de::max(-126, -126-fpExp), de::min(127, 127-fpExp)); 1858 const float in0 = tcu::Float32::construct(sign, fpExp, mantissa).asFloat(); 1859 1860 DE_ASSERT(de::inRange(in1, -126, 127)); // See Khronos bug 11180 1861 DE_ASSERT(de::inRange(in1+fpExp, -126, 127)); 1862 1863 const float out = ldexp(in0, in1); 1864 1865 DE_ASSERT(!tcu::Float32(out).isInf() && !tcu::Float32(out).isDenorm()); 1866 DE_UNREF(out); 1867 1868 ((float*)values[0])[valueNdx*scalarSize + compNdx] = in0; 1869 ((int*)values[1])[valueNdx*scalarSize + compNdx] = in1; 1870 } 1871 1872 valueNdx += 1; 1873 } 1874 } 1875 } 1876 1877 bool compare (const void* const* inputs, const void* const* outputs) 1878 { 1879 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 1880 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 1881 const int scalarSize = glu::getDataTypeScalarSize(type); 1882 const bool signedZero = supportsSignedZero(precision); 1883 1884 const int mantissaBits = getMinMantissaBits(precision); 1885 const deUint32 maxUlpDiff = getMaxUlpDiffFromBits(mantissaBits); 1886 1887 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 1888 { 1889 const float in0 = ((const float*)inputs[0])[compNdx]; 1890 const int in1 = ((const int*)inputs[1])[compNdx]; 1891 const float out0 = ((const float*)outputs[0])[compNdx]; 1892 const float refOut0 = ldexp(in0, in1); 1893 const deUint32 ulpDiff = signedZero ? 
getUlpDiff(out0, refOut0) : getUlpDiffIgnoreZeroSign(out0, refOut0); 1894 1895 const int inExp = tcu::Float32(in0).exponent(); 1896 1897 if (ulpDiff > maxUlpDiff) 1898 { 1899 m_failMsg << "Expected [" << compNdx << "] = " << HexFloat(refOut0) << ", (exp = " << inExp << ") with ULP threshold " 1900 << tcu::toHex(maxUlpDiff) << ", got ULP diff " << tcu::toHex(ulpDiff); 1901 return false; 1902 } 1903 } 1904 1905 return true; 1906 } 1907}; 1908 1909class FmaCase : public CommonFunctionCase 1910{ 1911public: 1912 FmaCase (Context& context, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType) 1913 : CommonFunctionCase(context, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "fma", shaderType) 1914 { 1915 m_spec.inputs.push_back(Symbol("a", glu::VarType(baseType, precision))); 1916 m_spec.inputs.push_back(Symbol("b", glu::VarType(baseType, precision))); 1917 m_spec.inputs.push_back(Symbol("c", glu::VarType(baseType, precision))); 1918 m_spec.outputs.push_back(Symbol("res", glu::VarType(baseType, precision))); 1919 m_spec.source = "res = fma(a, b, c);"; 1920 m_spec.globalDeclarations = "#extension GL_EXT_gpu_shader5 : require\n"; 1921 } 1922 1923 void init (void) 1924 { 1925 if (!m_context.getContextInfo().isExtensionSupported("GL_EXT_gpu_shader5")) 1926 throw tcu::NotSupportedError("GL_EXT_gpu_shader5 not supported"); 1927 1928 CommonFunctionCase::init(); 1929 } 1930 1931 void getInputValues (int numValues, void* const* values) const 1932 { 1933 const Vec2 ranges[] = 1934 { 1935 Vec2(-2.0f, 2.0f), // lowp 1936 Vec2(-127.f, 127.f), // mediump 1937 Vec2(-1e7f, 1e7f) // highp 1938 }; 1939 1940 de::Random rnd (deStringHash(getName()) ^ 0xac23fu); 1941 const glu::DataType type = m_spec.inputs[0].varType.getBasicType(); 1942 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 1943 const int scalarSize = glu::getDataTypeScalarSize(type); 1944 const int numMantissaBits = getMinMantissaBits(precision); 1945 const int 
maxNormalizedValueExponent = getMaxNormalizedValueExponent(precision); 1946 const int minNormalizedValueExponent = getMinNormalizedValueExponent(precision); 1947 const deUint32 representableMantissaMask = ((deUint32(1) << numMantissaBits) - 1) << (23 - (deUint32)numMantissaBits); 1948 const float specialCases[][3] = 1949 { 1950 // a b c 1951 { 0.0f, 0.0f, 0.0f }, 1952 { 0.0f, 1.0f, 0.0f }, 1953 { 0.0f, 0.0f, -1.0f }, 1954 { 1.0f, 1.0f, 0.0f }, 1955 { 1.0f, 1.0f, 1.0f }, 1956 { -1.0f, 1.0f, 0.0f }, 1957 { 1.0f, -1.0f, 0.0f }, 1958 { -1.0f, -1.0f, 0.0f }, 1959 { -0.0f, 1.0f, 0.0f }, 1960 { 1.0f, -0.0f, 0.0f } 1961 }; 1962 const int numSpecialCases = DE_LENGTH_OF_ARRAY(specialCases); 1963 1964 // Special cases 1965 for (int caseNdx = 0; caseNdx < numSpecialCases; caseNdx++) 1966 { 1967 for (int inputNdx = 0; inputNdx < 3; inputNdx++) 1968 { 1969 for (int scalarNdx = 0; scalarNdx < scalarSize; scalarNdx++) 1970 ((float*)values[inputNdx])[caseNdx*scalarSize + scalarNdx] = specialCases[caseNdx][inputNdx]; 1971 } 1972 } 1973 1974 // Random cases. 
1975 { 1976 const int numScalars = (numValues-numSpecialCases)*scalarSize; 1977 const int offs = scalarSize*numSpecialCases; 1978 1979 for (int inputNdx = 0; inputNdx < 3; inputNdx++) 1980 fillRandomScalars(rnd, ranges[precision].x(), ranges[precision].y(), (float*)values[inputNdx] + offs, numScalars); 1981 } 1982 1983 // Make sure the values are representable in the target format 1984 if (precision != glu::PRECISION_HIGHP) 1985 { 1986 const float largestRepresentableValue = tcu::Float32::constructBits(+1, maxNormalizedValueExponent, ((1u << numMantissaBits) - 1u) << (23u - (deUint32)numMantissaBits)).asFloat(); 1987 1988 // zero is not required to be representable, use smallest positive non-subnormal value 1989 const float zeroReplacement = tcu::Float32::constructBits(+1, minNormalizedValueExponent, 1).asFloat(); 1990 1991 for (int inputNdx = 0; inputNdx < 3; inputNdx++) 1992 { 1993 for (int caseNdx = 0; caseNdx < numValues; ++caseNdx) 1994 { 1995 for (int scalarNdx = 0; scalarNdx < scalarSize; scalarNdx++) 1996 { 1997 float& value = ((float*)values[inputNdx])[caseNdx * scalarSize + scalarNdx]; 1998 const tcu::Float32 float32Representation (value); 1999 2000 // flush too small values to zero 2001 if (float32Representation.exponent() < minNormalizedValueExponent) 2002 { 2003 value = zeroReplacement; 2004 } 2005 // clamp too large values 2006 else if (float32Representation.exponent() > maxNormalizedValueExponent) 2007 { 2008 value = (float32Representation.sign() == +1) ? 
(largestRepresentableValue) : (-largestRepresentableValue); 2009 } 2010 // remove unrepresentable mantissa bits 2011 else 2012 { 2013 const tcu::Float32 targetRepresentation (tcu::Float32::constructBits(float32Representation.sign(), 2014 float32Representation.exponent(), 2015 float32Representation.mantissaBits() & representableMantissaMask)); 2016 2017 value = targetRepresentation.asFloat(); 2018 } 2019 } 2020 } 2021 } 2022 } 2023 } 2024 2025 static tcu::Interval fma (glu::Precision precision, float a, float b, float c) 2026 { 2027 const tcu::FloatFormat formats[] = 2028 { 2029 // minExp maxExp mantissa exact, subnormals infinities NaN 2030 tcu::FloatFormat(0, 0, 7, false, tcu::YES, tcu::MAYBE, tcu::MAYBE), 2031 tcu::FloatFormat(-13, 13, 9, false, tcu::MAYBE, tcu::MAYBE, tcu::MAYBE), 2032 tcu::FloatFormat(-126, 127, 23, true, tcu::MAYBE, tcu::YES, tcu::MAYBE) 2033 }; 2034 const tcu::FloatFormat& format = de::getSizedArrayElement<glu::PRECISION_LAST>(formats, precision); 2035 const tcu::Interval ia = format.convert(a); 2036 const tcu::Interval ib = format.convert(b); 2037 const tcu::Interval ic = format.convert(c); 2038 tcu::Interval prod0; 2039 tcu::Interval prod1; 2040 tcu::Interval prod2; 2041 tcu::Interval prod3; 2042 tcu::Interval prod; 2043 tcu::Interval res; 2044 2045 TCU_SET_INTERVAL(prod0, tmp, tmp = ia.lo() * ib.lo()); 2046 TCU_SET_INTERVAL(prod1, tmp, tmp = ia.lo() * ib.hi()); 2047 TCU_SET_INTERVAL(prod2, tmp, tmp = ia.hi() * ib.lo()); 2048 TCU_SET_INTERVAL(prod3, tmp, tmp = ia.hi() * ib.hi()); 2049 2050 prod = format.convert(format.roundOut(prod0 | prod1 | prod2 | prod3, ia.isFinite() && ib.isFinite())); 2051 2052 TCU_SET_INTERVAL_BOUNDS(res, tmp, 2053 tmp = prod.lo() + ic.lo(), 2054 tmp = prod.hi() + ic.hi()); 2055 2056 return format.convert(format.roundOut(res, prod.isFinite() && ic.isFinite())); 2057 } 2058 2059 bool compare (const void* const* inputs, const void* const* outputs) 2060 { 2061 const glu::DataType type = 
m_spec.inputs[0].varType.getBasicType(); 2062 const glu::Precision precision = m_spec.inputs[0].varType.getPrecision(); 2063 const int scalarSize = glu::getDataTypeScalarSize(type); 2064 2065 for (int compNdx = 0; compNdx < scalarSize; compNdx++) 2066 { 2067 const float a = ((const float*)inputs[0])[compNdx]; 2068 const float b = ((const float*)inputs[1])[compNdx]; 2069 const float c = ((const float*)inputs[2])[compNdx]; 2070 const float res = ((const float*)outputs[0])[compNdx]; 2071 const tcu::Interval ref = fma(precision, a, b, c); 2072 2073 if (!ref.contains(res)) 2074 { 2075 m_failMsg << "Expected [" << compNdx << "] = " << ref; 2076 return false; 2077 } 2078 } 2079 2080 return true; 2081 } 2082}; 2083 2084ShaderCommonFunctionTests::ShaderCommonFunctionTests (Context& context) 2085 : TestCaseGroup(context, "common", "Common function tests") 2086{ 2087} 2088 2089ShaderCommonFunctionTests::~ShaderCommonFunctionTests (void) 2090{ 2091} 2092 2093template<class TestClass> 2094static void addFunctionCases (TestCaseGroup* parent, const char* functionName, bool floatTypes, bool intTypes, bool uintTypes, deUint32 shaderBits) 2095{ 2096 tcu::TestCaseGroup* group = new tcu::TestCaseGroup(parent->getTestContext(), functionName, functionName); 2097 parent->addChild(group); 2098 2099 const glu::DataType scalarTypes[] = 2100 { 2101 glu::TYPE_FLOAT, 2102 glu::TYPE_INT, 2103 glu::TYPE_UINT 2104 }; 2105 2106 for (int scalarTypeNdx = 0; scalarTypeNdx < DE_LENGTH_OF_ARRAY(scalarTypes); scalarTypeNdx++) 2107 { 2108 const glu::DataType scalarType = scalarTypes[scalarTypeNdx]; 2109 2110 if ((!floatTypes && scalarType == glu::TYPE_FLOAT) || 2111 (!intTypes && scalarType == glu::TYPE_INT) || 2112 (!uintTypes && scalarType == glu::TYPE_UINT)) 2113 continue; 2114 2115 for (int vecSize = 1; vecSize <= 4; vecSize++) 2116 { 2117 for (int prec = glu::PRECISION_LOWP; prec <= glu::PRECISION_HIGHP; prec++) 2118 { 2119 for (int shaderTypeNdx = 0; shaderTypeNdx < glu::SHADERTYPE_LAST; 
shaderTypeNdx++) 2120 { 2121 if (shaderBits & (1<<shaderTypeNdx)) 2122 group->addChild(new TestClass(parent->getContext(), glu::DataType(scalarType + vecSize - 1), glu::Precision(prec), glu::ShaderType(shaderTypeNdx))); 2123 } 2124 } 2125 } 2126 } 2127} 2128 2129void ShaderCommonFunctionTests::init (void) 2130{ 2131 enum 2132 { 2133 VS = (1<<glu::SHADERTYPE_VERTEX), 2134 TC = (1<<glu::SHADERTYPE_TESSELLATION_CONTROL), 2135 TE = (1<<glu::SHADERTYPE_TESSELLATION_EVALUATION), 2136 GS = (1<<glu::SHADERTYPE_GEOMETRY), 2137 FS = (1<<glu::SHADERTYPE_FRAGMENT), 2138 CS = (1<<glu::SHADERTYPE_COMPUTE), 2139 2140 ALL_SHADERS = VS|TC|TE|GS|FS|CS, 2141 NEW_SHADERS = TC|TE|GS|CS, 2142 }; 2143 2144 // Float? Int? Uint? Shaders 2145 addFunctionCases<AbsCase> (this, "abs", true, true, false, NEW_SHADERS); 2146 addFunctionCases<SignCase> (this, "sign", true, true, false, NEW_SHADERS); 2147 addFunctionCases<FloorCase> (this, "floor", true, false, false, NEW_SHADERS); 2148 addFunctionCases<TruncCase> (this, "trunc", true, false, false, NEW_SHADERS); 2149 addFunctionCases<RoundCase> (this, "round", true, false, false, NEW_SHADERS); 2150 addFunctionCases<RoundEvenCase> (this, "roundeven", true, false, false, NEW_SHADERS); 2151 addFunctionCases<CeilCase> (this, "ceil", true, false, false, NEW_SHADERS); 2152 addFunctionCases<FractCase> (this, "fract", true, false, false, NEW_SHADERS); 2153 // mod 2154 addFunctionCases<ModfCase> (this, "modf", true, false, false, NEW_SHADERS); 2155 // min 2156 // max 2157 // clamp 2158 // mix 2159 // step 2160 // smoothstep 2161 addFunctionCases<IsnanCase> (this, "isnan", true, false, false, NEW_SHADERS); 2162 addFunctionCases<IsinfCase> (this, "isinf", true, false, false, NEW_SHADERS); 2163 addFunctionCases<FloatBitsToIntCase> (this, "floatbitstoint", true, false, false, NEW_SHADERS); 2164 addFunctionCases<FloatBitsToUintCase> (this, "floatbitstouint", true, false, false, NEW_SHADERS); 2165 2166 addFunctionCases<FrexpCase> (this, "frexp", true, false, 
false, ALL_SHADERS); 2167 addFunctionCases<LdexpCase> (this, "ldexp", true, false, false, ALL_SHADERS); 2168 addFunctionCases<FmaCase> (this, "fma", true, false, false, ALL_SHADERS); 2169 2170 // (u)intBitsToFloat() 2171 { 2172 const deUint32 shaderBits = NEW_SHADERS; 2173 tcu::TestCaseGroup* intGroup = new tcu::TestCaseGroup(m_testCtx, "intbitstofloat", "intBitsToFloat() Tests"); 2174 tcu::TestCaseGroup* uintGroup = new tcu::TestCaseGroup(m_testCtx, "uintbitstofloat", "uintBitsToFloat() Tests"); 2175 2176 addChild(intGroup); 2177 addChild(uintGroup); 2178 2179 for (int vecSize = 1; vecSize < 4; vecSize++) 2180 { 2181 const glu::DataType intType = vecSize > 1 ? glu::getDataTypeIntVec(vecSize) : glu::TYPE_INT; 2182 const glu::DataType uintType = vecSize > 1 ? glu::getDataTypeUintVec(vecSize) : glu::TYPE_UINT; 2183 2184 for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++) 2185 { 2186 if (shaderBits & (1<<shaderType)) 2187 { 2188 intGroup->addChild(new BitsToFloatCase(m_context, intType, glu::ShaderType(shaderType))); 2189 uintGroup->addChild(new BitsToFloatCase(m_context, uintType, glu::ShaderType(shaderType))); 2190 } 2191 } 2192 } 2193 } 2194} 2195 2196} // Functional 2197} // gles31 2198} // deqp 2199