/external/clang/test/Sema/

variadic-promotion.c
    5: void test_floating_promotion(__fp16 *f16, float f32, double f64) {   [argument]
    6:   variadic(3, *f16, f32, f64);
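The Sema test above exercises C's default argument promotions: float (and Clang's __fp16) values passed through a `...` parameter list are widened to double, so a callee must read every floating argument back with va_arg(ap, double). A minimal sketch of that behaviour, using a hypothetical variadic() stand-in for the test's callee:

    #include <stdarg.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the test's variadic(): reads `count`
       floating-point arguments. Default argument promotions mean float
       arguments arrive as double, so va_arg must use double. */
    static void variadic(int count, ...)
    {
        va_list ap;
        va_start(ap, count);
        for (int i = 0; i < count; i++)
            printf("arg %d = %f\n", i, va_arg(ap, double));
        va_end(ap);
    }

    int main(void)
    {
        float  f32 = 1.5f;
        double f64 = 2.5;
        variadic(2, f32, f64);   /* f32 is promoted to double at the call */
        return 0;
    }
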
/external/clang/test/CodeGen/

mms-bitfields.c
    5: long long f64;      [member in struct:s1]
   12: long long f64[4];   [member in struct:s2]

ms_struct.c
    6: long long f64;      [member in struct:s1]
   13: long long f64[4];   [member in struct:s2]
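Both CodeGen tests place a plain `long long f64` member alongside bitfields and check Microsoft-compatible record layout. Under the MS rules (enabled with -mms-bitfields or __attribute__((ms_struct))), adjacent bitfields share a storage unit only when their declared types have the same size; a change of type size starts a new unit. A rough sketch of the kind of layout being probed (field names other than f64 are invented):

    #include <stdio.h>
    #include <stddef.h>

    struct __attribute__((ms_struct)) s1 {
        int       a : 1;   /* allocated in a 4-byte storage unit */
        short     b : 1;   /* different type size: MS rules open a new unit */
        long long f64;     /* as in the tests; the name just puns on the width */
    };

    int main(void)
    {
        printf("sizeof(struct s1)        = %zu\n", sizeof(struct s1));
        printf("offsetof(struct s1, f64) = %zu\n", offsetof(struct s1, f64));
        return 0;
    }
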
/external/valgrind/VEX/useful/

fp_80_64.c
    44: static void convert_f80le_to_f64le_HW ( /*IN*/UChar* f80, /*OUT*/UChar* f64 )
    48:    : "r" (&f80[0]), "r" (&f64[0])
    52: static void convert_f64le_to_f80le_HW ( /*IN*/UChar* f64, /*OUT*/UChar* f80 )   [argument]
    56:    : "r" (&f64[0]), "r" (&f80[0])
   103: static void convert_f64le_to_f80le ( /*IN*/UChar* f64, /*OUT*/UChar* f80 )   [argument]
   109:    sign = toUChar( (f64[7] >> 7) & 1 );
   110:    bexp = (f64[7] << 4) | ((f64[6] >> 4) & 0x0F);
   120:    (f64[6] & 0x0F) == 0
   121:    && f64[
   507: do_64_to_80_test( Int test_no, UChar* f64, UChar* f80h, UChar* f80s)   [argument]
   593: UChar* f64 = malloc(8);   [local]
   [all...]
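The conversion routines matched here pull the IEEE-754 fields of a little-endian double straight out of its byte image: byte 7 carries the sign bit and the top of the 11-bit exponent, byte 6 the rest of the exponent plus the start of the 52-bit mantissa. A standalone sketch of the same extraction (the explicit 0x7FF mask stands in for the masking the original does on a later line):

    #include <stdio.h>
    #include <string.h>

    typedef unsigned char UChar;

    int main(void)
    {
        double d = -1.5;
        UChar f64[8];
        memcpy(f64, &d, 8);          /* assumes a little-endian host */

        /* Same field extraction as convert_f64le_to_f80le: */
        int sign = (f64[7] >> 7) & 1;
        int bexp = ((f64[7] << 4) | ((f64[6] >> 4) & 0x0F)) & 0x7FF;

        printf("sign = %d, biased exponent = 0x%03X\n", sign, bexp);
        /* -1.5 -> sign = 1, bexp = 0x3FF (unbiased exponent 0) */
        return 0;
    }
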
/external/v8/test/mjsunit/compiler/

regress-f64-w32-change.js
   13: var f64 = x ? -1 : b;
   14: f64use = f64 + 0.5;
   15: var w32 = x ? 1 : f64;

regress-store-holey-double-array.js
   18: var f64 = new Float64Array(b);
   22: g(f64, a, 1);
   23: g(f64, a, 1);
   25: g(f64, a, 0);
/external/vulkan-validation-layers/libs/glm/gtx/

number_precision.hpp
   69: typedef f64 f64vec1;    //!< \brief Single-precision floating-point scalar. (from GLM_GTX_number_precision extension)
   76: typedef f64 f64mat1;    //!< \brief Double-precision floating-point scalar. (from GLM_GTX_number_precision extension)
   77: typedef f64 f64mat1x1;  //!< \brief Double-precision floating-point scalar. (from GLM_GTX_number_precision extension)
/external/llvm/test/MC/ARM/

single-precision-fp.s
    5: vadd.f64 d0, d1, d2
    6: vsub.f64 d2, d3, d4
    7: vdiv.f64 d4, d5, d6
    8: vmul.f64 d6, d7, d8
    9: vnmul.f64 d8, d9, d10
   11: @ CHECK-ERRORS-NEXT: vadd.f64 d0, d1, d2
   13: @ CHECK-ERRORS-NEXT: vsub.f64 d2, d3, d4
   15: @ CHECK-ERRORS-NEXT: vdiv.f64 d4, d5, d6
   17: @ CHECK-ERRORS-NEXT: vmul.f64 d6, d7, d8
   19: @ CHECK-ERRORS-NEXT: vnmul.f64 d [all...]

d16.s
    7: @ D16-NEXT: vadd.f64 d1, d2, d16
    8: vadd.f64 d1, d2, d16
   11: @ D16-NEXT: vadd.f64 d1, d17, d6
   12: vadd.f64 d1, d17, d6
   15: @ D16-NEXT: vadd.f64 d19, d7, d6
   16: vadd.f64 d19, d7, d6
   19: @ D16-NEXT: vcvt.f64.f32 d22, s4
   20: vcvt.f64.f32 d22, s4
   23: @ D16-NEXT: vcvt.f32.f64 s26, d30
   24: vcvt.f32.f64 s2 [all...]

fp-armv8.s
    5: vcvtt.f64.f16 d3, s1
    6: @ CHECK: vcvtt.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xee]
    7: vcvtt.f16.f64 s5, d12
    8: @ CHECK: vcvtt.f16.f64 s5, d12 @ encoding: [0xcc,0x2b,0xf3,0xee]
   10: vcvtb.f64.f16 d3, s1
   11: @ CHECK: vcvtb.f64.f16 d3, s1 @ encoding: [0x60,0x3b,0xb2,0xee]
   12: vcvtb.f16.f64 s4, d1
   13: @ CHECK: vcvtb.f16.f64 s4, d1 @ encoding: [0x41,0x2b,0xb3,0xee]
   15: vcvttge.f64.f16 d3, s1
   16: @ CHECK: vcvttge.f64 [all...]

thumb-fp-armv8.s
    5: vcvtt.f64.f16 d3, s1
    6: @ CHECK: vcvtt.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0xe0,0x3b]
    7: vcvtt.f16.f64 s5, d12
    8: @ CHECK: vcvtt.f16.f64 s5, d12 @ encoding: [0xf3,0xee,0xcc,0x2b]
   10: vcvtb.f64.f16 d3, s1
   11: @ CHECK: vcvtb.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0x60,0x3b]
   12: vcvtb.f16.f64 s4, d1
   13: @ CHECK: vcvtb.f16.f64 s4, d1 @ encoding: [0xb3,0xee,0x41,0x2b]
   16: vcvttge.f64.f16 d3, s1
   17: @ CHECK: vcvttge.f64 [all...]

directive-arch_extension-simd.s
   24: vmaxnm.f64 d0, d0, d0
   26: vminnm.f64 d0, d0, d0
   33: vcvta.s32.f64 s0, d0
   35: vcvta.u32.f64 s0, d0
   41: vcvtn.s32.f64 s0, d0
   43: vcvtn.u32.f64 s0, d0
   49: vcvtp.s32.f64 s0, d0
   51: vcvtp.u32.f64 s0, d0
   57: vcvtm.s32.f64 s0, d0
   59: vcvtm.u32.f64 s [all...]

invalid-fp-armv8.s
    5: vcvtt.f64.f16 d3, s1
    6: @ V7-NOT: vcvtt.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xee]
    7: vcvtt.f16.f64 s5, d12
    8: @ V7-NOT: vcvtt.f16.f64 s5, d12 @ encoding: [0xcc,0x2b,0xf3,0xee]
   39: vselgt.f64 s3, s2, s1
   43: vselgt.f64 q0, s3, q1
   48: vminnm.f64 s3, s2, s1
   52: vmaxnm.f64 q0, s3, q1
   54: vmaxnmgt.f64 q0, s3, q1
   57: vcvta.s32.f64 d [all...]

directive-arch_extension-fp.s
   35: vselgt.f64 d0, d0, d0
   37: vselge.f64 d0, d0, d0
   39: vseleq.f64 d0, d0, d0
   41: vselvs.f64 d0, d0, d0
   43: vmaxnm.f64 d0, d0, d0
   45: vminnm.f64 d0, d0, d0
   48: vcvtb.f64.f16 d0, s0
   50: vcvtb.f16.f64 s0, d0
   52: vcvtt.f64.f16 d0, s0
   54: vcvtt.f16.f64 s [all...]
/external/valgrind/VEX/priv/

guest_generic_x87.c
   105: void convert_f64le_to_f80le ( /*IN*/UChar* f64, /*OUT*/UChar* f80 )   [argument]
   111:    sign = toUChar( (f64[7] >> 7) & 1 );
   112:    bexp = (f64[7] << 4) | ((f64[6] >> 4) & 0x0F);
   122:    (f64[6] & 0x0F) == 0
   123:    && f64[5] == 0 && f64[4] == 0 && f64[3] == 0
   124:    && f64[2] == 0 && f64[ [all...]
/external/valgrind/none/tests/amd64/

nan80and64.c
   61: static void rev64 ( UChar* f64 )
   63:    SWAPC( f64[0], f64[7] );
   64:    SWAPC( f64[1], f64[6] );
   65:    SWAPC( f64[2], f64[5] );
   66:    SWAPC( f64[3], f64[4] );
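rev64 reverses the eight bytes of a double in place with four pairwise swaps, converting between little- and big-endian images of the value. A minimal self-contained sketch, assuming a SWAPC macro in the spirit of the test's:

    #include <stdio.h>

    typedef unsigned char UChar;

    /* Pairwise byte swap, modelled on the test's SWAPC macro. */
    #define SWAPC(a, b) do { UChar t = (a); (a) = (b); (b) = t; } while (0)

    static void rev64(UChar *f64)
    {
        SWAPC(f64[0], f64[7]);
        SWAPC(f64[1], f64[6]);
        SWAPC(f64[2], f64[5]);
        SWAPC(f64[3], f64[4]);
    }

    int main(void)
    {
        UChar buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
        rev64(buf);
        for (int i = 0; i < 8; i++)
            printf("%d ", buf[i]);   /* prints: 7 6 5 4 3 2 1 0 */
        printf("\n");
        return 0;
    }
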
bug132918.c
   13: double f64;   [local]
   28:    : /*in*/ "m" (f64), "m" (xx), "m" (yy)
   31:    res->d = f64;
/external/compiler-rt/lib/builtins/arm/

adddf3vfp.S
   23: vadd.f64 d6, d6, d7

divdf3vfp.S
   23: vdiv.f64 d5, d6, d7

eqdf2vfp.S
   24: vcmp.f64 d6, d7

extendsfdf2vfp.S
   23: vcvt.f64.f32 d7, s15   // convert single to double

fixdfsivfp.S
   23: vcvt.s32.f64 s15, d7   // convert double to 32-bit int into s15

fixunsdfsivfp.S
   24: vcvt.u32.f64 s15, d7   // convert double to 32-bit int into s15

floatsidfvfp.S
   23: vcvt.f64.s32 d7, s15   // convert 32-bit int in s15 to double in d7

floatunssidfvfp.S
   23: vcvt.f64.u32 d7, s15   // convert 32-bit int in s15 to double in d7
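Each of these files wraps a single VFP instruction behind a soft-float ABI helper symbol; in C terms the routines compute ordinary double-precision arithmetic and conversions. A sketch of their observable behaviour as plain C (illustrative only; the real symbols live in compiler-rt with leading double underscores and VFP register conventions):

    #include <stdio.h>

    /* Plain-C equivalents of the VFP routines listed above. */
    static double   adddf3vfp(double a, double b)  { return a + b; }       /* vadd.f64 */
    static double   divdf3vfp(double a, double b)  { return a / b; }       /* vdiv.f64 */
    static int      eqdf2vfp(double a, double b)   { return a == b; }      /* vcmp.f64 */
    static double   extendsfdf2vfp(float a)        { return (double)a; }   /* vcvt.f64.f32 */
    static int      fixdfsivfp(double a)           { return (int)a; }      /* vcvt.s32.f64 */
    static unsigned fixunsdfsivfp(double a)        { return (unsigned)a; } /* vcvt.u32.f64 */
    static double   floatsidfvfp(int a)            { return (double)a; }   /* vcvt.f64.s32 */
    static double   floatunssidfvfp(unsigned a)    { return (double)a; }   /* vcvt.f64.u32 */

    int main(void)
    {
        printf("add:    %f\n", adddf3vfp(1.25, 2.5));   /* 3.750000 */
        printf("div:    %f\n", divdf3vfp(7.5, 2.0));    /* 3.750000 */
        printf("eq:     %d\n", eqdf2vfp(1.0, 1.0));     /* 1 */
        printf("extend: %f\n", extendsfdf2vfp(0.5f));   /* 0.500000 */
        printf("fix:    %d\n", fixdfsivfp(3.9));        /* 3, truncates toward zero */
        printf("fixuns: %u\n", fixunsdfsivfp(3.9));     /* 3 */
        printf("float:  %f\n", floatsidfvfp(-7));       /* -7.000000 */
        printf("floatu: %f\n", floatunssidfvfp(7u));    /* 7.000000 */
        return 0;
    }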