Searched refs:a2 (Results 301 - 325 of 920) sorted by path

/external/clang/test/CXX/temp/temp.decls/temp.variadic/
sizeofpack.cpp
33 array<2> a2; local
34 int aa2 = make_array<int>(a2, 0L, "abc");
/external/clang/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/
p3.cpp
20 A<volatile int> a2 = f1(vi); local
50 A<volatile int> a2 = f2(vi); local
/external/clang/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/
p22.cpp
9 template<class T1, class T2> double& f(T1 a1, T2 a2);
/external/clang/test/CodeGen/
arm-homogenous.c
168 double a2; member in struct:__anon18602
204 __short4 a2; member in struct:__anon18603
arm64_vCMP.c
14 int64_t test_vceqd_s64(int64_t a1, int64_t a2) { argument
16 return vceqd_s64(a1, a2);
17 // CHECK: [[BIT:%[0-9a-zA-Z.]+]] = icmp eq i64 %a1, %a2
21 int64_t test_vceqd_f64(float64_t a1, float64_t a2) { argument
23 return vceqd_f64(a1, a2);
24 // CHECK: [[BIT:%[0-9a-zA-Z.]+]] = fcmp oeq double %a1, %a2
28 uint64_t test_vcgtd_u64(uint64_t a1, uint64_t a2) { argument
30 return vcgtd_u64(a1, a2);
31 // CHECK: [[BIT:%[0-9a-zA-Z.]+]] = icmp ugt i64 %a1, %a2
35 uint64_t test_vcled_u64(uint64_t a1, uint64_t a2) { argument
49 test_vceqq_u64(uint64x2_t a1, uint64x2_t a2) argument
55 test_vcgeq_s64(int64x2_t a1, int64x2_t a2) argument
61 test_vcgeq_u64(uint64x2_t a1, uint64x2_t a2) argument
67 test_vcgtq_s64(int64x2_t a1, int64x2_t a2) argument
73 test_vcgtq_u64(uint64x2_t a1, uint64x2_t a2) argument
79 test_vcleq_s64(int64x2_t a1, int64x2_t a2) argument
85 test_vcleq_u64(uint64x2_t a1, uint64x2_t a2) argument
91 test_vcltq_s64(int64x2_t a1, int64x2_t a2) argument
97 test_vcltq_u64(uint64x2_t a1, uint64x2_t a2) argument
[all...]
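
All of the arm64_vCMP.c hits follow one pattern: a NEON compare intrinsic lowers to an LLVM icmp or fcmp whose one-bit result is sign-extended into an all-ones/all-zeros mask, which is what the CHECK lines above pin down. A minimal sketch of that pattern, assuming an AArch64 target with NEON enabled (the function name is hypothetical):

#include <arm_neon.h>

// Mirrors test_vceqd_s64 above: the compare emits "icmp eq i64 %a1, %a2",
// and the i1 result is sign-extended so equality yields an all-ones mask.
uint64_t my_vceqd_s64(int64_t a1, int64_t a2) {
  return vceqd_s64(a1, a2);
}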
arm64_vLdStNum_lane.c
6 int64x2x2_t test_vld2q_lane_s64(const void * a1, int64x2x2_t a2) { argument
8 return vld2q_lane_s64(a1, a2, 1);
12 uint64x2x2_t test_vld2q_lane_u64(const void * a1, uint64x2x2_t a2) { argument
14 return vld2q_lane_u64(a1, a2, 1);
18 int64x1x2_t test_vld2_lane_s64(const void * a1, int64x1x2_t a2) { argument
20 return vld2_lane_s64(a1, a2, 0);
24 uint64x1x2_t test_vld2_lane_u64(const void * a1, uint64x1x2_t a2) { argument
26 return vld2_lane_u64(a1, a2, 0);
30 poly8x16x2_t test_vld2q_lane_p8(const void * a1, poly8x16x2_t a2) { argument
32 return vld2q_lane_p8(a1, a2,
37 test_vld2q_lane_u8(const void * a1, uint8x16x2_t a2) argument
43 test_vld3q_lane_s64(const void * a1, int64x2x3_t a2) argument
49 test_vld3q_lane_u64(const void * a1, uint64x2x3_t a2) argument
55 test_vld3_lane_s64(const void * a1, int64x1x3_t a2) argument
61 test_vld3_lane_u64(const void * a1, uint64x1x3_t a2) argument
67 test_vld3_lane_s8(const void * a1, int8x8x3_t a2) argument
73 test_vld3q_lane_p8(const void * a1, poly8x16x3_t a2) argument
79 test_vld3q_lane_u8(const void * a1, uint8x16x3_t a2) argument
85 test_vld4q_lane_s64(const void * a1, int64x2x4_t a2) argument
91 test_vld4q_lane_u64(const void * a1, uint64x2x4_t a2) argument
97 test_vld4_lane_s64(const void * a1, int64x1x4_t a2) argument
103 test_vld4_lane_u64(const void * a1, uint64x1x4_t a2) argument
109 test_vld4_lane_s8(const void * a1, int8x8x4_t a2) argument
115 test_vld4_lane_u8(const void * a1, uint8x8x4_t a2) argument
121 test_vld4q_lane_p8(const void * a1, poly8x16x4_t a2) argument
127 test_vld4q_lane_s8(const void * a1, int8x16x4_t a2) argument
136 test_vld4q_lane_u8(const void * a1, uint8x16x4_t a2) argument
[all...]
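
The vld2/vld3/vld4 lane tests in arm64_vLdStNum_lane.c all load one element per constituent vector from interleaved memory into a single lane, leaving every other lane of the passed-in vectors untouched. A hedged sketch of the vld2q case (hypothetical name, AArch64 target assumed):

#include <arm_neon.h>

// Loads lane 1 of each of the two vectors in 'acc' from *p; all other
// lanes keep their incoming values. The lane index must be a compile-time
// constant, which is why the tests above hard-code 0 or 1.
int64x2x2_t load_pair_lane1(const int64_t *p, int64x2x2_t acc) {
  return vld2q_lane_s64(p, acc, 1);
}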
arm64_vMaxMin.c
22 uint8x8_t test_vmin_u8(uint8x8_t a1, uint8x8_t a2) { argument
24 return vmin_u8(a1, a2);
28 uint8x16_t test_vminq_u8(uint8x16_t a1, uint8x16_t a2) { argument
30 return vminq_u8(a1, a2);
34 int16x8_t test_vmaxq_s16(int16x8_t a1, int16x8_t a2) { argument
36 return vmaxq_s16(a1, a2);
41 float64x2_t test_vmaxq_f64(float64x2_t a1, float64x2_t a2) { argument
43 return vmaxq_f64(a1, a2);
47 float32x4_t test_vmaxq_f32(float32x4_t a1, float32x4_t a2) { argument
49 return vmaxq_f32(a1, a2);
53 test_vminq_f64(float64x2_t a1, float64x2_t a2) argument
59 test_vmax_f32(float32x2_t a1, float32x2_t a2) argument
65 test_vmax_s32(int32x2_t a1, int32x2_t a2) argument
71 test_vmin_u32(uint32x2_t a1, uint32x2_t a2) argument
[all...]
arm64_vca.c
6 uint32x2_t test_vcale_f32(float32x2_t a1, float32x2_t a2) { argument
8 return vcale_f32(a1, a2);
13 uint32x4_t test_vcaleq_f32(float32x4_t a1, float32x4_t a2) { argument
15 return vcaleq_f32(a1, a2);
16 // CHECK: llvm.aarch64.neon.facge.v4i32.v4f32{{.*a2,.*a1}}
20 uint32x2_t test_vcalt_f32(float32x2_t a1, float32x2_t a2) { argument
22 return vcalt_f32(a1, a2);
23 // CHECK: llvm.aarch64.neon.facgt.v2i32.v2f32{{.*a2,.*a1}}
27 uint32x4_t test_vcaltq_f32(float32x4_t a1, float32x4_t a2) { argument
29 return vcaltq_f32(a1, a2);
33 test_vcagtq_f64(float64x2_t a1, float64x2_t a2) argument
40 test_vcaltq_f64(float64x2_t a1, float64x2_t a2) argument
47 test_vcageq_f64(float64x2_t a1, float64x2_t a2) argument
54 test_vcaleq_f64(float64x2_t a1, float64x2_t a2) argument
[all...]
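
The operand order in these CHECK patterns ({{.*a2,.*a1}}) is the point of the arm64_vca.c test: AArch64 only has absolute-compare-greater instructions (facge, facgt), so the less-than and less-equal intrinsics are emitted with their operands commuted. A minimal sketch (hypothetical name):

#include <arm_neon.h>

// |a1| <= |a2| has no direct instruction; Clang emits the swapped form
// facge(a2, a1), exactly what the {{.*a2,.*a1}} patterns assert.
uint32x2_t abs_le(float32x2_t a1, float32x2_t a2) {
  return vcale_f32(a1, a2);
}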
arm64_vcopy.c
7 int8x16_t test_vcopyq_laneq_s8(int8x16_t a1, int8x16_t a2) { argument
9 return vcopyq_laneq_s8(a1, (int64_t) 3, a2, (int64_t) 13);
10 // CHECK: shufflevector <16 x i8> %a1, <16 x i8> %a2, <16 x i32> <i32 0, i32 1, i32 2, i32 29, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
13 uint8x16_t test_vcopyq_laneq_u8(uint8x16_t a1, uint8x16_t a2) { argument
15 return vcopyq_laneq_u8(a1, (int64_t) 3, a2, (int64_t) 13);
16 // CHECK: shufflevector <16 x i8> %a1, <16 x i8> %a2, <16 x i32> <i32 0, i32 1, i32 2, i32 29, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
20 int16x8_t test_vcopyq_laneq_s16(int16x8_t a1, int16x8_t a2) { argument
22 return vcopyq_laneq_s16(a1, (int64_t) 3, a2, (int64_t) 7);
23 // CHECK: shufflevector <8 x i16> %a1, <8 x i16> %a2, <8 x i32> <i32 0, i32 1, i32 2, i32 15, i32 4, i32 5, i32 6, i32 7>
27 uint16x8_t test_vcopyq_laneq_u16(uint16x8_t a1, uint16x8_t a2) { argument
34 test_vcopyq_laneq_s32(int32x4_t a1, int32x4_t a2) argument
40 test_vcopyq_laneq_u32(uint32x4_t a1, uint32x4_t a2) argument
46 test_vcopyq_laneq_s64(int64x2_t a1, int64x2_t a2) argument
52 test_vcopyq_laneq_u64(uint64x2_t a1, uint64x2_t a2) argument
58 test_vcopyq_laneq_f32(float32x4_t a1, float32x4_t a2) argument
64 test_vcopyq_laneq_f64(float64x2_t a1, float64x2_t a2) argument
[all...]
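
The constant 29 in the shufflevector masks above is lane arithmetic: in a two-operand shuffle the second vector's lanes are numbered after the first's, so lane 13 of the 16-lane %a2 appears as mask index 16 + 13 = 29 in slot 3. A hedged sketch (hypothetical name):

#include <arm_neon.h>

// Copies lane 13 of 'src' into lane 3 of 'dst'; in the emitted
// shufflevector, mask slot 3 holds 29 because the second operand's
// lanes are numbered 16..31.
int8x16_t copy_lane_13_to_3(int8x16_t dst, int8x16_t src) {
  return vcopyq_laneq_s8(dst, 3, src, 13);
}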
arm64_vfma.c
6 float32x2_t test_vfma_f32(float32x2_t a1, float32x2_t a2, float32x2_t a3) { argument
8 return vfma_f32(a1, a2, a3);
9 // CHECK: llvm.fma.v2f32({{.*a2, .*a3, .*a1}})
13 float32x4_t test_vfmaq_f32(float32x4_t a1, float32x4_t a2, float32x4_t a3) { argument
15 return vfmaq_f32(a1, a2, a3);
16 // CHECK: llvm.fma.v4f32({{.*a2, .*a3, .*a1}})
20 float64x2_t test_vfmaq_f64(float64x2_t a1, float64x2_t a2, float64x2_t a3) { argument
22 return vfmaq_f64(a1, a2, a3);
23 // CHECK: llvm.fma.v2f64({{.*a2, .*a3, .*a1}})
27 float32x2_t test_vfma_lane_f32(float32x2_t a1, float32x2_t a2, float32x2_ argument
36 test_vfmaq_lane_f32(float32x4_t a1, float32x4_t a2, float32x2_t a3) argument
45 test_vfmaq_lane_f64(float64x2_t a1, float64x2_t a2, float64x1_t a3) argument
54 test_vfma_n_f32(float32x2_t a1, float32x2_t a2, float32_t a3) argument
63 test_vfmaq_n_f32(float32x4_t a1, float32x4_t a2, float32_t a3) argument
72 test_vfmaq_n_f64(float64x2_t a1, float64x2_t a2, float64_t a3) argument
81 test_vfms_f32(float32x2_t a1, float32x2_t a2, float32x2_t a3) argument
89 test_vfmsq_f32(float32x4_t a1, float32x4_t a2, float32x4_t a3) argument
97 test_vfmsq_f64(float64x2_t a1, float64x2_t a2, float64x2_t a3) argument
105 test_vfms_lane_f32(float32x2_t a1, float32x2_t a2, float32x2_t a3) argument
116 test_vfmsq_lane_f32(float32x4_t a1, float32x4_t a2, float32x2_t a3) argument
127 test_vfmsq_lane_f64(float64x2_t a1, float64x2_t a2, float64x1_t a3) argument
[all...]
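
The a2, a3, a1 order in the llvm.fma CHECK lines reflects the ACLE-to-LLVM mapping: vfma(acc, x, y) computes acc + x*y, while llvm.fma takes its accumulator last. A minimal sketch (hypothetical name):

#include <arm_neon.h>

// vfma_f32(acc, x, y) = acc + x*y per lane; it lowers to
// llvm.fma.v2f32(x, y, acc), moving the accumulator to the end,
// hence the {{.*a2, .*a3, .*a1}} patterns above.
float32x2_t fused_mla(float32x2_t acc, float32x2_t x, float32x2_t y) {
  return vfma_f32(acc, x, y);
}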
arm64_vset_lane.c
6 float16x4_t test_vset_lane_f16(float16_t *a1, float16x4_t a2) { argument
8 return vset_lane_f16(*a1, a2, 1);
9 // CHECK insertelement <4 x i16> %a2, i16 %a1, i32 1
12 float16x8_t test_vsetq_lane_f16(float16_t *a1, float16x8_t a2) { argument
14 return vsetq_lane_f16(*a1, a2, 4);
15 // CHECK insertelement <8 x i16> %a2, i16 %a1, i32 4
20 float64x1_t test_vset_lane_f64(float64_t a1, float64x1_t a2) {
22 return vset_lane_f64(a1, a2, 0);
27 float64x2_t test_vsetq_lane_f64(float64_t a1, float64x2_t a2) { argument
29 return vsetq_lane_f64(a1, a2,
[all...]
arm64_vsli.c
9 int8x8_t test_vsli_n_s8(int8x8_t a1, int8x8_t a2) { argument
11 return vsli_n_s8(a1, a2, 3);
16 int16x4_t test_vsli_n_s16(int16x4_t a1, int16x4_t a2) { argument
18 return vsli_n_s16(a1, a2, 3);
23 int32x2_t test_vsli_n_s32(int32x2_t a1, int32x2_t a2) { argument
25 return vsli_n_s32(a1, a2, 1);
30 int64x1_t test_vsli_n_s64(int64x1_t a1, int64x1_t a2) { argument
32 return vsli_n_s64(a1, a2, 1);
37 uint8x8_t test_vsli_n_u8(uint8x8_t a1, uint8x8_t a2) { argument
39 return vsli_n_u8(a1, a2,
44 test_vsli_n_u16(uint16x4_t a1, uint16x4_t a2) argument
51 test_vsli_n_u32(uint32x2_t a1, uint32x2_t a2) argument
58 test_vsli_n_u64(uint64x1_t a1, uint64x1_t a2) argument
65 test_vsli_n_p8(poly8x8_t a1, poly8x8_t a2) argument
72 test_vsli_n_p16(poly16x4_t a1, poly16x4_t a2) argument
79 test_vsliq_n_s8(int8x16_t a1, int8x16_t a2) argument
86 test_vsliq_n_s16(int16x8_t a1, int16x8_t a2) argument
93 test_vsliq_n_s32(int32x4_t a1, int32x4_t a2) argument
100 test_vsliq_n_s64(int64x2_t a1, int64x2_t a2) argument
107 test_vsliq_n_u8(uint8x16_t a1, uint8x16_t a2) argument
114 test_vsliq_n_u16(uint16x8_t a1, uint16x8_t a2) argument
121 test_vsliq_n_u32(uint32x4_t a1, uint32x4_t a2) argument
128 test_vsliq_n_u64(uint64x2_t a1, uint64x2_t a2) argument
135 test_vsliq_n_p8(poly8x16_t a1, poly8x16_t a2) argument
142 test_vsliq_n_p16(poly16x8_t a1, poly16x8_t a2) argument
[all...]
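
SLI (shift left and insert) merges rather than replaces: each result lane is the second operand shifted left by the immediate, with the destination's low bits showing through; vsri in the next file is the mirrored right-shift form. A hedged sketch (hypothetical name):

#include <arm_neon.h>

// Per lane: (a2 << 3) | (a1 & 0x7). The shift vacates the low 3 bits
// and a1's low 3 bits are inserted there; the rest of a1 is replaced.
int8x8_t shift_left_insert3(int8x8_t a1, int8x8_t a2) {
  return vsli_n_s8(a1, a2, 3);
}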
arm64_vsri.c
10 int8x8_t test_vsri_n_s8(int8x8_t a1, int8x8_t a2) { argument
12 return vsri_n_s8(a1, a2, 3);
17 int16x4_t test_vsri_n_s16(int16x4_t a1, int16x4_t a2) { argument
19 return vsri_n_s16(a1, a2, 3);
24 int32x2_t test_vsri_n_s32(int32x2_t a1, int32x2_t a2) { argument
26 return vsri_n_s32(a1, a2, 1);
31 int64x1_t test_vsri_n_s64(int64x1_t a1, int64x1_t a2) { argument
33 return vsri_n_s64(a1, a2, 1);
38 uint8x8_t test_vsri_n_u8(uint8x8_t a1, uint8x8_t a2) { argument
40 return vsri_n_u8(a1, a2,
45 test_vsri_n_u16(uint16x4_t a1, uint16x4_t a2) argument
52 test_vsri_n_u32(uint32x2_t a1, uint32x2_t a2) argument
59 test_vsri_n_u64(uint64x1_t a1, uint64x1_t a2) argument
66 test_vsri_n_p8(poly8x8_t a1, poly8x8_t a2) argument
73 test_vsri_n_p16(poly16x4_t a1, poly16x4_t a2) argument
80 test_vsriq_n_s8(int8x16_t a1, int8x16_t a2) argument
87 test_vsriq_n_s16(int16x8_t a1, int16x8_t a2) argument
94 test_vsriq_n_s32(int32x4_t a1, int32x4_t a2) argument
101 test_vsriq_n_s64(int64x2_t a1, int64x2_t a2) argument
108 test_vsriq_n_u8(uint8x16_t a1, uint8x16_t a2) argument
115 test_vsriq_n_u16(uint16x8_t a1, uint16x8_t a2) argument
122 test_vsriq_n_u32(uint32x4_t a1, uint32x4_t a2) argument
129 test_vsriq_n_u64(uint64x2_t a1, uint64x2_t a2) argument
136 test_vsriq_n_p8(poly8x16_t a1, poly8x16_t a2) argument
143 test_vsriq_n_p16(poly16x8_t a1, poly16x8_t a2) argument
[all...]
arm64_vtst.c
6 uint64x2_t test_vtstq_s64(int64x2_t a1, int64x2_t a2) { argument
8 return vtstq_s64(a1, a2);
9 // CHECK: [[COMMONBITS:%[A-Za-z0-9.]+]] = and <2 x i64> %a1, %a2
15 uint64x2_t test_vtstq_u64(uint64x2_t a1, uint64x2_t a2) { argument
17 return vtstq_u64(a1, a2);
18 // CHECK: [[COMMONBITS:%[A-Za-z0-9.]+]] = and <2 x i64> %a1, %a2
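
vtst is a bitwise test rather than an ordinary compare: a lane becomes all-ones when the two inputs share at least one set bit. The CHECK lines above capture the first step (the and) as COMMONBITS; an icmp ne against zero and a sign-extend follow. A minimal sketch (hypothetical name):

#include <arm_neon.h>

// Per lane: all-ones if (a1 & a2) != 0, else zero. The IR sequence is
// and, then icmp ne zeroinitializer, then sext back to <2 x i64>.
uint64x2_t any_common_bits(int64x2_t a1, int64x2_t a2) {
  return vtstq_s64(a1, a2);
}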
asm-variable.c
4 unsigned long long a1, unsigned long long a2,
17 b2 = a2;
30 double a1, double a2,
46 b2 = a2;
3 foo(unsigned long long addr, unsigned long long a0, unsigned long long a1, unsigned long long a2, unsigned long long a3, unsigned long long a4, unsigned long long a5) argument
29 foo2(unsigned long long addr, double a0, double a1, double a2, double a3, double a4, double a5, double a6, double a7) argument
atomics-inlining.c
11 unsigned char a1[100], a2[100]; variable
31 (void)__atomic_load(&a1, &a2, memory_order_seq_cst);
32 (void)__atomic_store(&a1, &a2, memory_order_seq_cst);
43 // ARM: call arm_aapcscc void @__atomic_load(i32 100, i8* getelementptr inbounds ([100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8]* @a2, i32 0, i32 0)
44 // ARM: call arm_aapcscc void @__atomic_store(i32 100, i8* getelementptr inbounds ([100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8]* @a2, i32 0, i32 0)
55 // PPC32: call void @__atomic_load(i32 100, i8* getelementptr inbounds ([100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8]* @a2, i32 0, i32 0)
56 // PPC32: call void @__atomic_store(i32 100, i8* getelementptr inbounds ([100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8]* @a2, i32 0, i32 0)
67 // PPC64: call void @__atomic_load(i64 100, i8* getelementptr inbounds ([100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8]* @a2, i32 0, i32 0)
68 // PPC64: call void @__atomic_store(i64 100, i8* getelementptr inbounds ([100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8]* @a2, i32 0, i32 0)
79 // MIPS32: call void @__atomic_load(i32 100, i8* getelementptr inbounds ([100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8]* @a2, i3
[all...]
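
atomics-inlining.c pins down when atomics stop being inlined: a 100-byte object is wider than any lock-free width, so on every checked target Clang emits calls to the libatomic runtime entry points, passing the object size (100) as the first argument, as the ARM/PPC32/PPC64/MIPS32 CHECK lines show. A hedged sketch of the source shape:

#include <stdatomic.h>

unsigned char a1[100], a2[100];

void copy_oversized(void) {
  // Too large for inline atomic instructions, so both builtins compile
  // to runtime calls of the form __atomic_load(100, &a1, &a2, order).
  __atomic_load(&a1, &a2, memory_order_seq_cst);
  __atomic_store(&a1, &a2, memory_order_seq_cst);
}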
const-init.c
9 char a2[2][5] = { "asdf" }; variable
10 // CHECK: @a2 = global [2 x [5 x i8]] {{\[}}[5 x i8] c"asdf\00", [5 x i8] zeroinitializer]
exprs.c
94 int f6(int a0, struct s6 a1, struct s6 a2) { argument
95 return (a0 ? a1 : a2).f0;
le32-libcall-pow.c
13 void test_pow(float a0, double a1, long double a2) { argument
21 long double l2 = powl(a2, a2);
libcalls.c
8 void test_sqrt(float a0, double a1, long double a2) { argument
23 long double l2 = sqrtl(a2);
38 void test_pow(float a0, double a1, long double a2) { argument
49 long double l2 = powl(a2, a2);
61 void test_fma(float a0, double a1, long double a2) { argument
72 long double l2 = fmal(a2, a2, a2);
mips-vector-arg.c
11 // O32: define void @test_v4sf(i32 %a1.coerce0, i32 %a1.coerce1, i32 %a1.coerce2, i32 %a1.coerce3, i32 %a2, i32, i32 %a3.coerce0, i32 %a3.coerce1, i32 %a3.coerce2, i32 %a3.coerce3) [[NUW:#[0-9]+]]
13 // N64: define void @test_v4sf(i64 %a1.coerce0, i64 %a1.coerce1, i32 %a2, i64, i64 %a3.coerce0, i64 %a3.coerce1) [[NUW:#[0-9]+]]
16 void test_v4sf(v4sf a1, int a2, v4sf a3) { argument
17 test_v4sf_2(a3, a2, a1);
20 // O32: define void @test_v4i32(i32 %a1.coerce0, i32 %a1.coerce1, i32 %a1.coerce2, i32 %a1.coerce3, i32 %a2, i32, i32 %a3.coerce0, i32 %a3.coerce1, i32 %a3.coerce2, i32 %a3.coerce3) [[NUW]]
22 // N64: define void @test_v4i32(i64 %a1.coerce0, i64 %a1.coerce1, i32 %a2, i64, i64 %a3.coerce0, i64 %a3.coerce1) [[NUW]]
25 void test_v4i32(v4i32 a1, int a2, v4i32 a3) { argument
26 test_v4i32_2(a3, a2, a1);
mips64-padding-arg.c
12 // N64-LABEL: define void @foo1(i32 %a0, i64, double %a1.coerce0, i64 %a1.coerce1, i64 %a1.coerce2, i64 %a1.coerce3, double %a2.coerce0, i64 %a2.coerce1, i64 %a2.coerce2, i64 %a2.coerce3, i32 %b, i64, double %a3.coerce0, i64 %a3.coerce1, i64 %a3.coerce2, i64 %a3.coerce3)
13 // N64: tail call void @foo2(i32 1, i32 2, i32 %a0, i64 undef, double %a1.coerce0, i64 %a1.coerce1, i64 %a1.coerce2, i64 %a1.coerce3, double %a2.coerce0, i64 %a2.coerce1, i64 %a2.coerce2, i64 %a2.coerce3, i32 3, i64 undef, double %a3.coerce0, i64 %a3.coerce1, i64 %a3.coerce2, i64 %a3.coerce3)
18 void foo1(int a0, S0 a1, S0 a2, int b, S0 a3) { argument
19 foo2(1, 2, a0, a1, a2,
[all...]
mrtd.c
20 void quux(int a1, int a2, int a3) { argument
21 qux(a1, a2, a3);
ms_struct-bitfield-1.c
19 static int a2[(sizeof(t2) == 4) -1]; variable
ms_struct-bitfield-2.c
128 static int a2[(sizeof (struct_2) == size_struct_2) -1]; variable
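
The declarations in both ms_struct-bitfield tests are a pre-C11 compile-time assertion: if the sizeof comparison is false the array size evaluates to -1 and compilation fails; when it is true the size is 0, which Clang accepts as a GNU extension. A hedged sketch of the trick beside its modern spelling (the example struct and its assumed size are hypothetical):

struct probe { char c; int i; };

// Old-style static assert: the build breaks with "array size is
// negative" if the condition fails; assumes int is 4-byte aligned
// so that sizeof(struct probe) == 8.
static int size_check[(sizeof(struct probe) == 8) - 1];

// C11 equivalent:
_Static_assert(sizeof(struct probe) == 8, "unexpected struct layout");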
