// RUN: %clang_cc1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - -emit-llvm %s | FileCheck %s

// Test ARM64 extract (vext) intrinsics.
// This file can also be used as a backend test by adding a RUN line that
// passes -check-prefix=CHECK-CODEGEN to FileCheck; a sketch follows below.
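//
// An illustrative sketch of the kind of line to add (the exact driver,
// optimization level, and flags here are assumptions, not verified):
//   %clang -target arm64-apple-ios7 -ffreestanding -O1 -S -o - %s \
//     | FileCheck -check-prefix=CHECK-CODEGEN %s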

#include <arm_neon.h>

void test_vext_s8()
{
  // CHECK: test_vext_s8
  int8x8_t xS8x8;
  xS8x8 = vext_s8(xS8x8, xS8x8, 1);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vext_s8:
  // CHECK-CODEGEN: {{ext.8.*#1}}
}
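// For reference, vext_s8(a, b, 1) selects eight consecutive lanes starting at
// index 1 of the concatenation {a, b}, so in IR it becomes a shufflevector
// along the lines of the following (a conceptual sketch, not the exact IR
// clang emits):
//   %r = shufflevector <8 x i8> %a, <8 x i8> %b,
//        <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>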

void test_vext_u8()
{
  // CHECK: test_vext_u8
  uint8x8_t xU8x8;
  xU8x8 = vext_u8(xU8x8, xU8x8, 2);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vext_u8:
  // CHECK-CODEGEN: {{ext.8.*#2}}
}

void test_vext_p8()
{
  // CHECK: test_vext_p8
  poly8x8_t xP8x8;
  xP8x8 = vext_p8(xP8x8, xP8x8, 3);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vext_p8:
  // CHECK-CODEGEN: {{ext.8.*#3}}
}

void test_vext_s16()
{
  // CHECK: test_vext_s16
  int16x4_t xS16x4;
  xS16x4 = vext_s16(xS16x4, xS16x4, 1);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vext_s16:
  // CHECK-CODEGEN: {{ext.8.*#2}}
}
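// Note: the EXT instruction's immediate is a byte offset, so the element index
// passed to vext scales by the element size; index 1 on 16-bit lanes is
// expected as #2 above, and the 32- and 64-bit tests below expect index * 4
// and index * 8 respectively.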

void test_vext_u16()
{
  // CHECK: test_vext_u16
  uint16x4_t xU16x4;
  xU16x4 = vext_u16(xU16x4, xU16x4, 2);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vext_u16:
  // CHECK-CODEGEN: {{ext.8.*#4}}
}

void test_vext_p16()
{
  // CHECK: test_vext_p16
  poly16x4_t xP16x4;
  xP16x4 = vext_p16(xP16x4, xP16x4, 3);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vext_p16:
  // CHECK-CODEGEN: {{ext.8.*#6}}
}

void test_vext_s32()
{
  // CHECK: test_vext_s32
  int32x2_t xS32x2;
  xS32x2 = vext_s32(xS32x2, xS32x2, 1);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vext_s32:
  // CHECK-CODEGEN: {{ext.8.*#4}}
}

void test_vext_u32()
{
  // CHECK: test_vext_u32
  uint32x2_t xU32x2;
  xU32x2 = vext_u32(xU32x2, xU32x2, 1);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vext_u32:
  // CHECK-CODEGEN: {{ext.8.*#4}}
}

void test_vext_f32()
{
  // CHECK: test_vext_f32
  float32x2_t xF32x2;
  xF32x2 = vext_f32(xF32x2, xF32x2, 1);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vext_f32:
  // CHECK-CODEGEN: {{ext.8.*#4}}
}

void test_vext_s64()
{
  // CHECK: test_vext_s64
  int64x1_t xS64x1;
  // FIXME: don't use 1 as the index or check the immediate for now; clang may have a bug.
  xS64x1 = vext_s64(xS64x1, xS64x1, /*1*/0);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vext_s64:
  // CHECK_FIXME: {{ext.8.*#0}}
}

void test_vext_u64()
{
  // CHECK: test_vext_u64
  uint64x1_t xU64x1;
  // FIXME: don't use 1 as the index or check the immediate for now; clang may have a bug.
  xU64x1 = vext_u64(xU64x1, xU64x1, /*1*/0);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vext_u64:
  // CHECK_FIXME: {{ext.8.*#0}}
}
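// The q-suffixed variants below operate on 128-bit vectors, so their
// CHECK-CODEGEN patterns expect the 16-byte form of EXT, with the immediate
// still scaled by the element size in bytes.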

void test_vextq_s8()
{
  // CHECK: test_vextq_s8
  int8x16_t xS8x16;
  xS8x16 = vextq_s8(xS8x16, xS8x16, 4);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vextq_s8:
  // CHECK-CODEGEN: {{ext.16.*#4}}
}

void test_vextq_u8()
{
  // CHECK: test_vextq_u8
  uint8x16_t xU8x16;
  xU8x16 = vextq_u8(xU8x16, xU8x16, 5);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vextq_u8:
  // CHECK-CODEGEN: {{ext.16.*#5}}
}

void test_vextq_p8()
{
  // CHECK: test_vextq_p8
  poly8x16_t xP8x16;
  xP8x16 = vextq_p8(xP8x16, xP8x16, 6);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vextq_p8:
  // CHECK-CODEGEN: {{ext.16.*#6}}
}

void test_vextq_s16()
{
  // CHECK: test_vextq_s16
  int16x8_t xS16x8;
  xS16x8 = vextq_s16(xS16x8, xS16x8, 7);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vextq_s16:
  // CHECK-CODEGEN: {{ext.16.*#14}}
}

void test_vextq_u16()
{
  // CHECK: test_vextq_u16
  uint16x8_t xU16x8;
  xU16x8 = vextq_u16(xU16x8, xU16x8, 4);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vextq_u16:
  // CHECK-CODEGEN: {{ext.16.*#8}}
}

void test_vextq_p16()
{
  // CHECK: test_vextq_p16
  poly16x8_t xP16x8;
  xP16x8 = vextq_p16(xP16x8, xP16x8, 5);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vextq_p16:
  // CHECK-CODEGEN: {{ext.16.*#10}}
}

void test_vextq_s32()
{
  // CHECK: test_vextq_s32
  int32x4_t xS32x4;
  xS32x4 = vextq_s32(xS32x4, xS32x4, 1);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vextq_s32:
  // CHECK-CODEGEN: {{ext.16.*#4}}
}

void test_vextq_u32()
{
  // CHECK: test_vextq_u32
  uint32x4_t xU32x4;
  xU32x4 = vextq_u32(xU32x4, xU32x4, 2);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vextq_u32:
  // CHECK-CODEGEN: {{ext.16.*#8}}
}

void test_vextq_f32()
{
  // CHECK: test_vextq_f32
  float32x4_t xF32x4;
  xF32x4 = vextq_f32(xF32x4, xF32x4, 3);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vextq_f32:
  // CHECK-CODEGEN: {{ext.16.*#12}}
}

void test_vextq_s64()
{
  // CHECK: test_vextq_s64
  int64x2_t xS64x2;
  xS64x2 = vextq_s64(xS64x2, xS64x2, 1);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vextq_s64:
  // CHECK-CODEGEN: {{ext.16.*#8}}
}

void test_vextq_u64()
{
  // CHECK: test_vextq_u64
  uint64x2_t xU64x2;
  xU64x2 = vextq_u64(xU64x2, xU64x2, 1);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vextq_u64:
  // CHECK-CODEGEN: {{ext.16.*#8}}
}

void test_vextq_f64()
{
  // CHECK: test_vextq_f64
  float64x2_t xF64x2;
  xF64x2 = vextq_f64(xF64x2, xF64x2, 1);
  // CHECK: shufflevector
  // CHECK-CODEGEN: test_vextq_f64:
  // CHECK-CODEGEN: {{ext.16.*#8}}
}